applied-ai-018 committed
Commit 45a4c44 · verified · 1 parent: 3a7ab8a

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/15.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/15.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/17.input_layernorm.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  5. venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/transformers/models/cvt/__init__.py +81 -0
  8. venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/convert_cvt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/transformers/models/cvt/configuration_cvt.py +146 -0
  11. venv/lib/python3.10/site-packages/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py +362 -0
  12. venv/lib/python3.10/site-packages/transformers/models/cvt/modeling_cvt.py +725 -0
  13. venv/lib/python3.10/site-packages/transformers/models/cvt/modeling_tf_cvt.py +1097 -0
  14. venv/lib/python3.10/site-packages/transformers/models/ernie/__init__.py +70 -0
  15. venv/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py +162 -0
  19. venv/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py +1820 -0
  20. venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/configuration_mgp_str.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/modeling_mgp_str.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/transformers/models/mgp_str/processing_mgp_str.py +230 -0
  24. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__init__.py +59 -0
  25. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/configuration_recurrent_gemma.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/convert_recurrent_gemma_to_hf.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/modeling_recurrent_gemma.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +158 -0
  30. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/convert_recurrent_gemma_to_hf.py +222 -0
  31. venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +942 -0
  32. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__init__.py +111 -0
  33. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/configuration_seamless_m4t.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/feature_extraction_seamless_m4t.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/processing_seamless_m4t.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/tokenization_seamless_m4t.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/tokenization_seamless_m4t_fast.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/configuration_seamless_m4t.py +416 -0
  42. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py +397 -0
  43. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +306 -0
  44. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/modeling_seamless_m4t.py +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/processing_seamless_m4t.py +117 -0
  46. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/tokenization_seamless_m4t.py +562 -0
  47. venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py +446 -0
  48. venv/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/convert_vilt_original_to_pytorch.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/15.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f6133aed6e4cbc1847338e6787c9b3d52878c67fd8e4c05897ccfa3dce89c1f
+ size 50332843
ckpts/universal/global_step40/zero/15.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1052f284ef706155459598fa4d0a5cd04f0901ae70c09a0c08e36312fbf4bee0
+ size 33555612
ckpts/universal/global_step40/zero/17.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39acee8a8a6e6e56c894fb1a77385dd607d89daf7f254d5fb6b636ffb17d38a2
+ size 9387
ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c55508f6386cabcde92c10133fb91e6e18086f699b4d3e3ba62a6f34214e0bd1
+ size 33555612
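The four checkpoint tensors above are committed as Git LFS pointer stubs rather than as binary payloads: each stub records the pointer-spec version, the oid (SHA-256 of the real file) and its size in bytes, while the actual data lives in LFS storage. As a rough illustration only (not part of this commit, path hypothetical), such a pointer can be parsed like this:

    from pathlib import Path

    def parse_lfs_pointer(path: str) -> dict:
        """Split a Git LFS pointer file into its version/oid/size fields."""
        fields = {}
        for line in Path(path).read_text().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    # Hypothetical local path to one of the stubs added above.
    info = parse_lfs_pointer("ckpts/universal/global_step40/zero/17.input_layernorm.weight/exp_avg_sq.pt")
    print(info["oid"], info["size"])  # sha256:39acee8a..., 9387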
venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc ADDED
Binary file (10.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc ADDED
Binary file (79.5 kB).
 
venv/lib/python3.10/site-packages/transformers/models/cvt/__init__.py ADDED
@@ -0,0 +1,81 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_cvt": ["CVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CvtConfig"]}
20
+
21
+
22
+ try:
23
+ if not is_torch_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["modeling_cvt"] = [
29
+ "CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
30
+ "CvtForImageClassification",
31
+ "CvtModel",
32
+ "CvtPreTrainedModel",
33
+ ]
34
+
35
+ try:
36
+ if not is_tf_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["modeling_tf_cvt"] = [
42
+ "TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
43
+ "TFCvtForImageClassification",
44
+ "TFCvtModel",
45
+ "TFCvtPreTrainedModel",
46
+ ]
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_cvt import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP, CvtConfig
50
+
51
+ try:
52
+ if not is_torch_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .modeling_cvt import (
58
+ CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
59
+ CvtForImageClassification,
60
+ CvtModel,
61
+ CvtPreTrainedModel,
62
+ )
63
+
64
+ try:
65
+ if not is_tf_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .modeling_tf_cvt import (
71
+ TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
72
+ TFCvtForImageClassification,
73
+ TFCvtModel,
74
+ TFCvtPreTrainedModel,
75
+ )
76
+
77
+
78
+ else:
79
+ import sys
80
+
81
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
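This __init__.py follows the standard Transformers lazy-import pattern: _import_structure maps each submodule to its public names, the optional PyTorch and TensorFlow backends are probed with is_torch_available / is_tf_available, and at runtime the module object is replaced by a _LazyModule so a submodule is only imported when one of its names is first accessed. A hedged usage sketch (assumes a transformers install with the PyTorch backend):

    # Sketch only: how the lazy init above is typically consumed.
    from transformers import CvtConfig, CvtModel  # cvt submodules load lazily, on first access

    config = CvtConfig()      # defaults correspond to microsoft/cvt-13
    model = CvtModel(config)  # random weights; use CvtModel.from_pretrained(...) for real ones
    print(type(model).__name__)  # CvtModel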
venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/convert_cvt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (9.45 kB).
 
venv/lib/python3.10/site-packages/transformers/models/cvt/configuration_cvt.py ADDED
@@ -0,0 +1,146 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CvT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class CvtConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`CvtModel`]. It is used to instantiate a CvT model
30
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
31
+ defaults will yield a similar configuration to that of the CvT
32
+ [microsoft/cvt-13](https://huggingface.co/microsoft/cvt-13) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ num_channels (`int`, *optional*, defaults to 3):
39
+ The number of input channels.
40
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3]`):
41
+ The kernel size of each encoder's patch embedding.
42
+ patch_stride (`List[int]`, *optional*, defaults to `[4, 2, 2]`):
43
+ The stride size of each encoder's patch embedding.
44
+ patch_padding (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
45
+ The padding size of each encoder's patch embedding.
46
+ embed_dim (`List[int]`, *optional*, defaults to `[64, 192, 384]`):
47
+ Dimension of each of the encoder blocks.
48
+ num_heads (`List[int]`, *optional*, defaults to `[1, 3, 6]`):
49
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
50
+ depth (`List[int]`, *optional*, defaults to `[1, 2, 10]`):
51
+ The number of layers in each encoder block.
52
+ mlp_ratio (`List[float]`, *optional*, defaults to `[4.0, 4.0, 4.0]`):
53
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
54
+ encoder blocks.
55
+ attention_drop_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
56
+ The dropout ratio for the attention probabilities.
57
+ drop_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
58
+ The dropout ratio for the patch embeddings probabilities.
59
+ drop_path_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.1]`):
60
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
61
+ qkv_bias (`List[bool]`, *optional*, defaults to `[True, True, True]`):
62
+ Whether or not to add a bias to the query, key and value projections in the attention layers.
63
+ cls_token (`List[bool]`, *optional*, defaults to `[False, False, True]`):
64
+ Whether or not to add a classification token to the output of each of the last 3 stages.
65
+ qkv_projection_method (`List[str]`, *optional*, defaults to `["dw_bn", "dw_bn", "dw_bn"]`):
66
+ The projection method for query, key and value. Default is depth-wise convolutions with batch norm. For
67
+ linear projection, use `"avg"`.
68
+ kernel_qkv (`List[int]`, *optional*, defaults to `[3, 3, 3]`):
69
+ The kernel size for query, key and value in attention layer
70
+ padding_kv (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
71
+ The padding size for key and value in attention layer
72
+ stride_kv (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
73
+ The stride size for key and value in attention layer
74
+ padding_q (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
75
+ The padding size for query in attention layer
76
+ stride_q (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
77
+ The stride size for query in attention layer
78
+ initializer_range (`float`, *optional*, defaults to 0.02):
79
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
80
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
81
+ The epsilon used by the layer normalization layers.
82
+
83
+ Example:
84
+
85
+ ```python
86
+ >>> from transformers import CvtConfig, CvtModel
87
+
88
+ >>> # Initializing a CvT microsoft/cvt-13 style configuration
89
+ >>> configuration = CvtConfig()
90
+
91
+ >>> # Initializing a model (with random weights) from the microsoft/cvt-13 style configuration
92
+ >>> model = CvtModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "cvt"
99
+
100
+ def __init__(
101
+ self,
102
+ num_channels=3,
103
+ patch_sizes=[7, 3, 3],
104
+ patch_stride=[4, 2, 2],
105
+ patch_padding=[2, 1, 1],
106
+ embed_dim=[64, 192, 384],
107
+ num_heads=[1, 3, 6],
108
+ depth=[1, 2, 10],
109
+ mlp_ratio=[4.0, 4.0, 4.0],
110
+ attention_drop_rate=[0.0, 0.0, 0.0],
111
+ drop_rate=[0.0, 0.0, 0.0],
112
+ drop_path_rate=[0.0, 0.0, 0.1],
113
+ qkv_bias=[True, True, True],
114
+ cls_token=[False, False, True],
115
+ qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
116
+ kernel_qkv=[3, 3, 3],
117
+ padding_kv=[1, 1, 1],
118
+ stride_kv=[2, 2, 2],
119
+ padding_q=[1, 1, 1],
120
+ stride_q=[1, 1, 1],
121
+ initializer_range=0.02,
122
+ layer_norm_eps=1e-12,
123
+ **kwargs,
124
+ ):
125
+ super().__init__(**kwargs)
126
+ self.num_channels = num_channels
127
+ self.patch_sizes = patch_sizes
128
+ self.patch_stride = patch_stride
129
+ self.patch_padding = patch_padding
130
+ self.embed_dim = embed_dim
131
+ self.num_heads = num_heads
132
+ self.depth = depth
133
+ self.mlp_ratio = mlp_ratio
134
+ self.attention_drop_rate = attention_drop_rate
135
+ self.drop_rate = drop_rate
136
+ self.drop_path_rate = drop_path_rate
137
+ self.qkv_bias = qkv_bias
138
+ self.cls_token = cls_token
139
+ self.qkv_projection_method = qkv_projection_method
140
+ self.kernel_qkv = kernel_qkv
141
+ self.padding_kv = padding_kv
142
+ self.stride_kv = stride_kv
143
+ self.padding_q = padding_q
144
+ self.stride_q = stride_q
145
+ self.initializer_range = initializer_range
146
+ self.layer_norm_eps = layer_norm_eps
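Almost every CvtConfig argument is a per-stage list with one entry per encoder stage (three stages by default), which is how the modeling code indexes them stage by stage. A small sketch of a non-default configuration, using the CvT-21 depths that the conversion script later in this commit selects; treat it as an illustration rather than an official recipe:

    from transformers import CvtConfig

    config = CvtConfig(
        depth=[1, 4, 16],                # blocks per stage (1 + 4 + 16 = 21)
        embed_dim=[64, 192, 384],        # hidden size per stage
        num_heads=[1, 3, 6],             # attention heads per stage
        cls_token=[False, False, True],  # only the last stage carries a classification token
    )
    print(config.depth, config.layer_norm_eps)  # [1, 4, 16] 1e-12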
venv/lib/python3.10/site-packages/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,362 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert CvT checkpoints from the original repository.
16
+
17
+ URL: https://github.com/microsoft/CvT"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from collections import OrderedDict
23
+
24
+ import torch
25
+ from huggingface_hub import cached_download, hf_hub_url
26
+
27
+ from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
28
+
29
+
30
+ def embeddings(idx):
31
+ """
32
+ The function helps in renaming embedding layer weights.
33
+
34
+ Args:
35
+ idx: stage number in original model
36
+ """
37
+ embed = []
38
+ embed.append(
39
+ (
40
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
41
+ f"stage{idx}.patch_embed.proj.weight",
42
+ )
43
+ )
44
+ embed.append(
45
+ (
46
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
47
+ f"stage{idx}.patch_embed.proj.bias",
48
+ )
49
+ )
50
+ embed.append(
51
+ (
52
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
53
+ f"stage{idx}.patch_embed.norm.weight",
54
+ )
55
+ )
56
+ embed.append(
57
+ (
58
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
59
+ f"stage{idx}.patch_embed.norm.bias",
60
+ )
61
+ )
62
+ return embed
63
+
64
+
65
+ def attention(idx, cnt):
66
+ """
67
+ The function helps in renaming attention block layers weights.
68
+
69
+ Args:
70
+ idx: stage number in original model
71
+ cnt: count of blocks in each stage
72
+ """
73
+ attention_weights = []
74
+ attention_weights.append(
75
+ (
76
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
77
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
78
+ )
79
+ )
80
+ attention_weights.append(
81
+ (
82
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
83
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
84
+ )
85
+ )
86
+ attention_weights.append(
87
+ (
88
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
89
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
90
+ )
91
+ )
92
+ attention_weights.append(
93
+ (
94
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
95
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
96
+ )
97
+ )
98
+ attention_weights.append(
99
+ (
100
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
101
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
102
+ )
103
+ )
104
+ attention_weights.append(
105
+ (
106
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
107
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
108
+ )
109
+ )
110
+ attention_weights.append(
111
+ (
112
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
113
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
114
+ )
115
+ )
116
+ attention_weights.append(
117
+ (
118
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
119
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
120
+ )
121
+ )
122
+ attention_weights.append(
123
+ (
124
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
125
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
126
+ )
127
+ )
128
+ attention_weights.append(
129
+ (
130
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
131
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
132
+ )
133
+ )
134
+ attention_weights.append(
135
+ (
136
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
137
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
138
+ )
139
+ )
140
+ attention_weights.append(
141
+ (
142
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
143
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
144
+ )
145
+ )
146
+ attention_weights.append(
147
+ (
148
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
149
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
150
+ )
151
+ )
152
+ attention_weights.append(
153
+ (
154
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
155
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
156
+ )
157
+ )
158
+ attention_weights.append(
159
+ (
160
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
161
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
162
+ )
163
+ )
164
+ attention_weights.append(
165
+ (
166
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
167
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
168
+ )
169
+ )
170
+ attention_weights.append(
171
+ (
172
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
173
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
174
+ )
175
+ )
176
+ attention_weights.append(
177
+ (
178
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
179
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
180
+ )
181
+ )
182
+ attention_weights.append(
183
+ (
184
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
185
+ f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
186
+ )
187
+ )
188
+ attention_weights.append(
189
+ (
190
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
191
+ f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
192
+ )
193
+ )
194
+ attention_weights.append(
195
+ (
196
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
197
+ f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
198
+ )
199
+ )
200
+ attention_weights.append(
201
+ (
202
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
203
+ f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
204
+ )
205
+ )
206
+ attention_weights.append(
207
+ (
208
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
209
+ f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
210
+ )
211
+ )
212
+ attention_weights.append(
213
+ (
214
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
215
+ f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
216
+ )
217
+ )
218
+ attention_weights.append(
219
+ (
220
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
221
+ f"stage{idx}.blocks.{cnt}.attn.proj.weight",
222
+ )
223
+ )
224
+ attention_weights.append(
225
+ (
226
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
227
+ f"stage{idx}.blocks.{cnt}.attn.proj.bias",
228
+ )
229
+ )
230
+ attention_weights.append(
231
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight")
232
+ )
233
+ attention_weights.append(
234
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias")
235
+ )
236
+ attention_weights.append(
237
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight")
238
+ )
239
+ attention_weights.append(
240
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias")
241
+ )
242
+ attention_weights.append(
243
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight")
244
+ )
245
+ attention_weights.append(
246
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias")
247
+ )
248
+ attention_weights.append(
249
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight")
250
+ )
251
+ attention_weights.append(
252
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias")
253
+ )
254
+ return attention_weights
255
+
256
+
257
+ def cls_token(idx):
258
+ """
259
+ Function helps in renaming cls_token weights
260
+ """
261
+ token = []
262
+ token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
263
+ return token
264
+
265
+
266
+ def final():
267
+ """
268
+ Function helps in renaming final classification layer
269
+ """
270
+ head = []
271
+ head.append(("layernorm.weight", "norm.weight"))
272
+ head.append(("layernorm.bias", "norm.bias"))
273
+ head.append(("classifier.weight", "head.weight"))
274
+ head.append(("classifier.bias", "head.bias"))
275
+ return head
276
+
277
+
278
+ def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
279
+ """
280
+ Function to convert a Microsoft CvT checkpoint to a Hugging Face checkpoint
281
+ """
282
+ img_labels_file = "imagenet-1k-id2label.json"
283
+ num_labels = 1000
284
+
285
+ repo_id = "huggingface/label-files"
286
+ num_labels = num_labels
287
+ id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
288
+ id2label = {int(k): v for k, v in id2label.items()}
289
+
290
+ id2label = id2label
291
+ label2id = {v: k for k, v in id2label.items()}
292
+
293
+ config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
294
+
295
+ # For depth size 13 (13 = 1+2+10)
296
+ if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
297
+ config.depth = [1, 2, 10]
298
+
299
+ # For depth size 21 (21 = 1+4+16)
300
+ elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
301
+ config.depth = [1, 4, 16]
302
+
303
+ # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
304
+ else:
305
+ config.depth = [2, 2, 20]
306
+ config.num_heads = [3, 12, 16]
307
+ config.embed_dim = [192, 768, 1024]
308
+
309
+ model = CvtForImageClassification(config)
310
+ image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
311
+ image_processor.size["shortest_edge"] = image_size
312
+ original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
313
+
314
+ huggingface_weights = OrderedDict()
315
+ list_of_state_dict = []
316
+
317
+ for idx in range(len(config.depth)):
318
+ if config.cls_token[idx]:
319
+ list_of_state_dict = list_of_state_dict + cls_token(idx)
320
+ list_of_state_dict = list_of_state_dict + embeddings(idx)
321
+ for cnt in range(config.depth[idx]):
322
+ list_of_state_dict = list_of_state_dict + attention(idx, cnt)
323
+
324
+ list_of_state_dict = list_of_state_dict + final()
325
+ for gg in list_of_state_dict:
326
+ print(gg)
327
+ for i in range(len(list_of_state_dict)):
328
+ huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
329
+
330
+ model.load_state_dict(huggingface_weights)
331
+ model.save_pretrained(pytorch_dump_folder)
332
+ image_processor.save_pretrained(pytorch_dump_folder)
333
+
334
+
335
+ # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
336
+
337
+ if __name__ == "__main__":
338
+ parser = argparse.ArgumentParser()
339
+ parser.add_argument(
340
+ "--cvt_model",
341
+ default="cvt-w24",
342
+ type=str,
343
+ help="Name of the cvt model you'd like to convert.",
344
+ )
345
+ parser.add_argument(
346
+ "--image_size",
347
+ default=384,
348
+ type=int,
349
+ help="Input Image Size",
350
+ )
351
+ parser.add_argument(
352
+ "--cvt_file_name",
353
+ default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
354
+ type=str,
355
+ help="Path to the original CvT checkpoint (.pth) file.",
356
+ )
357
+ parser.add_argument(
358
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
359
+ )
360
+
361
+ args = parser.parse_args()
362
+ convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
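Assuming the original weights have been downloaded from the model zoo link above, the converter can be driven through the CLI or called directly; a hedged sketch with hypothetical paths:

    # Sketch only: programmatic use of convert_cvt_checkpoint defined above.
    from transformers.models.cvt.convert_cvt_original_pytorch_checkpoint_to_pytorch import (
        convert_cvt_checkpoint,
    )

    convert_cvt_checkpoint(
        cvt_model="cvt-13",                        # "13" / "21" in the name pick the matching depth preset
        image_size=224,
        cvt_file_name="CvT-13-224x224-IN-1k.pth",  # hypothetical path to the original checkpoint
        pytorch_dump_folder="./cvt-13-hf",         # output directory for the converted model
    )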
venv/lib/python3.10/site-packages/transformers/models/cvt/modeling_cvt.py ADDED
@@ -0,0 +1,725 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CvT model."""
16
+
17
+
18
+ import collections.abc
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
28
+ from ...modeling_outputs import ImageClassifierOutputWithNoAttention, ModelOutput
29
+ from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
30
+ from ...utils import logging
31
+ from .configuration_cvt import CvtConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ # General docstring
37
+ _CONFIG_FOR_DOC = "CvtConfig"
38
+
39
+ # Base docstring
40
+ _CHECKPOINT_FOR_DOC = "microsoft/cvt-13"
41
+ _EXPECTED_OUTPUT_SHAPE = [1, 384, 14, 14]
42
+
43
+ # Image classification docstring
44
+ _IMAGE_CLASS_CHECKPOINT = "microsoft/cvt-13"
45
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
46
+
47
+
48
+ from ..deprecated._archive_maps import CVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
49
+
50
+
51
+ @dataclass
52
+ class BaseModelOutputWithCLSToken(ModelOutput):
53
+ """
54
+ Base class for model's outputs, with potential hidden states and attentions.
55
+
56
+ Args:
57
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
58
+ Sequence of hidden-states at the output of the last layer of the model.
59
+ cls_token_value (`torch.FloatTensor` of shape `(batch_size, 1, hidden_size)`):
60
+ Classification token at the output of the last layer of the model.
61
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
62
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
63
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
64
+ plus the initial embedding outputs.
65
+ """
66
+
67
+ last_hidden_state: torch.FloatTensor = None
68
+ cls_token_value: torch.FloatTensor = None
69
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
70
+
71
+
72
+ # Copied from transformers.models.beit.modeling_beit.drop_path
73
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
74
+ """
75
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
76
+
77
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
78
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
79
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
80
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
81
+ argument.
82
+ """
83
+ if drop_prob == 0.0 or not training:
84
+ return input
85
+ keep_prob = 1 - drop_prob
86
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
87
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
88
+ random_tensor.floor_() # binarize
89
+ output = input.div(keep_prob) * random_tensor
90
+ return output
91
+
92
+
93
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath
94
+ class CvtDropPath(nn.Module):
95
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
96
+
97
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
98
+ super().__init__()
99
+ self.drop_prob = drop_prob
100
+
101
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
102
+ return drop_path(hidden_states, self.drop_prob, self.training)
103
+
104
+ def extra_repr(self) -> str:
105
+ return "p={}".format(self.drop_prob)
106
+
107
+
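The drop_path helper above implements stochastic depth: during training, each sample's residual branch is dropped with probability drop_prob and the survivors are rescaled by 1 / keep_prob so the expected output is unchanged; in eval mode it is the identity. A hedged sanity-check sketch (not part of the file):

    import torch
    from transformers.models.cvt.modeling_cvt import drop_path

    x = torch.ones(10_000, 4)
    y = drop_path(x, drop_prob=0.1, training=True)
    print(round(y.mean().item(), 2))                         # ~1.0: survivors are scaled by 1 / 0.9
    print(drop_path(x, drop_prob=0.1, training=False) is x)  # True: identity at eval time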
108
+ class CvtEmbeddings(nn.Module):
109
+ """
110
+ Construct the CvT embeddings.
111
+ """
112
+
113
+ def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
114
+ super().__init__()
115
+ self.convolution_embeddings = CvtConvEmbeddings(
116
+ patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim, stride=stride, padding=padding
117
+ )
118
+ self.dropout = nn.Dropout(dropout_rate)
119
+
120
+ def forward(self, pixel_values):
121
+ hidden_state = self.convolution_embeddings(pixel_values)
122
+ hidden_state = self.dropout(hidden_state)
123
+ return hidden_state
124
+
125
+
126
+ class CvtConvEmbeddings(nn.Module):
127
+ """
128
+ Image to Conv Embedding.
129
+ """
130
+
131
+ def __init__(self, patch_size, num_channels, embed_dim, stride, padding):
132
+ super().__init__()
133
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
134
+ self.patch_size = patch_size
135
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=stride, padding=padding)
136
+ self.normalization = nn.LayerNorm(embed_dim)
137
+
138
+ def forward(self, pixel_values):
139
+ pixel_values = self.projection(pixel_values)
140
+ batch_size, num_channels, height, width = pixel_values.shape
141
+ hidden_size = height * width
142
+ # rearrange "b c h w -> b (h w) c"
143
+ pixel_values = pixel_values.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
144
+ if self.normalization:
145
+ pixel_values = self.normalization(pixel_values)
146
+ # rearrange "b (h w) c -> b c h w"
147
+ pixel_values = pixel_values.permute(0, 2, 1).view(batch_size, num_channels, height, width)
148
+ return pixel_values
149
+
150
+
151
+ class CvtSelfAttentionConvProjection(nn.Module):
152
+ def __init__(self, embed_dim, kernel_size, padding, stride):
153
+ super().__init__()
154
+ self.convolution = nn.Conv2d(
155
+ embed_dim,
156
+ embed_dim,
157
+ kernel_size=kernel_size,
158
+ padding=padding,
159
+ stride=stride,
160
+ bias=False,
161
+ groups=embed_dim,
162
+ )
163
+ self.normalization = nn.BatchNorm2d(embed_dim)
164
+
165
+ def forward(self, hidden_state):
166
+ hidden_state = self.convolution(hidden_state)
167
+ hidden_state = self.normalization(hidden_state)
168
+ return hidden_state
169
+
170
+
171
+ class CvtSelfAttentionLinearProjection(nn.Module):
172
+ def forward(self, hidden_state):
173
+ batch_size, num_channels, height, width = hidden_state.shape
174
+ hidden_size = height * width
175
+ # rearrange " b c h w -> b (h w) c"
176
+ hidden_state = hidden_state.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
177
+ return hidden_state
178
+
179
+
180
+ class CvtSelfAttentionProjection(nn.Module):
181
+ def __init__(self, embed_dim, kernel_size, padding, stride, projection_method="dw_bn"):
182
+ super().__init__()
183
+ if projection_method == "dw_bn":
184
+ self.convolution_projection = CvtSelfAttentionConvProjection(embed_dim, kernel_size, padding, stride)
185
+ self.linear_projection = CvtSelfAttentionLinearProjection()
186
+
187
+ def forward(self, hidden_state):
188
+ hidden_state = self.convolution_projection(hidden_state)
189
+ hidden_state = self.linear_projection(hidden_state)
190
+ return hidden_state
191
+
192
+
193
+ class CvtSelfAttention(nn.Module):
194
+ def __init__(
195
+ self,
196
+ num_heads,
197
+ embed_dim,
198
+ kernel_size,
199
+ padding_q,
200
+ padding_kv,
201
+ stride_q,
202
+ stride_kv,
203
+ qkv_projection_method,
204
+ qkv_bias,
205
+ attention_drop_rate,
206
+ with_cls_token=True,
207
+ **kwargs,
208
+ ):
209
+ super().__init__()
210
+ self.scale = embed_dim**-0.5
211
+ self.with_cls_token = with_cls_token
212
+ self.embed_dim = embed_dim
213
+ self.num_heads = num_heads
214
+
215
+ self.convolution_projection_query = CvtSelfAttentionProjection(
216
+ embed_dim,
217
+ kernel_size,
218
+ padding_q,
219
+ stride_q,
220
+ projection_method="linear" if qkv_projection_method == "avg" else qkv_projection_method,
221
+ )
222
+ self.convolution_projection_key = CvtSelfAttentionProjection(
223
+ embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
224
+ )
225
+ self.convolution_projection_value = CvtSelfAttentionProjection(
226
+ embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
227
+ )
228
+
229
+ self.projection_query = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
230
+ self.projection_key = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
231
+ self.projection_value = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
232
+
233
+ self.dropout = nn.Dropout(attention_drop_rate)
234
+
235
+ def rearrange_for_multi_head_attention(self, hidden_state):
236
+ batch_size, hidden_size, _ = hidden_state.shape
237
+ head_dim = self.embed_dim // self.num_heads
238
+ # rearrange 'b t (h d) -> b h t d'
239
+ return hidden_state.view(batch_size, hidden_size, self.num_heads, head_dim).permute(0, 2, 1, 3)
240
+
241
+ def forward(self, hidden_state, height, width):
242
+ if self.with_cls_token:
243
+ cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
244
+ batch_size, hidden_size, num_channels = hidden_state.shape
245
+ # rearrange "b (h w) c -> b c h w"
246
+ hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
247
+
248
+ key = self.convolution_projection_key(hidden_state)
249
+ query = self.convolution_projection_query(hidden_state)
250
+ value = self.convolution_projection_value(hidden_state)
251
+
252
+ if self.with_cls_token:
253
+ query = torch.cat((cls_token, query), dim=1)
254
+ key = torch.cat((cls_token, key), dim=1)
255
+ value = torch.cat((cls_token, value), dim=1)
256
+
257
+ head_dim = self.embed_dim // self.num_heads
258
+
259
+ query = self.rearrange_for_multi_head_attention(self.projection_query(query))
260
+ key = self.rearrange_for_multi_head_attention(self.projection_key(key))
261
+ value = self.rearrange_for_multi_head_attention(self.projection_value(value))
262
+
263
+ attention_score = torch.einsum("bhlk,bhtk->bhlt", [query, key]) * self.scale
264
+ attention_probs = torch.nn.functional.softmax(attention_score, dim=-1)
265
+ attention_probs = self.dropout(attention_probs)
266
+
267
+ context = torch.einsum("bhlt,bhtv->bhlv", [attention_probs, value])
268
+ # rearrange"b h t d -> b t (h d)"
269
+ _, _, hidden_size, _ = context.shape
270
+ context = context.permute(0, 2, 1, 3).contiguous().view(batch_size, hidden_size, self.num_heads * head_dim)
271
+ return context
272
+
273
+
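The two einsum calls in CvtSelfAttention.forward are just batched matrix products over the head dimension: "bhlk,bhtk->bhlt" is query @ key.transpose(-1, -2), and "bhlt,bhtv->bhlv" is attention_probs @ value. A small equivalence check with random tensors (illustration only):

    import torch

    batch, heads, q_len, kv_len, head_dim = 2, 3, 5, 7, 4
    query = torch.randn(batch, heads, q_len, head_dim)
    key = torch.randn(batch, heads, kv_len, head_dim)
    value = torch.randn(batch, heads, kv_len, head_dim)

    scores = torch.einsum("bhlk,bhtk->bhlt", [query, key])
    print(torch.allclose(scores, query @ key.transpose(-1, -2)))  # True

    probs = scores.softmax(dim=-1)
    context = torch.einsum("bhlt,bhtv->bhlv", [probs, value])
    print(torch.allclose(context, probs @ value))  # True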
274
+ class CvtSelfOutput(nn.Module):
275
+ """
276
+ The residual connection is defined in CvtLayer instead of here (as is the case with other models), due to the
277
+ layernorm applied before each block.
278
+ """
279
+
280
+ def __init__(self, embed_dim, drop_rate):
281
+ super().__init__()
282
+ self.dense = nn.Linear(embed_dim, embed_dim)
283
+ self.dropout = nn.Dropout(drop_rate)
284
+
285
+ def forward(self, hidden_state, input_tensor):
286
+ hidden_state = self.dense(hidden_state)
287
+ hidden_state = self.dropout(hidden_state)
288
+ return hidden_state
289
+
290
+
291
+ class CvtAttention(nn.Module):
292
+ def __init__(
293
+ self,
294
+ num_heads,
295
+ embed_dim,
296
+ kernel_size,
297
+ padding_q,
298
+ padding_kv,
299
+ stride_q,
300
+ stride_kv,
301
+ qkv_projection_method,
302
+ qkv_bias,
303
+ attention_drop_rate,
304
+ drop_rate,
305
+ with_cls_token=True,
306
+ ):
307
+ super().__init__()
308
+ self.attention = CvtSelfAttention(
309
+ num_heads,
310
+ embed_dim,
311
+ kernel_size,
312
+ padding_q,
313
+ padding_kv,
314
+ stride_q,
315
+ stride_kv,
316
+ qkv_projection_method,
317
+ qkv_bias,
318
+ attention_drop_rate,
319
+ with_cls_token,
320
+ )
321
+ self.output = CvtSelfOutput(embed_dim, drop_rate)
322
+ self.pruned_heads = set()
323
+
324
+ def prune_heads(self, heads):
325
+ if len(heads) == 0:
326
+ return
327
+ heads, index = find_pruneable_heads_and_indices(
328
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
329
+ )
330
+
331
+ # Prune linear layers
332
+ self.attention.query = prune_linear_layer(self.attention.query, index)
333
+ self.attention.key = prune_linear_layer(self.attention.key, index)
334
+ self.attention.value = prune_linear_layer(self.attention.value, index)
335
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
336
+
337
+ # Update hyper params and store pruned heads
338
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
339
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
340
+ self.pruned_heads = self.pruned_heads.union(heads)
341
+
342
+ def forward(self, hidden_state, height, width):
343
+ self_output = self.attention(hidden_state, height, width)
344
+ attention_output = self.output(self_output, hidden_state)
345
+ return attention_output
346
+
347
+
348
+ class CvtIntermediate(nn.Module):
349
+ def __init__(self, embed_dim, mlp_ratio):
350
+ super().__init__()
351
+ self.dense = nn.Linear(embed_dim, int(embed_dim * mlp_ratio))
352
+ self.activation = nn.GELU()
353
+
354
+ def forward(self, hidden_state):
355
+ hidden_state = self.dense(hidden_state)
356
+ hidden_state = self.activation(hidden_state)
357
+ return hidden_state
358
+
359
+
360
+ class CvtOutput(nn.Module):
361
+ def __init__(self, embed_dim, mlp_ratio, drop_rate):
362
+ super().__init__()
363
+ self.dense = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
364
+ self.dropout = nn.Dropout(drop_rate)
365
+
366
+ def forward(self, hidden_state, input_tensor):
367
+ hidden_state = self.dense(hidden_state)
368
+ hidden_state = self.dropout(hidden_state)
369
+ hidden_state = hidden_state + input_tensor
370
+ return hidden_state
371
+
372
+
373
+ class CvtLayer(nn.Module):
374
+ """
375
+ CvtLayer composed by attention layers, normalization and multi-layer perceptrons (mlps).
376
+ """
377
+
378
+ def __init__(
379
+ self,
380
+ num_heads,
381
+ embed_dim,
382
+ kernel_size,
383
+ padding_q,
384
+ padding_kv,
385
+ stride_q,
386
+ stride_kv,
387
+ qkv_projection_method,
388
+ qkv_bias,
389
+ attention_drop_rate,
390
+ drop_rate,
391
+ mlp_ratio,
392
+ drop_path_rate,
393
+ with_cls_token=True,
394
+ ):
395
+ super().__init__()
396
+ self.attention = CvtAttention(
397
+ num_heads,
398
+ embed_dim,
399
+ kernel_size,
400
+ padding_q,
401
+ padding_kv,
402
+ stride_q,
403
+ stride_kv,
404
+ qkv_projection_method,
405
+ qkv_bias,
406
+ attention_drop_rate,
407
+ drop_rate,
408
+ with_cls_token,
409
+ )
410
+
411
+ self.intermediate = CvtIntermediate(embed_dim, mlp_ratio)
412
+ self.output = CvtOutput(embed_dim, mlp_ratio, drop_rate)
413
+ self.drop_path = CvtDropPath(drop_prob=drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
414
+ self.layernorm_before = nn.LayerNorm(embed_dim)
415
+ self.layernorm_after = nn.LayerNorm(embed_dim)
416
+
417
+ def forward(self, hidden_state, height, width):
418
+ self_attention_output = self.attention(
419
+ self.layernorm_before(hidden_state), # in Cvt, layernorm is applied before self-attention
420
+ height,
421
+ width,
422
+ )
423
+ attention_output = self_attention_output
424
+ attention_output = self.drop_path(attention_output)
425
+
426
+ # first residual connection
427
+ hidden_state = attention_output + hidden_state
428
+
429
+ # in Cvt, layernorm is also applied after self-attention
430
+ layer_output = self.layernorm_after(hidden_state)
431
+ layer_output = self.intermediate(layer_output)
432
+
433
+ # second residual connection is done here
434
+ layer_output = self.output(layer_output, hidden_state)
435
+ layer_output = self.drop_path(layer_output)
436
+ return layer_output
437
+
438
+
439
+ class CvtStage(nn.Module):
440
+ def __init__(self, config, stage):
441
+ super().__init__()
442
+ self.config = config
443
+ self.stage = stage
444
+ if self.config.cls_token[self.stage]:
445
+ self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.embed_dim[-1]))
446
+
447
+ self.embedding = CvtEmbeddings(
448
+ patch_size=config.patch_sizes[self.stage],
449
+ stride=config.patch_stride[self.stage],
450
+ num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1],
451
+ embed_dim=config.embed_dim[self.stage],
452
+ padding=config.patch_padding[self.stage],
453
+ dropout_rate=config.drop_rate[self.stage],
454
+ )
455
+
456
+ drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate[self.stage], config.depth[stage])]
457
+
458
+ self.layers = nn.Sequential(
459
+ *[
460
+ CvtLayer(
461
+ num_heads=config.num_heads[self.stage],
462
+ embed_dim=config.embed_dim[self.stage],
463
+ kernel_size=config.kernel_qkv[self.stage],
464
+ padding_q=config.padding_q[self.stage],
465
+ padding_kv=config.padding_kv[self.stage],
466
+ stride_kv=config.stride_kv[self.stage],
467
+ stride_q=config.stride_q[self.stage],
468
+ qkv_projection_method=config.qkv_projection_method[self.stage],
469
+ qkv_bias=config.qkv_bias[self.stage],
470
+ attention_drop_rate=config.attention_drop_rate[self.stage],
471
+ drop_rate=config.drop_rate[self.stage],
472
+ drop_path_rate=drop_path_rates[self.stage],
473
+ mlp_ratio=config.mlp_ratio[self.stage],
474
+ with_cls_token=config.cls_token[self.stage],
475
+ )
476
+ for _ in range(config.depth[self.stage])
477
+ ]
478
+ )
479
+
480
+ def forward(self, hidden_state):
481
+ cls_token = None
482
+ hidden_state = self.embedding(hidden_state)
483
+ batch_size, num_channels, height, width = hidden_state.shape
484
+ # rearrange b c h w -> b (h w) c"
485
+ hidden_state = hidden_state.view(batch_size, num_channels, height * width).permute(0, 2, 1)
486
+ if self.config.cls_token[self.stage]:
487
+ cls_token = self.cls_token.expand(batch_size, -1, -1)
488
+ hidden_state = torch.cat((cls_token, hidden_state), dim=1)
489
+
490
+ for layer in self.layers:
491
+ layer_outputs = layer(hidden_state, height, width)
492
+ hidden_state = layer_outputs
493
+
494
+ if self.config.cls_token[self.stage]:
495
+ cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
496
+ hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
497
+ return hidden_state, cls_token
498
+
499
+
500
+ class CvtEncoder(nn.Module):
501
+ def __init__(self, config):
502
+ super().__init__()
503
+ self.config = config
504
+ self.stages = nn.ModuleList([])
505
+ for stage_idx in range(len(config.depth)):
506
+ self.stages.append(CvtStage(config, stage_idx))
507
+
508
+ def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
509
+ all_hidden_states = () if output_hidden_states else None
510
+ hidden_state = pixel_values
511
+
512
+ cls_token = None
513
+ for _, (stage_module) in enumerate(self.stages):
514
+ hidden_state, cls_token = stage_module(hidden_state)
515
+ if output_hidden_states:
516
+ all_hidden_states = all_hidden_states + (hidden_state,)
517
+
518
+ if not return_dict:
519
+ return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)
520
+
521
+ return BaseModelOutputWithCLSToken(
522
+ last_hidden_state=hidden_state,
523
+ cls_token_value=cls_token,
524
+ hidden_states=all_hidden_states,
525
+ )
526
+
527
+
528
+ class CvtPreTrainedModel(PreTrainedModel):
529
+ """
530
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
531
+ models.
532
+ """
533
+
534
+ config_class = CvtConfig
535
+ base_model_prefix = "cvt"
536
+ main_input_name = "pixel_values"
537
+
538
+ def _init_weights(self, module):
539
+ """Initialize the weights"""
540
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
541
+ module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range)
542
+ if module.bias is not None:
543
+ module.bias.data.zero_()
544
+ elif isinstance(module, nn.LayerNorm):
545
+ module.bias.data.zero_()
546
+ module.weight.data.fill_(1.0)
547
+ elif isinstance(module, CvtStage):
548
+ if self.config.cls_token[module.stage]:
549
+ module.cls_token.data = nn.init.trunc_normal_(
550
+ torch.zeros(1, 1, self.config.embed_dim[-1]), mean=0.0, std=self.config.initializer_range
551
+ )
552
+
553
+
554
+ CVT_START_DOCSTRING = r"""
555
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
556
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
557
+ behavior.
558
+
559
+ Parameters:
560
+ config ([`CvtConfig`]): Model configuration class with all the parameters of the model.
561
+ Initializing with a config file does not load the weights associated with the model, only the
562
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
563
+ """
564
+
565
+ CVT_INPUTS_DOCSTRING = r"""
566
+ Args:
567
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
568
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]
569
+ for details.
570
+ output_hidden_states (`bool`, *optional*):
571
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
572
+ more detail.
573
+ return_dict (`bool`, *optional*):
574
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
575
+ """
576
+
577
+
578
+ @add_start_docstrings(
579
+ "The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.",
580
+ CVT_START_DOCSTRING,
581
+ )
582
+ class CvtModel(CvtPreTrainedModel):
583
+ def __init__(self, config, add_pooling_layer=True):
584
+ super().__init__(config)
585
+ self.config = config
586
+ self.encoder = CvtEncoder(config)
587
+ self.post_init()
588
+
589
+ def _prune_heads(self, heads_to_prune):
590
+ """
591
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
592
+ class PreTrainedModel
593
+ """
594
+ for layer, heads in heads_to_prune.items():
595
+ self.encoder.layer[layer].attention.prune_heads(heads)
596
+
597
+ @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
598
+ @add_code_sample_docstrings(
599
+ checkpoint=_CHECKPOINT_FOR_DOC,
600
+ output_type=BaseModelOutputWithCLSToken,
601
+ config_class=_CONFIG_FOR_DOC,
602
+ modality="vision",
603
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
604
+ )
605
+ def forward(
606
+ self,
607
+ pixel_values: Optional[torch.Tensor] = None,
608
+ output_hidden_states: Optional[bool] = None,
609
+ return_dict: Optional[bool] = None,
610
+ ) -> Union[Tuple, BaseModelOutputWithCLSToken]:
611
+ output_hidden_states = (
612
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
613
+ )
614
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
615
+
616
+ if pixel_values is None:
617
+ raise ValueError("You have to specify pixel_values")
618
+
619
+ encoder_outputs = self.encoder(
620
+ pixel_values,
621
+ output_hidden_states=output_hidden_states,
622
+ return_dict=return_dict,
623
+ )
624
+ sequence_output = encoder_outputs[0]
625
+
626
+ if not return_dict:
627
+ return (sequence_output,) + encoder_outputs[1:]
628
+
629
+ return BaseModelOutputWithCLSToken(
630
+ last_hidden_state=sequence_output,
631
+ cls_token_value=encoder_outputs.cls_token_value,
632
+ hidden_states=encoder_outputs.hidden_states,
633
+ )
634
+
635
+
636
+ @add_start_docstrings(
637
+ """
638
+ Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
639
+ the [CLS] token) e.g. for ImageNet.
640
+ """,
641
+ CVT_START_DOCSTRING,
642
+ )
643
+ class CvtForImageClassification(CvtPreTrainedModel):
644
+ def __init__(self, config):
645
+ super().__init__(config)
646
+
647
+ self.num_labels = config.num_labels
648
+ self.cvt = CvtModel(config, add_pooling_layer=False)
649
+ self.layernorm = nn.LayerNorm(config.embed_dim[-1])
650
+ # Classifier head
651
+ self.classifier = (
652
+ nn.Linear(config.embed_dim[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
653
+ )
654
+
655
+ # Initialize weights and apply final processing
656
+ self.post_init()
657
+
658
+ @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
659
+ @add_code_sample_docstrings(
660
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
661
+ output_type=ImageClassifierOutputWithNoAttention,
662
+ config_class=_CONFIG_FOR_DOC,
663
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
664
+ )
665
+ def forward(
666
+ self,
667
+ pixel_values: Optional[torch.Tensor] = None,
668
+ labels: Optional[torch.Tensor] = None,
669
+ output_hidden_states: Optional[bool] = None,
670
+ return_dict: Optional[bool] = None,
671
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
672
+ r"""
673
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
674
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
675
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
676
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
677
+ """
678
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
679
+ outputs = self.cvt(
680
+ pixel_values,
681
+ output_hidden_states=output_hidden_states,
682
+ return_dict=return_dict,
683
+ )
684
+
685
+ sequence_output = outputs[0]
686
+ cls_token = outputs[1]
687
+ if self.config.cls_token[-1]:
688
+ sequence_output = self.layernorm(cls_token)
689
+ else:
690
+ batch_size, num_channels, height, width = sequence_output.shape
691
+ # rearrange "b c h w -> b (h w) c"
692
+ sequence_output = sequence_output.view(batch_size, num_channels, height * width).permute(0, 2, 1)
693
+ sequence_output = self.layernorm(sequence_output)
694
+
695
+ sequence_output_mean = sequence_output.mean(dim=1)
696
+ logits = self.classifier(sequence_output_mean)
697
+
698
+ loss = None
699
+ if labels is not None:
700
+ if self.config.problem_type is None:
701
+ if self.config.num_labels == 1:
702
+ self.config.problem_type = "regression"
703
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
704
+ self.config.problem_type = "single_label_classification"
705
+ else:
706
+ self.config.problem_type = "multi_label_classification"
707
+
708
+ if self.config.problem_type == "regression":
709
+ loss_fct = MSELoss()
710
+ if self.config.num_labels == 1:
711
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
712
+ else:
713
+ loss = loss_fct(logits, labels)
714
+ elif self.config.problem_type == "single_label_classification":
715
+ loss_fct = CrossEntropyLoss()
716
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
717
+ elif self.config.problem_type == "multi_label_classification":
718
+ loss_fct = BCEWithLogitsLoss()
719
+ loss = loss_fct(logits, labels)
720
+
721
+ if not return_dict:
722
+ output = (logits,) + outputs[2:]
723
+ return ((loss,) + output) if loss is not None else output
724
+
725
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
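The classification head above layer-normalizes the final [CLS] token (or the flattened patch tokens), averages over the token axis, and feeds the result to a linear classifier; passing `labels` selects the loss branch via `config.problem_type`. A minimal PyTorch usage sketch, mirroring the TF docstring examples later in this diff (the label index is an arbitrary ImageNet class chosen purely for illustration):

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, CvtForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    # Integer labels with num_labels > 1 take the single-label
    # cross-entropy branch of the problem_type logic above.
    outputs = model(**inputs, labels=torch.tensor([281]))  # hypothetical target class

print(float(outputs.loss))
print("Predicted class:", model.config.id2label[int(outputs.logits.argmax(-1))])
```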
venv/lib/python3.10/site-packages/transformers/models/cvt/modeling_tf_cvt.py ADDED
@@ -0,0 +1,1097 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Cvt model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections.abc
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import tensorflow as tf
25
+
26
+ from ...modeling_tf_outputs import TFImageClassifierOutputWithNoAttention
27
+ from ...modeling_tf_utils import (
28
+ TFModelInputType,
29
+ TFPreTrainedModel,
30
+ TFSequenceClassificationLoss,
31
+ get_initializer,
32
+ keras,
33
+ keras_serializable,
34
+ unpack_inputs,
35
+ )
36
+ from ...tf_utils import shape_list, stable_softmax
37
+ from ...utils import (
38
+ ModelOutput,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_cvt import CvtConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ # General docstring
50
+ _CONFIG_FOR_DOC = "CvtConfig"
51
+
52
+
53
+ from ..deprecated._archive_maps import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
54
+
55
+
56
+ @dataclass
57
+ class TFBaseModelOutputWithCLSToken(ModelOutput):
58
+ """
59
+ Base class for model's outputs.
60
+
61
+ Args:
62
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
63
+ Sequence of hidden-states at the output of the last layer of the model.
64
+ cls_token_value (`tf.Tensor` of shape `(batch_size, 1, hidden_size)`):
65
+ Classification token at the output of the last layer of the model.
66
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
67
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
68
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
69
+ the initial embedding outputs.
70
+ """
71
+
72
+ last_hidden_state: tf.Tensor = None
73
+ cls_token_value: tf.Tensor = None
74
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
75
+
76
+
77
+ class TFCvtDropPath(keras.layers.Layer):
78
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
79
+ References:
80
+ (1) github.com:rwightman/pytorch-image-models
81
+ """
82
+
83
+ def __init__(self, drop_prob: float, **kwargs):
84
+ super().__init__(**kwargs)
85
+ self.drop_prob = drop_prob
86
+
87
+ def call(self, x: tf.Tensor, training=None):
88
+ if self.drop_prob == 0.0 or not training:
89
+ return x
90
+ keep_prob = 1 - self.drop_prob
91
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
92
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1, dtype=self.compute_dtype)
93
+ random_tensor = tf.floor(random_tensor)
94
+ return (x / keep_prob) * random_tensor
95
+
96
+
97
+ class TFCvtEmbeddings(keras.layers.Layer):
98
+ """Construct the Convolutional Token Embeddings."""
99
+
100
+ def __init__(
101
+ self,
102
+ config: CvtConfig,
103
+ patch_size: int,
104
+ num_channels: int,
105
+ embed_dim: int,
106
+ stride: int,
107
+ padding: int,
108
+ dropout_rate: float,
109
+ **kwargs,
110
+ ):
111
+ super().__init__(**kwargs)
112
+ self.convolution_embeddings = TFCvtConvEmbeddings(
113
+ config,
114
+ patch_size=patch_size,
115
+ num_channels=num_channels,
116
+ embed_dim=embed_dim,
117
+ stride=stride,
118
+ padding=padding,
119
+ name="convolution_embeddings",
120
+ )
121
+ self.dropout = keras.layers.Dropout(dropout_rate)
122
+
123
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
124
+ hidden_state = self.convolution_embeddings(pixel_values)
125
+ hidden_state = self.dropout(hidden_state, training=training)
126
+ return hidden_state
127
+
128
+ def build(self, input_shape=None):
129
+ if self.built:
130
+ return
131
+ self.built = True
132
+ if getattr(self, "convolution_embeddings", None) is not None:
133
+ with tf.name_scope(self.convolution_embeddings.name):
134
+ self.convolution_embeddings.build(None)
135
+
136
+
137
+ class TFCvtConvEmbeddings(keras.layers.Layer):
138
+ """Image to Convolution Embeddings. This convolutional operation aims to model local spatial contexts."""
139
+
140
+ def __init__(
141
+ self,
142
+ config: CvtConfig,
143
+ patch_size: int,
144
+ num_channels: int,
145
+ embed_dim: int,
146
+ stride: int,
147
+ padding: int,
148
+ **kwargs,
149
+ ):
150
+ super().__init__(**kwargs)
151
+ self.padding = keras.layers.ZeroPadding2D(padding=padding)
152
+ self.patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
153
+ self.projection = keras.layers.Conv2D(
154
+ filters=embed_dim,
155
+ kernel_size=patch_size,
156
+ strides=stride,
157
+ padding="valid",
158
+ data_format="channels_last",
159
+ kernel_initializer=get_initializer(config.initializer_range),
160
+ name="projection",
161
+ )
162
+ # Using the same default epsilon as PyTorch
163
+ self.normalization = keras.layers.LayerNormalization(epsilon=1e-5, name="normalization")
164
+ self.num_channels = num_channels
165
+ self.embed_dim = embed_dim
166
+
167
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
168
+ if isinstance(pixel_values, dict):
169
+ pixel_values = pixel_values["pixel_values"]
170
+
171
+ pixel_values = self.projection(self.padding(pixel_values))
172
+
173
+ # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
174
+ batch_size, height, width, num_channels = shape_list(pixel_values)
175
+ hidden_size = height * width
176
+ pixel_values = tf.reshape(pixel_values, shape=(batch_size, hidden_size, num_channels))
177
+ pixel_values = self.normalization(pixel_values)
178
+
179
+ # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
180
+ pixel_values = tf.reshape(pixel_values, shape=(batch_size, height, width, num_channels))
181
+ return pixel_values
182
+
183
+ def build(self, input_shape=None):
184
+ if self.built:
185
+ return
186
+ self.built = True
187
+ if getattr(self, "projection", None) is not None:
188
+ with tf.name_scope(self.projection.name):
189
+ self.projection.build([None, None, None, self.num_channels])
190
+ if getattr(self, "normalization", None) is not None:
191
+ with tf.name_scope(self.normalization.name):
192
+ self.normalization.build([None, None, self.embed_dim])
193
+
194
+
195
+ class TFCvtSelfAttentionConvProjection(keras.layers.Layer):
196
+ """Convolutional projection layer."""
197
+
198
+ def __init__(self, config: CvtConfig, embed_dim: int, kernel_size: int, stride: int, padding: int, **kwargs):
199
+ super().__init__(**kwargs)
200
+ self.padding = keras.layers.ZeroPadding2D(padding=padding)
201
+ self.convolution = keras.layers.Conv2D(
202
+ filters=embed_dim,
203
+ kernel_size=kernel_size,
204
+ kernel_initializer=get_initializer(config.initializer_range),
205
+ padding="valid",
206
+ strides=stride,
207
+ use_bias=False,
208
+ name="convolution",
209
+ groups=embed_dim,
210
+ )
211
+ # Using the same default epsilon as PyTorch; the TF BatchNormalization momentum is (1 - PyTorch momentum)
212
+ self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
213
+ self.embed_dim = embed_dim
214
+
215
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
216
+ hidden_state = self.convolution(self.padding(hidden_state))
217
+ hidden_state = self.normalization(hidden_state, training=training)
218
+ return hidden_state
219
+
220
+ def build(self, input_shape=None):
221
+ if self.built:
222
+ return
223
+ self.built = True
224
+ if getattr(self, "convolution", None) is not None:
225
+ with tf.name_scope(self.convolution.name):
226
+ self.convolution.build([None, None, None, self.embed_dim])
227
+ if getattr(self, "normalization", None) is not None:
228
+ with tf.name_scope(self.normalization.name):
229
+ self.normalization.build([None, None, None, self.embed_dim])
230
+
231
+
232
+ class TFCvtSelfAttentionLinearProjection(keras.layers.Layer):
233
+ """Linear projection layer used to flatten tokens into 1D."""
234
+
235
+ def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
236
+ # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
237
+ batch_size, height, width, num_channels = shape_list(hidden_state)
238
+ hidden_size = height * width
239
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
240
+ return hidden_state
241
+
242
+
243
+ class TFCvtSelfAttentionProjection(keras.layers.Layer):
244
+ """Convolutional Projection for Attention."""
245
+
246
+ def __init__(
247
+ self,
248
+ config: CvtConfig,
249
+ embed_dim: int,
250
+ kernel_size: int,
251
+ stride: int,
252
+ padding: int,
253
+ projection_method: str = "dw_bn",
254
+ **kwargs,
255
+ ):
256
+ super().__init__(**kwargs)
257
+ if projection_method == "dw_bn":
258
+ self.convolution_projection = TFCvtSelfAttentionConvProjection(
259
+ config, embed_dim, kernel_size, stride, padding, name="convolution_projection"
260
+ )
261
+ self.linear_projection = TFCvtSelfAttentionLinearProjection()
262
+
263
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
264
+ hidden_state = self.convolution_projection(hidden_state, training=training)
265
+ hidden_state = self.linear_projection(hidden_state)
266
+ return hidden_state
267
+
268
+ def build(self, input_shape=None):
269
+ if self.built:
270
+ return
271
+ self.built = True
272
+ if getattr(self, "convolution_projection", None) is not None:
273
+ with tf.name_scope(self.convolution_projection.name):
274
+ self.convolution_projection.build(None)
275
+
276
+
277
+ class TFCvtSelfAttention(keras.layers.Layer):
278
+ """
279
+ Self-attention layer. A depth-wise separable convolution operation (Convolutional Projection) is applied for
280
+ query, key, and value embeddings.
281
+ """
282
+
283
+ def __init__(
284
+ self,
285
+ config: CvtConfig,
286
+ num_heads: int,
287
+ embed_dim: int,
288
+ kernel_size: int,
289
+ stride_q: int,
290
+ stride_kv: int,
291
+ padding_q: int,
292
+ padding_kv: int,
293
+ qkv_projection_method: str,
294
+ qkv_bias: bool,
295
+ attention_drop_rate: float,
296
+ with_cls_token: bool = True,
297
+ **kwargs,
298
+ ):
299
+ super().__init__(**kwargs)
300
+ self.scale = embed_dim**-0.5
301
+ self.with_cls_token = with_cls_token
302
+ self.embed_dim = embed_dim
303
+ self.num_heads = num_heads
304
+
305
+ self.convolution_projection_query = TFCvtSelfAttentionProjection(
306
+ config,
307
+ embed_dim,
308
+ kernel_size,
309
+ stride_q,
310
+ padding_q,
311
+ projection_method="linear" if qkv_projection_method == "avg" else qkv_projection_method,
312
+ name="convolution_projection_query",
313
+ )
314
+ self.convolution_projection_key = TFCvtSelfAttentionProjection(
315
+ config,
316
+ embed_dim,
317
+ kernel_size,
318
+ stride_kv,
319
+ padding_kv,
320
+ projection_method=qkv_projection_method,
321
+ name="convolution_projection_key",
322
+ )
323
+ self.convolution_projection_value = TFCvtSelfAttentionProjection(
324
+ config,
325
+ embed_dim,
326
+ kernel_size,
327
+ stride_kv,
328
+ padding_kv,
329
+ projection_method=qkv_projection_method,
330
+ name="convolution_projection_value",
331
+ )
332
+
333
+ self.projection_query = keras.layers.Dense(
334
+ units=embed_dim,
335
+ kernel_initializer=get_initializer(config.initializer_range),
336
+ use_bias=qkv_bias,
337
+ bias_initializer="zeros",
338
+ name="projection_query",
339
+ )
340
+ self.projection_key = keras.layers.Dense(
341
+ units=embed_dim,
342
+ kernel_initializer=get_initializer(config.initializer_range),
343
+ use_bias=qkv_bias,
344
+ bias_initializer="zeros",
345
+ name="projection_key",
346
+ )
347
+ self.projection_value = keras.layers.Dense(
348
+ units=embed_dim,
349
+ kernel_initializer=get_initializer(config.initializer_range),
350
+ use_bias=qkv_bias,
351
+ bias_initializer="zeros",
352
+ name="projection_value",
353
+ )
354
+ self.dropout = keras.layers.Dropout(attention_drop_rate)
355
+
356
+ def rearrange_for_multi_head_attention(self, hidden_state: tf.Tensor) -> tf.Tensor:
357
+ batch_size, hidden_size, _ = shape_list(hidden_state)
358
+ head_dim = self.embed_dim // self.num_heads
359
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, self.num_heads, head_dim))
360
+ hidden_state = tf.transpose(hidden_state, perm=(0, 2, 1, 3))
361
+ return hidden_state
362
+
363
+ def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor:
364
+ if self.with_cls_token:
365
+ cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
366
+
367
+ # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
368
+ batch_size, hidden_size, num_channels = shape_list(hidden_state)
369
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
370
+
371
+ key = self.convolution_projection_key(hidden_state, training=training)
372
+ query = self.convolution_projection_query(hidden_state, training=training)
373
+ value = self.convolution_projection_value(hidden_state, training=training)
374
+
375
+ if self.with_cls_token:
376
+ query = tf.concat((cls_token, query), axis=1)
377
+ key = tf.concat((cls_token, key), axis=1)
378
+ value = tf.concat((cls_token, value), axis=1)
379
+
380
+ head_dim = self.embed_dim // self.num_heads
381
+
382
+ query = self.rearrange_for_multi_head_attention(self.projection_query(query))
383
+ key = self.rearrange_for_multi_head_attention(self.projection_key(key))
384
+ value = self.rearrange_for_multi_head_attention(self.projection_value(value))
385
+
386
+ attention_score = tf.matmul(query, key, transpose_b=True) * self.scale
387
+ attention_probs = stable_softmax(logits=attention_score, axis=-1)
388
+ attention_probs = self.dropout(attention_probs, training=training)
389
+
390
+ context = tf.matmul(attention_probs, value)
391
+ # "batch_size, num_heads, hidden_size, head_dim -> batch_size, hidden_size, (num_heads*head_dim)"
392
+ _, _, hidden_size, _ = shape_list(context)
393
+ context = tf.transpose(context, perm=(0, 2, 1, 3))
394
+ context = tf.reshape(context, (batch_size, hidden_size, self.num_heads * head_dim))
395
+ return context
396
+
397
+ def build(self, input_shape=None):
398
+ if self.built:
399
+ return
400
+ self.built = True
401
+ if getattr(self, "convolution_projection_query", None) is not None:
402
+ with tf.name_scope(self.convolution_projection_query.name):
403
+ self.convolution_projection_query.build(None)
404
+ if getattr(self, "convolution_projection_key", None) is not None:
405
+ with tf.name_scope(self.convolution_projection_key.name):
406
+ self.convolution_projection_key.build(None)
407
+ if getattr(self, "convolution_projection_value", None) is not None:
408
+ with tf.name_scope(self.convolution_projection_value.name):
409
+ self.convolution_projection_value.build(None)
410
+ if getattr(self, "projection_query", None) is not None:
411
+ with tf.name_scope(self.projection_query.name):
412
+ self.projection_query.build([None, None, self.embed_dim])
413
+ if getattr(self, "projection_key", None) is not None:
414
+ with tf.name_scope(self.projection_key.name):
415
+ self.projection_key.build([None, None, self.embed_dim])
416
+ if getattr(self, "projection_value", None) is not None:
417
+ with tf.name_scope(self.projection_value.name):
418
+ self.projection_value.build([None, None, self.embed_dim])
419
+
420
+
421
+ class TFCvtSelfOutput(keras.layers.Layer):
422
+ """Output of the Attention layer ."""
423
+
424
+ def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: float, **kwargs):
425
+ super().__init__(**kwargs)
426
+ self.dense = keras.layers.Dense(
427
+ units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="dense"
428
+ )
429
+ self.dropout = keras.layers.Dropout(drop_rate)
430
+ self.embed_dim = embed_dim
431
+
432
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
433
+ hidden_state = self.dense(inputs=hidden_state)
434
+ hidden_state = self.dropout(inputs=hidden_state, training=training)
435
+ return hidden_state
436
+
437
+ def build(self, input_shape=None):
438
+ if self.built:
439
+ return
440
+ self.built = True
441
+ if getattr(self, "dense", None) is not None:
442
+ with tf.name_scope(self.dense.name):
443
+ self.dense.build([None, None, self.embed_dim])
444
+
445
+
446
+ class TFCvtAttention(keras.layers.Layer):
447
+ """Attention layer. First chunk of the convolutional transformer block."""
448
+
449
+ def __init__(
450
+ self,
451
+ config: CvtConfig,
452
+ num_heads: int,
453
+ embed_dim: int,
454
+ kernel_size: int,
455
+ stride_q: int,
456
+ stride_kv: int,
457
+ padding_q: int,
458
+ padding_kv: int,
459
+ qkv_projection_method: str,
460
+ qkv_bias: bool,
461
+ attention_drop_rate: float,
462
+ drop_rate: float,
463
+ with_cls_token: bool = True,
464
+ **kwargs,
465
+ ):
466
+ super().__init__(**kwargs)
467
+ self.attention = TFCvtSelfAttention(
468
+ config,
469
+ num_heads,
470
+ embed_dim,
471
+ kernel_size,
472
+ stride_q,
473
+ stride_kv,
474
+ padding_q,
475
+ padding_kv,
476
+ qkv_projection_method,
477
+ qkv_bias,
478
+ attention_drop_rate,
479
+ with_cls_token,
480
+ name="attention",
481
+ )
482
+ self.dense_output = TFCvtSelfOutput(config, embed_dim, drop_rate, name="output")
483
+
484
+ def prune_heads(self, heads):
485
+ raise NotImplementedError
486
+
487
+ def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False):
488
+ self_output = self.attention(hidden_state, height, width, training=training)
489
+ attention_output = self.dense_output(self_output, training=training)
490
+ return attention_output
491
+
492
+ def build(self, input_shape=None):
493
+ if self.built:
494
+ return
495
+ self.built = True
496
+ if getattr(self, "attention", None) is not None:
497
+ with tf.name_scope(self.attention.name):
498
+ self.attention.build(None)
499
+ if getattr(self, "dense_output", None) is not None:
500
+ with tf.name_scope(self.dense_output.name):
501
+ self.dense_output.build(None)
502
+
503
+
504
+ class TFCvtIntermediate(keras.layers.Layer):
505
+ """Intermediate dense layer. Second chunk of the convolutional transformer block."""
506
+
507
+ def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, **kwargs):
508
+ super().__init__(**kwargs)
509
+ self.dense = keras.layers.Dense(
510
+ units=int(embed_dim * mlp_ratio),
511
+ kernel_initializer=get_initializer(config.initializer_range),
512
+ activation="gelu",
513
+ name="dense",
514
+ )
515
+ self.embed_dim = embed_dim
516
+
517
+ def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
518
+ hidden_state = self.dense(hidden_state)
519
+ return hidden_state
520
+
521
+ def build(self, input_shape=None):
522
+ if self.built:
523
+ return
524
+ self.built = True
525
+ if getattr(self, "dense", None) is not None:
526
+ with tf.name_scope(self.dense.name):
527
+ self.dense.build([None, None, self.embed_dim])
528
+
529
+
530
+ class TFCvtOutput(keras.layers.Layer):
531
+ """
532
+ Output of the Convolutional Transformer Block (last chunk). It consists of a MLP and a residual connection.
533
+ """
534
+
535
+ def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, drop_rate: int, **kwargs):
536
+ super().__init__(**kwargs)
537
+ self.dense = keras.layers.Dense(
538
+ units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="dense"
539
+ )
540
+ self.dropout = keras.layers.Dropout(drop_rate)
541
+ self.embed_dim = embed_dim
542
+ self.mlp_ratio = mlp_ratio
543
+
544
+ def call(self, hidden_state: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
545
+ hidden_state = self.dense(inputs=hidden_state)
546
+ hidden_state = self.dropout(inputs=hidden_state, training=training)
547
+ hidden_state = hidden_state + input_tensor
548
+ return hidden_state
549
+
550
+ def build(self, input_shape=None):
551
+ if self.built:
552
+ return
553
+ self.built = True
554
+ if getattr(self, "dense", None) is not None:
555
+ with tf.name_scope(self.dense.name):
556
+ self.dense.build([None, None, int(self.embed_dim * self.mlp_ratio)])
557
+
558
+
559
+ class TFCvtLayer(keras.layers.Layer):
560
+ """
561
+ Convolutional Transformer Block composed of attention layers, normalization and multi-layer perceptrons (MLPs). It
562
+ consists of 3 chunks: an attention layer, an intermediate dense layer and an output layer. This corresponds to the
563
+ `Block` class in the original implementation.
564
+ """
565
+
566
+ def __init__(
567
+ self,
568
+ config: CvtConfig,
569
+ num_heads: int,
570
+ embed_dim: int,
571
+ kernel_size: int,
572
+ stride_q: int,
573
+ stride_kv: int,
574
+ padding_q: int,
575
+ padding_kv: int,
576
+ qkv_projection_method: str,
577
+ qkv_bias: bool,
578
+ attention_drop_rate: float,
579
+ drop_rate: float,
580
+ mlp_ratio: float,
581
+ drop_path_rate: float,
582
+ with_cls_token: bool = True,
583
+ **kwargs,
584
+ ):
585
+ super().__init__(**kwargs)
586
+ self.attention = TFCvtAttention(
587
+ config,
588
+ num_heads,
589
+ embed_dim,
590
+ kernel_size,
591
+ stride_q,
592
+ stride_kv,
593
+ padding_q,
594
+ padding_kv,
595
+ qkv_projection_method,
596
+ qkv_bias,
597
+ attention_drop_rate,
598
+ drop_rate,
599
+ with_cls_token,
600
+ name="attention",
601
+ )
602
+ self.intermediate = TFCvtIntermediate(config, embed_dim, mlp_ratio, name="intermediate")
603
+ self.dense_output = TFCvtOutput(config, embed_dim, mlp_ratio, drop_rate, name="output")
604
+ # Using `layers.Activation` instead of `tf.identity` to better control `training` behaviour.
605
+ self.drop_path = (
606
+ TFCvtDropPath(drop_path_rate, name="drop_path")
607
+ if drop_path_rate > 0.0
608
+ else keras.layers.Activation("linear", name="drop_path")
609
+ )
610
+ # Using the same default epsilon as PyTorch
611
+ self.layernorm_before = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_before")
612
+ self.layernorm_after = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_after")
613
+ self.embed_dim = embed_dim
614
+
615
+ def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor:
616
+ # in Cvt, layernorm is applied before self-attention
617
+ attention_output = self.attention(self.layernorm_before(hidden_state), height, width, training=training)
618
+ attention_output = self.drop_path(attention_output, training=training)
619
+
620
+ # first residual connection
621
+ hidden_state = attention_output + hidden_state
622
+
623
+ # in Cvt, layernorm is also applied after self-attention
624
+ layer_output = self.layernorm_after(hidden_state)
625
+ layer_output = self.intermediate(layer_output)
626
+
627
+ # second residual connection is done here
628
+ layer_output = self.dense_output(layer_output, hidden_state)
629
+ layer_output = self.drop_path(layer_output, training=training)
630
+ return layer_output
631
+
632
+ def build(self, input_shape=None):
633
+ if self.built:
634
+ return
635
+ self.built = True
636
+ if getattr(self, "attention", None) is not None:
637
+ with tf.name_scope(self.attention.name):
638
+ self.attention.build(None)
639
+ if getattr(self, "intermediate", None) is not None:
640
+ with tf.name_scope(self.intermediate.name):
641
+ self.intermediate.build(None)
642
+ if getattr(self, "dense_output", None) is not None:
643
+ with tf.name_scope(self.dense_output.name):
644
+ self.dense_output.build(None)
645
+ if getattr(self, "drop_path", None) is not None:
646
+ with tf.name_scope(self.drop_path.name):
647
+ self.drop_path.build(None)
648
+ if getattr(self, "layernorm_before", None) is not None:
649
+ with tf.name_scope(self.layernorm_before.name):
650
+ self.layernorm_before.build([None, None, self.embed_dim])
651
+ if getattr(self, "layernorm_after", None) is not None:
652
+ with tf.name_scope(self.layernorm_after.name):
653
+ self.layernorm_after.build([None, None, self.embed_dim])
654
+
655
+
656
+ class TFCvtStage(keras.layers.Layer):
657
+ """
658
+ Cvt stage (encoder block). Each stage has 2 parts:
659
+ - (1) A Convolutional Token Embedding layer
660
+ - (2) A Convolutional Transformer Block (layer).
661
+ The classification token is added only in the last stage.
662
+
663
+ Args:
664
+ config ([`CvtConfig`]): Model configuration class.
665
+ stage (`int`): Stage number.
666
+ """
667
+
668
+ def __init__(self, config: CvtConfig, stage: int, **kwargs):
669
+ super().__init__(**kwargs)
670
+ self.config = config
671
+ self.stage = stage
672
+ if self.config.cls_token[self.stage]:
673
+ self.cls_token = self.add_weight(
674
+ shape=(1, 1, self.config.embed_dim[-1]),
675
+ initializer=get_initializer(self.config.initializer_range),
676
+ trainable=True,
677
+ name="cvt.encoder.stages.2.cls_token",
678
+ )
679
+
680
+ self.embedding = TFCvtEmbeddings(
681
+ self.config,
682
+ patch_size=config.patch_sizes[self.stage],
683
+ num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1],
684
+ stride=config.patch_stride[self.stage],
685
+ embed_dim=config.embed_dim[self.stage],
686
+ padding=config.patch_padding[self.stage],
687
+ dropout_rate=config.drop_rate[self.stage],
688
+ name="embedding",
689
+ )
690
+
691
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate[self.stage], config.depth[stage])
692
+ drop_path_rates = [x.numpy().item() for x in drop_path_rates]
693
+ self.layers = [
694
+ TFCvtLayer(
695
+ config,
696
+ num_heads=config.num_heads[self.stage],
697
+ embed_dim=config.embed_dim[self.stage],
698
+ kernel_size=config.kernel_qkv[self.stage],
699
+ stride_q=config.stride_q[self.stage],
700
+ stride_kv=config.stride_kv[self.stage],
701
+ padding_q=config.padding_q[self.stage],
702
+ padding_kv=config.padding_kv[self.stage],
703
+ qkv_projection_method=config.qkv_projection_method[self.stage],
704
+ qkv_bias=config.qkv_bias[self.stage],
705
+ attention_drop_rate=config.attention_drop_rate[self.stage],
706
+ drop_rate=config.drop_rate[self.stage],
707
+ mlp_ratio=config.mlp_ratio[self.stage],
708
+ drop_path_rate=drop_path_rates[self.stage],
709
+ with_cls_token=config.cls_token[self.stage],
710
+ name=f"layers.{j}",
711
+ )
712
+ for j in range(config.depth[self.stage])
713
+ ]
714
+
715
+ def call(self, hidden_state: tf.Tensor, training: bool = False):
716
+ cls_token = None
717
+ hidden_state = self.embedding(hidden_state, training)
718
+
719
+ # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
720
+ batch_size, height, width, num_channels = shape_list(hidden_state)
721
+ hidden_size = height * width
722
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
723
+
724
+ if self.config.cls_token[self.stage]:
725
+ cls_token = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
726
+ hidden_state = tf.concat((cls_token, hidden_state), axis=1)
727
+
728
+ for layer in self.layers:
729
+ layer_outputs = layer(hidden_state, height, width, training=training)
730
+ hidden_state = layer_outputs
731
+
732
+ if self.config.cls_token[self.stage]:
733
+ cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
734
+
735
+ # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
736
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
737
+ return hidden_state, cls_token
738
+
739
+ def build(self, input_shape=None):
740
+ if self.built:
741
+ return
742
+ self.built = True
743
+ if getattr(self, "embedding", None) is not None:
744
+ with tf.name_scope(self.embedding.name):
745
+ self.embedding.build(None)
746
+ if getattr(self, "layers", None) is not None:
747
+ for layer in self.layers:
748
+ with tf.name_scope(layer.name):
749
+ layer.build(None)
750
+
751
+
752
+ class TFCvtEncoder(keras.layers.Layer):
753
+ """
754
+ Convolutional Vision Transformer encoder. CVT has 3 stages of encoder blocks with their respective number of layers
755
+ (depth) being 1, 2 and 10.
756
+
757
+ Args:
758
+ config ([`CvtConfig`]): Model configuration class.
759
+ """
760
+
761
+ config_class = CvtConfig
762
+
763
+ def __init__(self, config: CvtConfig, **kwargs):
764
+ super().__init__(**kwargs)
765
+ self.config = config
766
+ self.stages = [
767
+ TFCvtStage(config, stage_idx, name=f"stages.{stage_idx}") for stage_idx in range(len(config.depth))
768
+ ]
769
+
770
+ def call(
771
+ self,
772
+ pixel_values: TFModelInputType,
773
+ output_hidden_states: Optional[bool] = False,
774
+ return_dict: Optional[bool] = True,
775
+ training: Optional[bool] = False,
776
+ ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
777
+ all_hidden_states = () if output_hidden_states else None
778
+ hidden_state = pixel_values
779
+ # When running on CPU, `keras.layers.Conv2D` doesn't support (batch_size, num_channels, height, width)
780
+ # as input format. So change the input format to (batch_size, height, width, num_channels).
781
+ hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1))
782
+
783
+ cls_token = None
784
+ for _, (stage_module) in enumerate(self.stages):
785
+ hidden_state, cls_token = stage_module(hidden_state, training=training)
786
+ if output_hidden_states:
787
+ all_hidden_states = all_hidden_states + (hidden_state,)
788
+
789
+ # Change back to (batch_size, num_channels, height, width) format to have uniformity in the modules
790
+ hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2))
791
+ if output_hidden_states:
792
+ all_hidden_states = tuple([tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states])
793
+
794
+ if not return_dict:
795
+ return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)
796
+
797
+ return TFBaseModelOutputWithCLSToken(
798
+ last_hidden_state=hidden_state,
799
+ cls_token_value=cls_token,
800
+ hidden_states=all_hidden_states,
801
+ )
802
+
803
+ def build(self, input_shape=None):
804
+ if self.built:
805
+ return
806
+ self.built = True
807
+ if getattr(self, "stages", None) is not None:
808
+ for layer in self.stages:
809
+ with tf.name_scope(layer.name):
810
+ layer.build(None)
811
+
812
+
813
+ @keras_serializable
814
+ class TFCvtMainLayer(keras.layers.Layer):
815
+ """Construct the Cvt model."""
816
+
817
+ config_class = CvtConfig
818
+
819
+ def __init__(self, config: CvtConfig, **kwargs):
820
+ super().__init__(**kwargs)
821
+ self.config = config
822
+ self.encoder = TFCvtEncoder(config, name="encoder")
823
+
824
+ @unpack_inputs
825
+ def call(
826
+ self,
827
+ pixel_values: TFModelInputType | None = None,
828
+ output_hidden_states: Optional[bool] = None,
829
+ return_dict: Optional[bool] = None,
830
+ training: Optional[bool] = False,
831
+ ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
832
+ if pixel_values is None:
833
+ raise ValueError("You have to specify pixel_values")
834
+
835
+ encoder_outputs = self.encoder(
836
+ pixel_values,
837
+ output_hidden_states=output_hidden_states,
838
+ return_dict=return_dict,
839
+ training=training,
840
+ )
841
+
842
+ sequence_output = encoder_outputs[0]
843
+
844
+ if not return_dict:
845
+ return (sequence_output,) + encoder_outputs[1:]
846
+
847
+ return TFBaseModelOutputWithCLSToken(
848
+ last_hidden_state=sequence_output,
849
+ cls_token_value=encoder_outputs.cls_token_value,
850
+ hidden_states=encoder_outputs.hidden_states,
851
+ )
852
+
853
+ def build(self, input_shape=None):
854
+ if self.built:
855
+ return
856
+ self.built = True
857
+ if getattr(self, "encoder", None) is not None:
858
+ with tf.name_scope(self.encoder.name):
859
+ self.encoder.build(None)
860
+
861
+
862
+ class TFCvtPreTrainedModel(TFPreTrainedModel):
863
+ """
864
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
865
+ models.
866
+ """
867
+
868
+ config_class = CvtConfig
869
+ base_model_prefix = "cvt"
870
+ main_input_name = "pixel_values"
871
+
872
+
873
+ TFCVT_START_DOCSTRING = r"""
874
+
875
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
876
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
877
+ etc.)
878
+
879
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
880
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
881
+ behavior.
882
+
883
+ <Tip>
884
+
885
+ TF 2.0 models accept two formats as inputs:
886
+
887
+ - having all inputs as keyword arguments (like PyTorch models), or
888
+ - having all inputs as a list, tuple or dict in the first positional arguments.
889
+
890
+ This second option is useful when using the [`keras.Model.fit`] method, which currently requires having all the
891
+ tensors in the first argument of the model call function: `model(inputs)`.
892
+
893
+ </Tip>
894
+
895
+ Args:
896
+ config ([`CvtConfig`]): Model configuration class with all the parameters of the model.
897
+ Initializing with a config file does not load the weights associated with the model, only the
898
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
899
+ """
900
+
901
+ TFCVT_INPUTS_DOCSTRING = r"""
902
+ Args:
903
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `(batch_size, num_channels, height, width)`):
904
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]
905
+ for details.
906
+
907
+ output_hidden_states (`bool`, *optional*):
908
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
909
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
910
+ used instead.
911
+ return_dict (`bool`, *optional*):
912
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
913
+ eager mode, in graph mode the value will always be set to True.
914
+ training (`bool`, *optional*, defaults to `False`):
915
+ Whether or not to use the model in training mode (some modules like dropout modules have different
916
+ behaviors between training and evaluation).
917
+ """
918
+
919
+
920
+ @add_start_docstrings(
921
+ "The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.",
922
+ TFCVT_START_DOCSTRING,
923
+ )
924
+ class TFCvtModel(TFCvtPreTrainedModel):
925
+ def __init__(self, config: CvtConfig, *inputs, **kwargs):
926
+ super().__init__(config, *inputs, **kwargs)
927
+
928
+ self.cvt = TFCvtMainLayer(config, name="cvt")
929
+
930
+ @unpack_inputs
931
+ @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
932
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC)
933
+ def call(
934
+ self,
935
+ pixel_values: tf.Tensor | None = None,
936
+ output_hidden_states: Optional[bool] = None,
937
+ return_dict: Optional[bool] = None,
938
+ training: Optional[bool] = False,
939
+ ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
940
+ r"""
941
+ Returns:
942
+
943
+ Examples:
944
+
945
+ ```python
946
+ >>> from transformers import AutoImageProcessor, TFCvtModel
947
+ >>> from PIL import Image
948
+ >>> import requests
949
+
950
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
951
+ >>> image = Image.open(requests.get(url, stream=True).raw)
952
+
953
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
954
+ >>> model = TFCvtModel.from_pretrained("microsoft/cvt-13")
955
+
956
+ >>> inputs = image_processor(images=image, return_tensors="tf")
957
+ >>> outputs = model(**inputs)
958
+ >>> last_hidden_states = outputs.last_hidden_state
959
+ ```"""
960
+
961
+ if pixel_values is None:
962
+ raise ValueError("You have to specify pixel_values")
963
+
964
+ outputs = self.cvt(
965
+ pixel_values=pixel_values,
966
+ output_hidden_states=output_hidden_states,
967
+ return_dict=return_dict,
968
+ training=training,
969
+ )
970
+
971
+ if not return_dict:
972
+ return (outputs[0],) + outputs[1:]
973
+
974
+ return TFBaseModelOutputWithCLSToken(
975
+ last_hidden_state=outputs.last_hidden_state,
976
+ cls_token_value=outputs.cls_token_value,
977
+ hidden_states=outputs.hidden_states,
978
+ )
979
+
980
+ def build(self, input_shape=None):
981
+ if self.built:
982
+ return
983
+ self.built = True
984
+ if getattr(self, "cvt", None) is not None:
985
+ with tf.name_scope(self.cvt.name):
986
+ self.cvt.build(None)
987
+
988
+
989
+ @add_start_docstrings(
990
+ """
991
+ Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
992
+ the [CLS] token) e.g. for ImageNet.
993
+ """,
994
+ TFCVT_START_DOCSTRING,
995
+ )
996
+ class TFCvtForImageClassification(TFCvtPreTrainedModel, TFSequenceClassificationLoss):
997
+ def __init__(self, config: CvtConfig, *inputs, **kwargs):
998
+ super().__init__(config, *inputs, **kwargs)
999
+
1000
+ self.num_labels = config.num_labels
1001
+ self.cvt = TFCvtMainLayer(config, name="cvt")
1002
+ # Using same default epsilon as in the original implementation.
1003
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm")
1004
+
1005
+ # Classifier head
1006
+ self.classifier = keras.layers.Dense(
1007
+ units=config.num_labels,
1008
+ kernel_initializer=get_initializer(config.initializer_range),
1009
+ use_bias=True,
1010
+ bias_initializer="zeros",
1011
+ name="classifier",
1012
+ )
1013
+ self.config = config
1014
+
1015
+ @unpack_inputs
1016
+ @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
1017
+ @replace_return_docstrings(output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC)
1018
+ def call(
1019
+ self,
1020
+ pixel_values: tf.Tensor | None = None,
1021
+ labels: tf.Tensor | None = None,
1022
+ output_hidden_states: Optional[bool] = None,
1023
+ return_dict: Optional[bool] = None,
1024
+ training: Optional[bool] = False,
1025
+ ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
1026
+ r"""
1027
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1028
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
1029
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1030
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1031
+
1032
+ Returns:
1033
+
1034
+ Examples:
1035
+
1036
+ ```python
1037
+ >>> from transformers import AutoImageProcessor, TFCvtForImageClassification
1038
+ >>> import tensorflow as tf
1039
+ >>> from PIL import Image
1040
+ >>> import requests
1041
+
1042
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1043
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1044
+
1045
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
1046
+ >>> model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
1047
+
1048
+ >>> inputs = image_processor(images=image, return_tensors="tf")
1049
+ >>> outputs = model(**inputs)
1050
+ >>> logits = outputs.logits
1051
+ >>> # model predicts one of the 1000 ImageNet classes
1052
+ >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
1053
+ >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
1054
+ ```"""
1055
+
1056
+ outputs = self.cvt(
1057
+ pixel_values,
1058
+ output_hidden_states=output_hidden_states,
1059
+ return_dict=return_dict,
1060
+ training=training,
1061
+ )
1062
+
1063
+ sequence_output = outputs[0]
1064
+ cls_token = outputs[1]
1065
+ if self.config.cls_token[-1]:
1066
+ sequence_output = self.layernorm(cls_token)
1067
+ else:
1068
+ # rearrange "batch_size, num_channels, height, width -> batch_size, (height*width), num_channels"
1069
+ batch_size, num_channels, height, width = shape_list(sequence_output)
1070
+ sequence_output = tf.reshape(sequence_output, shape=(batch_size, num_channels, height * width))
1071
+ sequence_output = tf.transpose(sequence_output, perm=(0, 2, 1))
1072
+ sequence_output = self.layernorm(sequence_output)
1073
+
1074
+ sequence_output_mean = tf.reduce_mean(sequence_output, axis=1)
1075
+ logits = self.classifier(sequence_output_mean)
1076
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1077
+
1078
+ if not return_dict:
1079
+ output = (logits,) + outputs[2:]
1080
+ return ((loss,) + output) if loss is not None else output
1081
+
1082
+ return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
1083
+
1084
+ def build(self, input_shape=None):
1085
+ if self.built:
1086
+ return
1087
+ self.built = True
1088
+ if getattr(self, "cvt", None) is not None:
1089
+ with tf.name_scope(self.cvt.name):
1090
+ self.cvt.build(None)
1091
+ if getattr(self, "layernorm", None) is not None:
1092
+ with tf.name_scope(self.layernorm.name):
1093
+ self.layernorm.build([None, None, self.config.embed_dim[-1]])
1094
+ if getattr(self, "classifier", None) is not None:
1095
+ if hasattr(self.classifier, "name"):
1096
+ with tf.name_scope(self.classifier.name):
1097
+ self.classifier.build([None, None, self.config.embed_dim[-1]])
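`TFCvtDropPath` near the top of this file implements stochastic depth: during training each sample keeps the residual branch with probability `1 - drop_prob`, and kept samples are rescaled by `1 / keep_prob` so the expected activation is unchanged. A self-contained sketch of the same computation, with hypothetical tensor shapes, purely for illustration:

```python
import tensorflow as tf


def drop_path(x: tf.Tensor, drop_prob: float, training: bool) -> tf.Tensor:
    # Identity at inference time or when the rate is zero, as in TFCvtDropPath.call.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining axes.
    shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
    random_tensor = tf.floor(keep_prob + tf.random.uniform(shape, 0, 1, dtype=x.dtype))
    # Dropped samples become zero; kept samples are scaled up by 1 / keep_prob.
    return (x / keep_prob) * random_tensor


x = tf.ones((4, 196, 384))  # hypothetical (batch, tokens, channels) activations
y = drop_path(x, drop_prob=0.1, training=True)
print(y.shape)  # (4, 196, 384): each sample is either all zeros or scaled by 1/0.9
```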
venv/lib/python3.10/site-packages/transformers/models/ernie/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_ernie"] = [
31
+ "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "ErnieForCausalLM",
33
+ "ErnieForMaskedLM",
34
+ "ErnieForMultipleChoice",
35
+ "ErnieForNextSentencePrediction",
36
+ "ErnieForPreTraining",
37
+ "ErnieForQuestionAnswering",
38
+ "ErnieForSequenceClassification",
39
+ "ErnieForTokenClassification",
40
+ "ErnieModel",
41
+ "ErniePreTrainedModel",
42
+ ]
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .modeling_ernie import (
54
+ ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
55
+ ErnieForCausalLM,
56
+ ErnieForMaskedLM,
57
+ ErnieForMultipleChoice,
58
+ ErnieForNextSentencePrediction,
59
+ ErnieForPreTraining,
60
+ ErnieForQuestionAnswering,
61
+ ErnieForSequenceClassification,
62
+ ErnieForTokenClassification,
63
+ ErnieModel,
64
+ ErniePreTrainedModel,
65
+ )
66
+
67
+ else:
68
+ import sys
69
+
70
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
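The `_LazyModule` registration above keeps `transformers.models.ernie` importable without PyTorch: the configuration entry is unconditional, the modeling entry is only added when torch is available, and the underlying module file is loaded lazily on first attribute access. A small usage sketch, assuming a standard `transformers` installation:

```python
from transformers.utils import is_torch_available

# Registered unconditionally in _import_structure, so this never needs torch.
from transformers.models.ernie import ErnieConfig

config = ErnieConfig(use_task_id=True)  # enable task_type_ids support

if is_torch_available():
    # The modeling symbols exist only when torch is installed, and the heavy
    # modeling_ernie module is materialized lazily on this first access.
    from transformers.models.ernie import ErnieModel

    model = ErnieModel(config)
    print(type(model).__name__, config.task_type_vocab_size)
```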
venv/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc ADDED
Binary file (6.88 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc ADDED
Binary file (52.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py ADDED
@@ -0,0 +1,162 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ ERNIE model configuration"""
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class ErnieConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to
34
+ instantiate a ERNIE model according to the specified arguments, defining the model architecture. Instantiating a
35
+ configuration with the defaults will yield a similar configuration to that of the ERNIE
36
+ [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 30522):
44
+ Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
45
+ `inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
46
+ hidden_size (`int`, *optional*, defaults to 768):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
66
+ task_type_vocab_size (`int`, *optional*, defaults to 3):
67
+ The vocabulary size of the `task_type_ids` for ERNIE2.0/ERNIE3.0 model
68
+ use_task_id (`bool`, *optional*, defaults to `False`):
69
+ Whether or not the model support `task_type_ids`
70
+ initializer_range (`float`, *optional*, defaults to 0.02):
71
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
72
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
73
+ The epsilon used by the layer normalization layers.
74
+ pad_token_id (`int`, *optional*, defaults to 0):
75
+ Padding token id.
76
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
77
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
78
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
79
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
80
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
81
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
82
+ use_cache (`bool`, *optional*, defaults to `True`):
83
+ Whether or not the model should return the last key/value attention states (not used by all models). Only
84
+ relevant if `config.is_decoder=True`.
85
+ classifier_dropout (`float`, *optional*):
86
+ The dropout ratio for the classification head.
87
+
88
+ Examples:
89
+
90
+ ```python
91
+ >>> from transformers import ErnieConfig, ErnieModel
92
+
93
+ >>> # Initializing an ERNIE nghuyong/ernie-3.0-base-zh style configuration
94
+ >>> configuration = ErnieConfig()
95
+
96
+ >>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
97
+ >>> model = ErnieModel(configuration)
98
+
99
+ >>> # Accessing the model configuration
100
+ >>> configuration = model.config
101
+ ```"""
102
+
103
+ model_type = "ernie"
104
+
105
+ def __init__(
106
+ self,
107
+ vocab_size=30522,
108
+ hidden_size=768,
109
+ num_hidden_layers=12,
110
+ num_attention_heads=12,
111
+ intermediate_size=3072,
112
+ hidden_act="gelu",
113
+ hidden_dropout_prob=0.1,
114
+ attention_probs_dropout_prob=0.1,
115
+ max_position_embeddings=512,
116
+ type_vocab_size=2,
117
+ task_type_vocab_size=3,
118
+ use_task_id=False,
119
+ initializer_range=0.02,
120
+ layer_norm_eps=1e-12,
121
+ pad_token_id=0,
122
+ position_embedding_type="absolute",
123
+ use_cache=True,
124
+ classifier_dropout=None,
125
+ **kwargs,
126
+ ):
127
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
128
+
129
+ self.vocab_size = vocab_size
130
+ self.hidden_size = hidden_size
131
+ self.num_hidden_layers = num_hidden_layers
132
+ self.num_attention_heads = num_attention_heads
133
+ self.hidden_act = hidden_act
134
+ self.intermediate_size = intermediate_size
135
+ self.hidden_dropout_prob = hidden_dropout_prob
136
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.type_vocab_size = type_vocab_size
139
+ self.task_type_vocab_size = task_type_vocab_size
140
+ self.use_task_id = use_task_id
141
+ self.initializer_range = initializer_range
142
+ self.layer_norm_eps = layer_norm_eps
143
+ self.position_embedding_type = position_embedding_type
144
+ self.use_cache = use_cache
145
+ self.classifier_dropout = classifier_dropout
146
+
147
+
148
+ class ErnieOnnxConfig(OnnxConfig):
149
+ @property
150
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
151
+ if self.task == "multiple-choice":
152
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
153
+ else:
154
+ dynamic_axis = {0: "batch", 1: "sequence"}
155
+ return OrderedDict(
156
+ [
157
+ ("input_ids", dynamic_axis),
158
+ ("attention_mask", dynamic_axis),
159
+ ("token_type_ids", dynamic_axis),
160
+ ("task_type_ids", dynamic_axis),
161
+ ]
162
+ )
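As a quick, hedged illustration of the two classes above (not part of the committed diff), the following sketch instantiates `ErnieConfig` with the task-type embedding enabled and inspects the dynamic axes declared by `ErnieOnnxConfig`; it assumes the `transformers` build that ships these files is installed.

# Illustrative sketch only -- not part of the committed file.
from transformers.models.ernie.configuration_ernie import ErnieConfig, ErnieOnnxConfig

# Enable the extra task-type embedding table (3 task ids by default).
config = ErnieConfig(use_task_id=True, task_type_vocab_size=3)
print(config.use_task_id, config.task_type_vocab_size)  # True 3

# The ONNX config lists task_type_ids among the expected inputs,
# each with batch/sequence dynamic axes.
onnx_config = ErnieOnnxConfig(config, task="default")
print(onnx_config.inputs)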
venv/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py ADDED
@@ -0,0 +1,1820 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ERNIE model."""
16
+
17
+
18
+ import math
19
+ import warnings
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ CausalLMOutputWithCrossAttentions,
33
+ MaskedLMOutput,
34
+ MultipleChoiceModelOutput,
35
+ NextSentencePredictorOutput,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutput,
38
+ TokenClassifierOutput,
39
+ )
40
+ from ...modeling_utils import PreTrainedModel
41
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
42
+ from ...utils import (
43
+ ModelOutput,
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from .configuration_ernie import ErnieConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "nghuyong/ernie-1.0-base-zh"
56
+ _CONFIG_FOR_DOC = "ErnieConfig"
57
+
58
+
59
+ from ..deprecated._archive_maps import ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
60
+
61
+
62
+ class ErnieEmbeddings(nn.Module):
63
+ """Construct the embeddings from word, position and token_type embeddings."""
64
+
65
+ def __init__(self, config):
66
+ super().__init__()
67
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
68
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
69
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
70
+ self.use_task_id = config.use_task_id
71
+ if config.use_task_id:
72
+ self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size)
73
+
74
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
75
+ # any TensorFlow checkpoint file
76
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
77
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
78
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
79
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
80
+ self.register_buffer(
81
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
82
+ )
83
+ self.register_buffer(
84
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
85
+ )
86
+
87
+ def forward(
88
+ self,
89
+ input_ids: Optional[torch.LongTensor] = None,
90
+ token_type_ids: Optional[torch.LongTensor] = None,
91
+ task_type_ids: Optional[torch.LongTensor] = None,
92
+ position_ids: Optional[torch.LongTensor] = None,
93
+ inputs_embeds: Optional[torch.FloatTensor] = None,
94
+ past_key_values_length: int = 0,
95
+ ) -> torch.Tensor:
96
+ if input_ids is not None:
97
+ input_shape = input_ids.size()
98
+ else:
99
+ input_shape = inputs_embeds.size()[:-1]
100
+
101
+ seq_length = input_shape[1]
102
+
103
+ if position_ids is None:
104
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
105
+
106
+ # Set the token_type_ids to the buffer registered in the constructor, where it is all zeros. This usually occurs
107
+ # when it is auto-generated; the registered buffer helps users trace the model without passing token_type_ids and solves
108
+ # issue #5664
109
+ if token_type_ids is None:
110
+ if hasattr(self, "token_type_ids"):
111
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
112
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
113
+ token_type_ids = buffered_token_type_ids_expanded
114
+ else:
115
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
116
+
117
+ if inputs_embeds is None:
118
+ inputs_embeds = self.word_embeddings(input_ids)
119
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
120
+
121
+ embeddings = inputs_embeds + token_type_embeddings
122
+ if self.position_embedding_type == "absolute":
123
+ position_embeddings = self.position_embeddings(position_ids)
124
+ embeddings += position_embeddings
125
+
126
+ # add `task_type_id` for ERNIE model
127
+ if self.use_task_id:
128
+ if task_type_ids is None:
129
+ task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
130
+ task_type_embeddings = self.task_type_embeddings(task_type_ids)
131
+ embeddings += task_type_embeddings
132
+
133
+ embeddings = self.LayerNorm(embeddings)
134
+ embeddings = self.dropout(embeddings)
135
+ return embeddings
136
+
137
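A minimal sketch (illustration only, not part of the diff) of how `ErnieEmbeddings` above combines the word, token-type, position and task-type tables when `use_task_id` is enabled; the output shape assumes the default `ErnieConfig`.

# Illustrative sketch only -- not part of the committed file.
import torch
from transformers.models.ernie.configuration_ernie import ErnieConfig
from transformers.models.ernie.modeling_ernie import ErnieEmbeddings

config = ErnieConfig(use_task_id=True)
embeddings = ErnieEmbeddings(config)

input_ids = torch.randint(0, config.vocab_size, (2, 8))  # (batch, seq)
task_type_ids = torch.zeros_like(input_ids)              # every token uses task id 0
out = embeddings(input_ids=input_ids, task_type_ids=task_type_ids)
print(out.shape)  # torch.Size([2, 8, 768])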
+
138
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Ernie
139
+ class ErnieSelfAttention(nn.Module):
140
+ def __init__(self, config, position_embedding_type=None):
141
+ super().__init__()
142
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
143
+ raise ValueError(
144
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
145
+ f"heads ({config.num_attention_heads})"
146
+ )
147
+
148
+ self.num_attention_heads = config.num_attention_heads
149
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
150
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
151
+
152
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
153
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
154
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
155
+
156
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
157
+ self.position_embedding_type = position_embedding_type or getattr(
158
+ config, "position_embedding_type", "absolute"
159
+ )
160
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
161
+ self.max_position_embeddings = config.max_position_embeddings
162
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
163
+
164
+ self.is_decoder = config.is_decoder
165
+
166
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
167
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
168
+ x = x.view(new_x_shape)
169
+ return x.permute(0, 2, 1, 3)
170
+
171
+ def forward(
172
+ self,
173
+ hidden_states: torch.Tensor,
174
+ attention_mask: Optional[torch.FloatTensor] = None,
175
+ head_mask: Optional[torch.FloatTensor] = None,
176
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
177
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
178
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
179
+ output_attentions: Optional[bool] = False,
180
+ ) -> Tuple[torch.Tensor]:
181
+ mixed_query_layer = self.query(hidden_states)
182
+
183
+ # If this is instantiated as a cross-attention module, the keys
184
+ # and values come from an encoder; the attention mask needs to be
185
+ # such that the encoder's padding tokens are not attended to.
186
+ is_cross_attention = encoder_hidden_states is not None
187
+
188
+ if is_cross_attention and past_key_value is not None:
189
+ # reuse k,v, cross_attentions
190
+ key_layer = past_key_value[0]
191
+ value_layer = past_key_value[1]
192
+ attention_mask = encoder_attention_mask
193
+ elif is_cross_attention:
194
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
195
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
196
+ attention_mask = encoder_attention_mask
197
+ elif past_key_value is not None:
198
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
199
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
200
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
201
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
202
+ else:
203
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
204
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
205
+
206
+ query_layer = self.transpose_for_scores(mixed_query_layer)
207
+
208
+ use_cache = past_key_value is not None
209
+ if self.is_decoder:
210
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
211
+ # Further calls to cross_attention layer can then reuse all cross-attention
212
+ # key/value_states (first "if" case)
213
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
214
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
215
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
216
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
217
+ past_key_value = (key_layer, value_layer)
218
+
219
+ # Take the dot product between "query" and "key" to get the raw attention scores.
220
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
221
+
222
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
223
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
224
+ if use_cache:
225
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
226
+ -1, 1
227
+ )
228
+ else:
229
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
230
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
231
+ distance = position_ids_l - position_ids_r
232
+
233
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
234
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
235
+
236
+ if self.position_embedding_type == "relative_key":
237
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
238
+ attention_scores = attention_scores + relative_position_scores
239
+ elif self.position_embedding_type == "relative_key_query":
240
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
241
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
242
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
243
+
244
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
245
+ if attention_mask is not None:
246
+ # Apply the attention mask (precomputed for all layers in the ErnieModel forward() function)
247
+ attention_scores = attention_scores + attention_mask
248
+
249
+ # Normalize the attention scores to probabilities.
250
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
251
+
252
+ # This is actually dropping out entire tokens to attend to, which might
253
+ # seem a bit unusual, but is taken from the original Transformer paper.
254
+ attention_probs = self.dropout(attention_probs)
255
+
256
+ # Mask heads if we want to
257
+ if head_mask is not None:
258
+ attention_probs = attention_probs * head_mask
259
+
260
+ context_layer = torch.matmul(attention_probs, value_layer)
261
+
262
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
263
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
264
+ context_layer = context_layer.view(new_context_layer_shape)
265
+
266
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
267
+
268
+ if self.is_decoder:
269
+ outputs = outputs + (past_key_value,)
270
+ return outputs
271
+
272
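A short shape sketch (illustration only) for the self-attention block above: `transpose_for_scores` splits the hidden dimension into `num_attention_heads` heads of size `attention_head_size`, and the forward pass returns the re-merged context layer.

# Illustrative sketch only -- not part of the committed file.
import torch
from transformers.models.ernie.configuration_ernie import ErnieConfig
from transformers.models.ernie.modeling_ernie import ErnieSelfAttention

config = ErnieConfig()                 # hidden_size=768, 12 heads -> head size 64
attn = ErnieSelfAttention(config).eval()

hidden_states = torch.randn(2, 8, config.hidden_size)
q = attn.transpose_for_scores(attn.query(hidden_states))
print(q.shape)                         # torch.Size([2, 12, 8, 64])

(context,) = attn(hidden_states)       # 1-tuple when output_attentions=False
print(context.shape)                   # torch.Size([2, 8, 768])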
+
273
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Ernie
274
+ class ErnieSelfOutput(nn.Module):
275
+ def __init__(self, config):
276
+ super().__init__()
277
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
278
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
279
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
280
+
281
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
282
+ hidden_states = self.dense(hidden_states)
283
+ hidden_states = self.dropout(hidden_states)
284
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
285
+ return hidden_states
286
+
287
+
288
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Ernie
289
+ class ErnieAttention(nn.Module):
290
+ def __init__(self, config, position_embedding_type=None):
291
+ super().__init__()
292
+ self.self = ErnieSelfAttention(config, position_embedding_type=position_embedding_type)
293
+ self.output = ErnieSelfOutput(config)
294
+ self.pruned_heads = set()
295
+
296
+ def prune_heads(self, heads):
297
+ if len(heads) == 0:
298
+ return
299
+ heads, index = find_pruneable_heads_and_indices(
300
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
301
+ )
302
+
303
+ # Prune linear layers
304
+ self.self.query = prune_linear_layer(self.self.query, index)
305
+ self.self.key = prune_linear_layer(self.self.key, index)
306
+ self.self.value = prune_linear_layer(self.self.value, index)
307
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
308
+
309
+ # Update hyper params and store pruned heads
310
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
311
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
312
+ self.pruned_heads = self.pruned_heads.union(heads)
313
+
314
+ def forward(
315
+ self,
316
+ hidden_states: torch.Tensor,
317
+ attention_mask: Optional[torch.FloatTensor] = None,
318
+ head_mask: Optional[torch.FloatTensor] = None,
319
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
320
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
321
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
322
+ output_attentions: Optional[bool] = False,
323
+ ) -> Tuple[torch.Tensor]:
324
+ self_outputs = self.self(
325
+ hidden_states,
326
+ attention_mask,
327
+ head_mask,
328
+ encoder_hidden_states,
329
+ encoder_attention_mask,
330
+ past_key_value,
331
+ output_attentions,
332
+ )
333
+ attention_output = self.output(self_outputs[0], hidden_states)
334
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
335
+ return outputs
336
+
337
+
338
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Ernie
339
+ class ErnieIntermediate(nn.Module):
340
+ def __init__(self, config):
341
+ super().__init__()
342
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
343
+ if isinstance(config.hidden_act, str):
344
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
345
+ else:
346
+ self.intermediate_act_fn = config.hidden_act
347
+
348
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
349
+ hidden_states = self.dense(hidden_states)
350
+ hidden_states = self.intermediate_act_fn(hidden_states)
351
+ return hidden_states
352
+
353
+
354
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Ernie
355
+ class ErnieOutput(nn.Module):
356
+ def __init__(self, config):
357
+ super().__init__()
358
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
359
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
360
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
361
+
362
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
363
+ hidden_states = self.dense(hidden_states)
364
+ hidden_states = self.dropout(hidden_states)
365
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
366
+ return hidden_states
367
+
368
+
369
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Ernie
370
+ class ErnieLayer(nn.Module):
371
+ def __init__(self, config):
372
+ super().__init__()
373
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
374
+ self.seq_len_dim = 1
375
+ self.attention = ErnieAttention(config)
376
+ self.is_decoder = config.is_decoder
377
+ self.add_cross_attention = config.add_cross_attention
378
+ if self.add_cross_attention:
379
+ if not self.is_decoder:
380
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
381
+ self.crossattention = ErnieAttention(config, position_embedding_type="absolute")
382
+ self.intermediate = ErnieIntermediate(config)
383
+ self.output = ErnieOutput(config)
384
+
385
+ def forward(
386
+ self,
387
+ hidden_states: torch.Tensor,
388
+ attention_mask: Optional[torch.FloatTensor] = None,
389
+ head_mask: Optional[torch.FloatTensor] = None,
390
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
391
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
392
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
393
+ output_attentions: Optional[bool] = False,
394
+ ) -> Tuple[torch.Tensor]:
395
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
396
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
397
+ self_attention_outputs = self.attention(
398
+ hidden_states,
399
+ attention_mask,
400
+ head_mask,
401
+ output_attentions=output_attentions,
402
+ past_key_value=self_attn_past_key_value,
403
+ )
404
+ attention_output = self_attention_outputs[0]
405
+
406
+ # if decoder, the last output is tuple of self-attn cache
407
+ if self.is_decoder:
408
+ outputs = self_attention_outputs[1:-1]
409
+ present_key_value = self_attention_outputs[-1]
410
+ else:
411
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
412
+
413
+ cross_attn_present_key_value = None
414
+ if self.is_decoder and encoder_hidden_states is not None:
415
+ if not hasattr(self, "crossattention"):
416
+ raise ValueError(
417
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
418
+ " by setting `config.add_cross_attention=True`"
419
+ )
420
+
421
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
422
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
423
+ cross_attention_outputs = self.crossattention(
424
+ attention_output,
425
+ attention_mask,
426
+ head_mask,
427
+ encoder_hidden_states,
428
+ encoder_attention_mask,
429
+ cross_attn_past_key_value,
430
+ output_attentions,
431
+ )
432
+ attention_output = cross_attention_outputs[0]
433
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
434
+
435
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
436
+ cross_attn_present_key_value = cross_attention_outputs[-1]
437
+ present_key_value = present_key_value + cross_attn_present_key_value
438
+
439
+ layer_output = apply_chunking_to_forward(
440
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
441
+ )
442
+ outputs = (layer_output,) + outputs
443
+
444
+ # if decoder, return the attn key/values as the last output
445
+ if self.is_decoder:
446
+ outputs = outputs + (present_key_value,)
447
+
448
+ return outputs
449
+
450
+ def feed_forward_chunk(self, attention_output):
451
+ intermediate_output = self.intermediate(attention_output)
452
+ layer_output = self.output(intermediate_output, attention_output)
453
+ return layer_output
454
+
455
+
456
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Ernie
457
+ class ErnieEncoder(nn.Module):
458
+ def __init__(self, config):
459
+ super().__init__()
460
+ self.config = config
461
+ self.layer = nn.ModuleList([ErnieLayer(config) for _ in range(config.num_hidden_layers)])
462
+ self.gradient_checkpointing = False
463
+
464
+ def forward(
465
+ self,
466
+ hidden_states: torch.Tensor,
467
+ attention_mask: Optional[torch.FloatTensor] = None,
468
+ head_mask: Optional[torch.FloatTensor] = None,
469
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
470
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
471
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
472
+ use_cache: Optional[bool] = None,
473
+ output_attentions: Optional[bool] = False,
474
+ output_hidden_states: Optional[bool] = False,
475
+ return_dict: Optional[bool] = True,
476
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
477
+ all_hidden_states = () if output_hidden_states else None
478
+ all_self_attentions = () if output_attentions else None
479
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
480
+
481
+ if self.gradient_checkpointing and self.training:
482
+ if use_cache:
483
+ logger.warning_once(
484
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
485
+ )
486
+ use_cache = False
487
+
488
+ next_decoder_cache = () if use_cache else None
489
+ for i, layer_module in enumerate(self.layer):
490
+ if output_hidden_states:
491
+ all_hidden_states = all_hidden_states + (hidden_states,)
492
+
493
+ layer_head_mask = head_mask[i] if head_mask is not None else None
494
+ past_key_value = past_key_values[i] if past_key_values is not None else None
495
+
496
+ if self.gradient_checkpointing and self.training:
497
+ layer_outputs = self._gradient_checkpointing_func(
498
+ layer_module.__call__,
499
+ hidden_states,
500
+ attention_mask,
501
+ layer_head_mask,
502
+ encoder_hidden_states,
503
+ encoder_attention_mask,
504
+ past_key_value,
505
+ output_attentions,
506
+ )
507
+ else:
508
+ layer_outputs = layer_module(
509
+ hidden_states,
510
+ attention_mask,
511
+ layer_head_mask,
512
+ encoder_hidden_states,
513
+ encoder_attention_mask,
514
+ past_key_value,
515
+ output_attentions,
516
+ )
517
+
518
+ hidden_states = layer_outputs[0]
519
+ if use_cache:
520
+ next_decoder_cache += (layer_outputs[-1],)
521
+ if output_attentions:
522
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
523
+ if self.config.add_cross_attention:
524
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
525
+
526
+ if output_hidden_states:
527
+ all_hidden_states = all_hidden_states + (hidden_states,)
528
+
529
+ if not return_dict:
530
+ return tuple(
531
+ v
532
+ for v in [
533
+ hidden_states,
534
+ next_decoder_cache,
535
+ all_hidden_states,
536
+ all_self_attentions,
537
+ all_cross_attentions,
538
+ ]
539
+ if v is not None
540
+ )
541
+ return BaseModelOutputWithPastAndCrossAttentions(
542
+ last_hidden_state=hidden_states,
543
+ past_key_values=next_decoder_cache,
544
+ hidden_states=all_hidden_states,
545
+ attentions=all_self_attentions,
546
+ cross_attentions=all_cross_attentions,
547
+ )
548
+
549
+
550
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Ernie
551
+ class ErniePooler(nn.Module):
552
+ def __init__(self, config):
553
+ super().__init__()
554
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
555
+ self.activation = nn.Tanh()
556
+
557
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
558
+ # We "pool" the model by simply taking the hidden state corresponding
559
+ # to the first token.
560
+ first_token_tensor = hidden_states[:, 0]
561
+ pooled_output = self.dense(first_token_tensor)
562
+ pooled_output = self.activation(pooled_output)
563
+ return pooled_output
564
+
565
+
566
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Ernie
567
+ class ErniePredictionHeadTransform(nn.Module):
568
+ def __init__(self, config):
569
+ super().__init__()
570
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
571
+ if isinstance(config.hidden_act, str):
572
+ self.transform_act_fn = ACT2FN[config.hidden_act]
573
+ else:
574
+ self.transform_act_fn = config.hidden_act
575
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
576
+
577
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
578
+ hidden_states = self.dense(hidden_states)
579
+ hidden_states = self.transform_act_fn(hidden_states)
580
+ hidden_states = self.LayerNorm(hidden_states)
581
+ return hidden_states
582
+
583
+
584
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Ernie
585
+ class ErnieLMPredictionHead(nn.Module):
586
+ def __init__(self, config):
587
+ super().__init__()
588
+ self.transform = ErniePredictionHeadTransform(config)
589
+
590
+ # The output weights are the same as the input embeddings, but there is
591
+ # an output-only bias for each token.
592
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
593
+
594
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
595
+
596
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
597
+ self.decoder.bias = self.bias
598
+
599
+ def forward(self, hidden_states):
600
+ hidden_states = self.transform(hidden_states)
601
+ hidden_states = self.decoder(hidden_states)
602
+ return hidden_states
603
+
604
+
605
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Ernie
606
+ class ErnieOnlyMLMHead(nn.Module):
607
+ def __init__(self, config):
608
+ super().__init__()
609
+ self.predictions = ErnieLMPredictionHead(config)
610
+
611
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
612
+ prediction_scores = self.predictions(sequence_output)
613
+ return prediction_scores
614
+
615
+
616
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Ernie
617
+ class ErnieOnlyNSPHead(nn.Module):
618
+ def __init__(self, config):
619
+ super().__init__()
620
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
621
+
622
+ def forward(self, pooled_output):
623
+ seq_relationship_score = self.seq_relationship(pooled_output)
624
+ return seq_relationship_score
625
+
626
+
627
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Ernie
628
+ class ErniePreTrainingHeads(nn.Module):
629
+ def __init__(self, config):
630
+ super().__init__()
631
+ self.predictions = ErnieLMPredictionHead(config)
632
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
633
+
634
+ def forward(self, sequence_output, pooled_output):
635
+ prediction_scores = self.predictions(sequence_output)
636
+ seq_relationship_score = self.seq_relationship(pooled_output)
637
+ return prediction_scores, seq_relationship_score
638
+
639
+
640
+ class ErniePreTrainedModel(PreTrainedModel):
641
+ """
642
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
643
+ models.
644
+ """
645
+
646
+ config_class = ErnieConfig
647
+ base_model_prefix = "ernie"
648
+ supports_gradient_checkpointing = True
649
+
650
+ def _init_weights(self, module):
651
+ """Initialize the weights"""
652
+ if isinstance(module, nn.Linear):
653
+ # Slightly different from the TF version which uses truncated_normal for initialization
654
+ # cf https://github.com/pytorch/pytorch/pull/5617
655
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
656
+ if module.bias is not None:
657
+ module.bias.data.zero_()
658
+ elif isinstance(module, nn.Embedding):
659
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
660
+ if module.padding_idx is not None:
661
+ module.weight.data[module.padding_idx].zero_()
662
+ elif isinstance(module, nn.LayerNorm):
663
+ module.bias.data.zero_()
664
+ module.weight.data.fill_(1.0)
665
+
666
+
667
+ @dataclass
668
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->Ernie
669
+ class ErnieForPreTrainingOutput(ModelOutput):
670
+ """
671
+ Output type of [`ErnieForPreTraining`].
672
+
673
+ Args:
674
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
675
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
676
+ (classification) loss.
677
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
678
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
679
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
680
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
681
+ before SoftMax).
682
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
683
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
684
+ shape `(batch_size, sequence_length, hidden_size)`.
685
+
686
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
687
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
688
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
689
+ sequence_length)`.
690
+
691
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
692
+ heads.
693
+ """
694
+
695
+ loss: Optional[torch.FloatTensor] = None
696
+ prediction_logits: torch.FloatTensor = None
697
+ seq_relationship_logits: torch.FloatTensor = None
698
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
699
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
700
+
701
+
702
+ ERNIE_START_DOCSTRING = r"""
703
+
704
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
705
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
706
+ etc.)
707
+
708
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
709
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
710
+ and behavior.
711
+
712
+ Parameters:
713
+ config ([`ErnieConfig`]): Model configuration class with all the parameters of the model.
714
+ Initializing with a config file does not load the weights associated with the model, only the
715
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
716
+ """
717
+
718
+ ERNIE_INPUTS_DOCSTRING = r"""
719
+ Args:
720
+ input_ids (`torch.LongTensor` of shape `({0})`):
721
+ Indices of input sequence tokens in the vocabulary.
722
+
723
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
724
+ [`PreTrainedTokenizer.__call__`] for details.
725
+
726
+ [What are input IDs?](../glossary#input-ids)
727
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
728
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
729
+
730
+ - 1 for tokens that are **not masked**,
731
+ - 0 for tokens that are **masked**.
732
+
733
+ [What are attention masks?](../glossary#attention-mask)
734
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
735
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
736
+ 1]`:
737
+
738
+ - 0 corresponds to a *sentence A* token,
739
+ - 1 corresponds to a *sentence B* token.
740
+
741
+ [What are token type IDs?](../glossary#token-type-ids)
742
+ task_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
743
+ Task type embedding is a special embedding to represent the characteristic of different tasks, such as
744
+ word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
745
+ assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
746
+ config.task_type_vocab_size - 1]`.
747
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
748
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
749
+ config.max_position_embeddings - 1]`.
750
+
751
+ [What are position IDs?](../glossary#position-ids)
752
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
753
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
754
+
755
+ - 1 indicates the head is **not masked**,
756
+ - 0 indicates the head is **masked**.
757
+
758
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
759
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
760
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
761
+ model's internal embedding lookup matrix.
762
+ output_attentions (`bool`, *optional*):
763
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
764
+ tensors for more detail.
765
+ output_hidden_states (`bool`, *optional*):
766
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
767
+ more detail.
768
+ return_dict (`bool`, *optional*):
769
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
770
+ """
771
+
772
+
773
+ @add_start_docstrings(
774
+ "The bare Ernie Model transformer outputting raw hidden-states without any specific head on top.",
775
+ ERNIE_START_DOCSTRING,
776
+ )
777
+ class ErnieModel(ErniePreTrainedModel):
778
+ """
779
+
780
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
781
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
782
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
783
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
784
+
785
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
786
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
787
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
788
+ """
789
+
790
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Ernie
791
+ def __init__(self, config, add_pooling_layer=True):
792
+ super().__init__(config)
793
+ self.config = config
794
+
795
+ self.embeddings = ErnieEmbeddings(config)
796
+ self.encoder = ErnieEncoder(config)
797
+
798
+ self.pooler = ErniePooler(config) if add_pooling_layer else None
799
+
800
+ # Initialize weights and apply final processing
801
+ self.post_init()
802
+
803
+ # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings
804
+ def get_input_embeddings(self):
805
+ return self.embeddings.word_embeddings
806
+
807
+ # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings
808
+ def set_input_embeddings(self, value):
809
+ self.embeddings.word_embeddings = value
810
+
811
+ # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
812
+ def _prune_heads(self, heads_to_prune):
813
+ """
814
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
815
+ class PreTrainedModel
816
+ """
817
+ for layer, heads in heads_to_prune.items():
818
+ self.encoder.layer[layer].attention.prune_heads(heads)
819
+
820
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
821
+ @add_code_sample_docstrings(
822
+ checkpoint=_CHECKPOINT_FOR_DOC,
823
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
824
+ config_class=_CONFIG_FOR_DOC,
825
+ )
826
+ def forward(
827
+ self,
828
+ input_ids: Optional[torch.Tensor] = None,
829
+ attention_mask: Optional[torch.Tensor] = None,
830
+ token_type_ids: Optional[torch.Tensor] = None,
831
+ task_type_ids: Optional[torch.Tensor] = None,
832
+ position_ids: Optional[torch.Tensor] = None,
833
+ head_mask: Optional[torch.Tensor] = None,
834
+ inputs_embeds: Optional[torch.Tensor] = None,
835
+ encoder_hidden_states: Optional[torch.Tensor] = None,
836
+ encoder_attention_mask: Optional[torch.Tensor] = None,
837
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
838
+ use_cache: Optional[bool] = None,
839
+ output_attentions: Optional[bool] = None,
840
+ output_hidden_states: Optional[bool] = None,
841
+ return_dict: Optional[bool] = None,
842
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
843
+ r"""
844
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
845
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
846
+ the model is configured as a decoder.
847
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
848
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
849
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
850
+
851
+ - 1 for tokens that are **not masked**,
852
+ - 0 for tokens that are **masked**.
853
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
854
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
855
+
856
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
857
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
858
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
859
+ use_cache (`bool`, *optional*):
860
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
861
+ `past_key_values`).
862
+ """
863
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
864
+ output_hidden_states = (
865
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
866
+ )
867
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
868
+
869
+ if self.config.is_decoder:
870
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
871
+ else:
872
+ use_cache = False
873
+
874
+ if input_ids is not None and inputs_embeds is not None:
875
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
876
+ elif input_ids is not None:
877
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
878
+ input_shape = input_ids.size()
879
+ elif inputs_embeds is not None:
880
+ input_shape = inputs_embeds.size()[:-1]
881
+ else:
882
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
883
+
884
+ batch_size, seq_length = input_shape
885
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
886
+
887
+ # past_key_values_length
888
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
889
+
890
+ if attention_mask is None:
891
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
892
+
893
+ if token_type_ids is None:
894
+ if hasattr(self.embeddings, "token_type_ids"):
895
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
896
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
897
+ token_type_ids = buffered_token_type_ids_expanded
898
+ else:
899
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
900
+
901
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
902
+ # ourselves in which case we just need to make it broadcastable to all heads.
903
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
904
+
905
+ # If a 2D or 3D attention mask is provided for the cross-attention
906
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
907
+ if self.config.is_decoder and encoder_hidden_states is not None:
908
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
909
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
910
+ if encoder_attention_mask is None:
911
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
912
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
913
+ else:
914
+ encoder_extended_attention_mask = None
915
+
916
+ # Prepare head mask if needed
917
+ # 1.0 in head_mask indicate we keep the head
918
+ # attention_probs has shape bsz x n_heads x N x N
919
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
920
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
921
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
922
+
923
+ embedding_output = self.embeddings(
924
+ input_ids=input_ids,
925
+ position_ids=position_ids,
926
+ token_type_ids=token_type_ids,
927
+ task_type_ids=task_type_ids,
928
+ inputs_embeds=inputs_embeds,
929
+ past_key_values_length=past_key_values_length,
930
+ )
931
+ encoder_outputs = self.encoder(
932
+ embedding_output,
933
+ attention_mask=extended_attention_mask,
934
+ head_mask=head_mask,
935
+ encoder_hidden_states=encoder_hidden_states,
936
+ encoder_attention_mask=encoder_extended_attention_mask,
937
+ past_key_values=past_key_values,
938
+ use_cache=use_cache,
939
+ output_attentions=output_attentions,
940
+ output_hidden_states=output_hidden_states,
941
+ return_dict=return_dict,
942
+ )
943
+ sequence_output = encoder_outputs[0]
944
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
945
+
946
+ if not return_dict:
947
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
948
+
949
+ return BaseModelOutputWithPoolingAndCrossAttentions(
950
+ last_hidden_state=sequence_output,
951
+ pooler_output=pooled_output,
952
+ past_key_values=encoder_outputs.past_key_values,
953
+ hidden_states=encoder_outputs.hidden_states,
954
+ attentions=encoder_outputs.attentions,
955
+ cross_attentions=encoder_outputs.cross_attentions,
956
+ )
957
+
958
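A hedged end-to-end sketch (illustration only) of the bare `ErnieModel` defined above, using the `nghuyong/ernie-1.0-base-zh` checkpoint referenced by `_CHECKPOINT_FOR_DOC`; downloading it requires network access, and whether `task_type_ids` has any effect depends on the checkpoint's `use_task_id` setting.

# Illustrative sketch only -- not part of the committed file.
import torch
from transformers import AutoTokenizer, ErnieModel

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieModel.from_pretrained("nghuyong/ernie-1.0-base-zh")

inputs = tokenizer("欢迎使用 ERNIE", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (1, seq_len, 768)
print(outputs.pooler_output.shape)      # (1, 768)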
+
959
+ @add_start_docstrings(
960
+ """
961
+ Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
962
+ sentence prediction (classification)` head.
963
+ """,
964
+ ERNIE_START_DOCSTRING,
965
+ )
966
+ class ErnieForPreTraining(ErniePreTrainedModel):
967
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
968
+
969
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.__init__ with Bert->Ernie,bert->ernie
970
+ def __init__(self, config):
971
+ super().__init__(config)
972
+
973
+ self.ernie = ErnieModel(config)
974
+ self.cls = ErniePreTrainingHeads(config)
975
+
976
+ # Initialize weights and apply final processing
977
+ self.post_init()
978
+
979
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings
980
+ def get_output_embeddings(self):
981
+ return self.cls.predictions.decoder
982
+
983
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings
984
+ def set_output_embeddings(self, new_embeddings):
985
+ self.cls.predictions.decoder = new_embeddings
986
+
987
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
988
+ @replace_return_docstrings(output_type=ErnieForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
989
+ def forward(
990
+ self,
991
+ input_ids: Optional[torch.Tensor] = None,
992
+ attention_mask: Optional[torch.Tensor] = None,
993
+ token_type_ids: Optional[torch.Tensor] = None,
994
+ task_type_ids: Optional[torch.Tensor] = None,
995
+ position_ids: Optional[torch.Tensor] = None,
996
+ head_mask: Optional[torch.Tensor] = None,
997
+ inputs_embeds: Optional[torch.Tensor] = None,
998
+ labels: Optional[torch.Tensor] = None,
999
+ next_sentence_label: Optional[torch.Tensor] = None,
1000
+ output_attentions: Optional[bool] = None,
1001
+ output_hidden_states: Optional[bool] = None,
1002
+ return_dict: Optional[bool] = None,
1003
+ ) -> Union[Tuple[torch.Tensor], ErnieForPreTrainingOutput]:
1004
+ r"""
1005
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1006
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1007
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
1008
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1009
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1010
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
1011
+ pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
1012
+
1013
+ - 0 indicates sequence B is a continuation of sequence A,
1014
+ - 1 indicates sequence B is a random sequence.
1015
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1016
+ Used to hide legacy arguments that have been deprecated.
1017
+
1018
+ Returns:
1019
+
1020
+ Example:
1021
+
1022
+ ```python
1023
+ >>> from transformers import AutoTokenizer, ErnieForPreTraining
1024
+ >>> import torch
1025
+
1026
+ >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
1027
+ >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")
1028
+
1029
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1030
+ >>> outputs = model(**inputs)
1031
+
1032
+ >>> prediction_logits = outputs.prediction_logits
1033
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1034
+ ```
1035
+ """
1036
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1037
+
1038
+ outputs = self.ernie(
1039
+ input_ids,
1040
+ attention_mask=attention_mask,
1041
+ token_type_ids=token_type_ids,
1042
+ task_type_ids=task_type_ids,
1043
+ position_ids=position_ids,
1044
+ head_mask=head_mask,
1045
+ inputs_embeds=inputs_embeds,
1046
+ output_attentions=output_attentions,
1047
+ output_hidden_states=output_hidden_states,
1048
+ return_dict=return_dict,
1049
+ )
1050
+
1051
+ sequence_output, pooled_output = outputs[:2]
1052
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1053
+
1054
+ total_loss = None
1055
+ if labels is not None and next_sentence_label is not None:
1056
+ loss_fct = CrossEntropyLoss()
1057
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1058
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1059
+ total_loss = masked_lm_loss + next_sentence_loss
1060
+
1061
+ if not return_dict:
1062
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1063
+ return ((total_loss,) + output) if total_loss is not None else output
1064
+
1065
+ return ErnieForPreTrainingOutput(
1066
+ loss=total_loss,
1067
+ prediction_logits=prediction_scores,
1068
+ seq_relationship_logits=seq_relationship_score,
1069
+ hidden_states=outputs.hidden_states,
1070
+ attentions=outputs.attentions,
1071
+ )
1072
+
1073
+
1074
+ @add_start_docstrings(
1075
+ """Ernie Model with a `language modeling` head on top for CLM fine-tuning.""", ERNIE_START_DOCSTRING
1076
+ )
1077
+ class ErnieForCausalLM(ErniePreTrainedModel):
1078
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
1079
+
1080
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->ErnieForCausalLM,Bert->Ernie,bert->ernie
1081
+ def __init__(self, config):
1082
+ super().__init__(config)
1083
+
1084
+ if not config.is_decoder:
1085
+ logger.warning("If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True`.")
1086
+
1087
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
1088
+ self.cls = ErnieOnlyMLMHead(config)
1089
+
1090
+ # Initialize weights and apply final processing
1091
+ self.post_init()
1092
+
1093
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings
1094
+ def get_output_embeddings(self):
1095
+ return self.cls.predictions.decoder
1096
+
1097
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings
1098
+ def set_output_embeddings(self, new_embeddings):
1099
+ self.cls.predictions.decoder = new_embeddings
1100
+
1101
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1102
+ @add_code_sample_docstrings(
1103
+ checkpoint=_CHECKPOINT_FOR_DOC,
1104
+ output_type=CausalLMOutputWithCrossAttentions,
1105
+ config_class=_CONFIG_FOR_DOC,
1106
+ )
1107
+ def forward(
1108
+ self,
1109
+ input_ids: Optional[torch.Tensor] = None,
1110
+ attention_mask: Optional[torch.Tensor] = None,
1111
+ token_type_ids: Optional[torch.Tensor] = None,
1112
+ task_type_ids: Optional[torch.Tensor] = None,
1113
+ position_ids: Optional[torch.Tensor] = None,
1114
+ head_mask: Optional[torch.Tensor] = None,
1115
+ inputs_embeds: Optional[torch.Tensor] = None,
1116
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1117
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1118
+ labels: Optional[torch.Tensor] = None,
1119
+ past_key_values: Optional[List[torch.Tensor]] = None,
1120
+ use_cache: Optional[bool] = None,
1121
+ output_attentions: Optional[bool] = None,
1122
+ output_hidden_states: Optional[bool] = None,
1123
+ return_dict: Optional[bool] = None,
1124
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1125
+ r"""
1126
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1127
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1128
+ the model is configured as a decoder.
1129
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1130
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1131
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1132
+
1133
+ - 1 for tokens that are **not masked**,
1134
+ - 0 for tokens that are **masked**.
1135
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1136
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1137
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1138
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1139
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1140
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1141
+
1142
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1143
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1144
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1145
+ use_cache (`bool`, *optional*):
1146
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1147
+ `past_key_values`).
1148
+ """
1149
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1150
+ if labels is not None:
1151
+ use_cache = False
1152
+
1153
+ outputs = self.ernie(
1154
+ input_ids,
1155
+ attention_mask=attention_mask,
1156
+ token_type_ids=token_type_ids,
1157
+ task_type_ids=task_type_ids,
1158
+ position_ids=position_ids,
1159
+ head_mask=head_mask,
1160
+ inputs_embeds=inputs_embeds,
1161
+ encoder_hidden_states=encoder_hidden_states,
1162
+ encoder_attention_mask=encoder_attention_mask,
1163
+ past_key_values=past_key_values,
1164
+ use_cache=use_cache,
1165
+ output_attentions=output_attentions,
1166
+ output_hidden_states=output_hidden_states,
1167
+ return_dict=return_dict,
1168
+ )
1169
+
1170
+ sequence_output = outputs[0]
1171
+ prediction_scores = self.cls(sequence_output)
1172
+
1173
+ lm_loss = None
1174
+ if labels is not None:
1175
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1176
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1177
+ labels = labels[:, 1:].contiguous()
1178
+ loss_fct = CrossEntropyLoss()
1179
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1180
+
1181
+ if not return_dict:
1182
+ output = (prediction_scores,) + outputs[2:]
1183
+ return ((lm_loss,) + output) if lm_loss is not None else output
1184
+
1185
+ return CausalLMOutputWithCrossAttentions(
1186
+ loss=lm_loss,
1187
+ logits=prediction_scores,
1188
+ past_key_values=outputs.past_key_values,
1189
+ hidden_states=outputs.hidden_states,
1190
+ attentions=outputs.attentions,
1191
+ cross_attentions=outputs.cross_attentions,
1192
+ )
1193
+
1194
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.prepare_inputs_for_generation
1195
+ def prepare_inputs_for_generation(
1196
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
1197
+ ):
1198
+ input_shape = input_ids.shape
1199
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1200
+ if attention_mask is None:
1201
+ attention_mask = input_ids.new_ones(input_shape)
1202
+
1203
+ # cut decoder_input_ids if past_key_values is used
1204
+ if past_key_values is not None:
1205
+ past_length = past_key_values[0][0].shape[2]
1206
+
1207
+ # Some generation methods already pass only the last input ID
1208
+ if input_ids.shape[1] > past_length:
1209
+ remove_prefix_length = past_length
1210
+ else:
1211
+ # Default to old behavior: keep only final ID
1212
+ remove_prefix_length = input_ids.shape[1] - 1
1213
+
1214
+ input_ids = input_ids[:, remove_prefix_length:]
1215
+
1216
+ return {
1217
+ "input_ids": input_ids,
1218
+ "attention_mask": attention_mask,
1219
+ "past_key_values": past_key_values,
1220
+ "use_cache": use_cache,
1221
+ }
1222
+
1223
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache
1224
+ def _reorder_cache(self, past_key_values, beam_idx):
1225
+ reordered_past = ()
1226
+ for layer_past in past_key_values:
1227
+ reordered_past += (
1228
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1229
+ )
1230
+ return reordered_past
1231
+
1232
+
1233
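A hedged sketch of standalone causal-LM use; as the warning in `__init__` notes, `is_decoder=True` must be set, and the checkpoint name is the one used in the docstrings above.

```python
import torch
from transformers import AutoTokenizer, ErnieForCausalLM

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
# Standalone use requires `is_decoder=True` (see the warning in __init__).
model = ErnieForCausalLM.from_pretrained("nghuyong/ernie-1.0-base-zh", is_decoder=True)

inputs = tokenizer("你好,世界", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])

print(outputs.loss)          # next-token loss; the label shift happens inside forward()
print(outputs.logits.shape)  # (batch_size, sequence_length, vocab_size)
```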
+ @add_start_docstrings("""Ernie Model with a `language modeling` head on top.""", ERNIE_START_DOCSTRING)
1234
+ class ErnieForMaskedLM(ErniePreTrainedModel):
1235
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
1236
+
1237
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->Ernie,bert->ernie
1238
+ def __init__(self, config):
1239
+ super().__init__(config)
1240
+
1241
+ if config.is_decoder:
1242
+ logger.warning(
1243
+ "If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for "
1244
+ "bi-directional self-attention."
1245
+ )
1246
+
1247
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
1248
+ self.cls = ErnieOnlyMLMHead(config)
1249
+
1250
+ # Initialize weights and apply final processing
1251
+ self.post_init()
1252
+
1253
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings
1254
+ def get_output_embeddings(self):
1255
+ return self.cls.predictions.decoder
1256
+
1257
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings
1258
+ def set_output_embeddings(self, new_embeddings):
1259
+ self.cls.predictions.decoder = new_embeddings
1260
+
1261
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1262
+ @add_code_sample_docstrings(
1263
+ checkpoint=_CHECKPOINT_FOR_DOC,
1264
+ output_type=MaskedLMOutput,
1265
+ config_class=_CONFIG_FOR_DOC,
1266
+ expected_output="'paris'",
1267
+ expected_loss=0.88,
1268
+ )
1269
+ def forward(
1270
+ self,
1271
+ input_ids: Optional[torch.Tensor] = None,
1272
+ attention_mask: Optional[torch.Tensor] = None,
1273
+ token_type_ids: Optional[torch.Tensor] = None,
1274
+ task_type_ids: Optional[torch.Tensor] = None,
1275
+ position_ids: Optional[torch.Tensor] = None,
1276
+ head_mask: Optional[torch.Tensor] = None,
1277
+ inputs_embeds: Optional[torch.Tensor] = None,
1278
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1279
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1280
+ labels: Optional[torch.Tensor] = None,
1281
+ output_attentions: Optional[bool] = None,
1282
+ output_hidden_states: Optional[bool] = None,
1283
+ return_dict: Optional[bool] = None,
1284
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1285
+ r"""
1286
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1287
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1288
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1289
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1290
+ """
1291
+
1292
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1293
+
1294
+ outputs = self.ernie(
1295
+ input_ids,
1296
+ attention_mask=attention_mask,
1297
+ token_type_ids=token_type_ids,
1298
+ task_type_ids=task_type_ids,
1299
+ position_ids=position_ids,
1300
+ head_mask=head_mask,
1301
+ inputs_embeds=inputs_embeds,
1302
+ encoder_hidden_states=encoder_hidden_states,
1303
+ encoder_attention_mask=encoder_attention_mask,
1304
+ output_attentions=output_attentions,
1305
+ output_hidden_states=output_hidden_states,
1306
+ return_dict=return_dict,
1307
+ )
1308
+
1309
+ sequence_output = outputs[0]
1310
+ prediction_scores = self.cls(sequence_output)
1311
+
1312
+ masked_lm_loss = None
1313
+ if labels is not None:
1314
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1315
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1316
+
1317
+ if not return_dict:
1318
+ output = (prediction_scores,) + outputs[2:]
1319
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1320
+
1321
+ return MaskedLMOutput(
1322
+ loss=masked_lm_loss,
1323
+ logits=prediction_scores,
1324
+ hidden_states=outputs.hidden_states,
1325
+ attentions=outputs.attentions,
1326
+ )
1327
+
1328
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.prepare_inputs_for_generation
1329
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1330
+ input_shape = input_ids.shape
1331
+ effective_batch_size = input_shape[0]
1332
+
1333
+ # add a dummy token
1334
+ if self.config.pad_token_id is None:
1335
+ raise ValueError("The PAD token should be defined for generation")
1336
+
1337
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1338
+ dummy_token = torch.full(
1339
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1340
+ )
1341
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1342
+
1343
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1344
+
1345
+
1346
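A minimal fill-mask sketch against the masked-LM head above; the exact prediction depends on the checkpoint.

```python
import torch
from transformers import AutoTokenizer, ErnieForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForMaskedLM.from_pretrained("nghuyong/ernie-1.0-base-zh")

inputs = tokenizer(f"巴黎是{tokenizer.mask_token}国的首都。", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, sequence_length, vocab_size)

# Take the highest-scoring token at the [MASK] position.
mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```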
+ @add_start_docstrings(
1347
+ """Ernie Model with a `next sentence prediction (classification)` head on top.""",
1348
+ ERNIE_START_DOCSTRING,
1349
+ )
1350
+ class ErnieForNextSentencePrediction(ErniePreTrainedModel):
1351
+ # Copied from transformers.models.bert.modeling_bert.BertForNextSentencePrediction.__init__ with Bert->Ernie,bert->ernie
1352
+ def __init__(self, config):
1353
+ super().__init__(config)
1354
+
1355
+ self.ernie = ErnieModel(config)
1356
+ self.cls = ErnieOnlyNSPHead(config)
1357
+
1358
+ # Initialize weights and apply final processing
1359
+ self.post_init()
1360
+
1361
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1362
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1363
+ def forward(
1364
+ self,
1365
+ input_ids: Optional[torch.Tensor] = None,
1366
+ attention_mask: Optional[torch.Tensor] = None,
1367
+ token_type_ids: Optional[torch.Tensor] = None,
1368
+ task_type_ids: Optional[torch.Tensor] = None,
1369
+ position_ids: Optional[torch.Tensor] = None,
1370
+ head_mask: Optional[torch.Tensor] = None,
1371
+ inputs_embeds: Optional[torch.Tensor] = None,
1372
+ labels: Optional[torch.Tensor] = None,
1373
+ output_attentions: Optional[bool] = None,
1374
+ output_hidden_states: Optional[bool] = None,
1375
+ return_dict: Optional[bool] = None,
1376
+ **kwargs,
1377
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
1378
+ r"""
1379
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1380
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1381
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1382
+
1383
+ - 0 indicates sequence B is a continuation of sequence A,
1384
+ - 1 indicates sequence B is a random sequence.
1385
+
1386
+ Returns:
1387
+
1388
+ Example:
1389
+
1390
+ ```python
1391
+ >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
1392
+ >>> import torch
1393
+
1394
+ >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
1395
+ >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")
1396
+
1397
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1398
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1399
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1400
+
1401
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1402
+ >>> logits = outputs.logits
1403
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1404
+ ```
1405
+ """
1406
+
1407
+ if "next_sentence_label" in kwargs:
1408
+ warnings.warn(
1409
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1410
+ " `labels` instead.",
1411
+ FutureWarning,
1412
+ )
1413
+ labels = kwargs.pop("next_sentence_label")
1414
+
1415
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1416
+
1417
+ outputs = self.ernie(
1418
+ input_ids,
1419
+ attention_mask=attention_mask,
1420
+ token_type_ids=token_type_ids,
1421
+ task_type_ids=task_type_ids,
1422
+ position_ids=position_ids,
1423
+ head_mask=head_mask,
1424
+ inputs_embeds=inputs_embeds,
1425
+ output_attentions=output_attentions,
1426
+ output_hidden_states=output_hidden_states,
1427
+ return_dict=return_dict,
1428
+ )
1429
+
1430
+ pooled_output = outputs[1]
1431
+
1432
+ seq_relationship_scores = self.cls(pooled_output)
1433
+
1434
+ next_sentence_loss = None
1435
+ if labels is not None:
1436
+ loss_fct = CrossEntropyLoss()
1437
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1438
+
1439
+ if not return_dict:
1440
+ output = (seq_relationship_scores,) + outputs[2:]
1441
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1442
+
1443
+ return NextSentencePredictorOutput(
1444
+ loss=next_sentence_loss,
1445
+ logits=seq_relationship_scores,
1446
+ hidden_states=outputs.hidden_states,
1447
+ attentions=outputs.attentions,
1448
+ )
1449
+
1450
+
1451
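Complementing the docstring example, a short sketch that turns the two NSP logits into probabilities (index 0 = continuation, index 1 = random, as documented above).

```python
import torch
from transformers import AutoTokenizer, ErnieForNextSentencePrediction

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")

encoding = tokenizer("今天天气很好。", "我们去公园散步吧。", return_tensors="pt")
with torch.no_grad():
    logits = model(**encoding).logits  # shape (1, 2)

probs = torch.softmax(logits, dim=-1)
print(f"P(B continues A) = {probs[0, 0]:.3f}, P(B is random) = {probs[0, 1]:.3f}")
```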
+ @add_start_docstrings(
1452
+ """
1453
+ Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1454
+ output) e.g. for GLUE tasks.
1455
+ """,
1456
+ ERNIE_START_DOCSTRING,
1457
+ )
1458
+ class ErnieForSequenceClassification(ErniePreTrainedModel):
1459
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->Ernie,bert->ernie
1460
+ def __init__(self, config):
1461
+ super().__init__(config)
1462
+ self.num_labels = config.num_labels
1463
+ self.config = config
1464
+
1465
+ self.ernie = ErnieModel(config)
1466
+ classifier_dropout = (
1467
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1468
+ )
1469
+ self.dropout = nn.Dropout(classifier_dropout)
1470
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1471
+
1472
+ # Initialize weights and apply final processing
1473
+ self.post_init()
1474
+
1475
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1476
+ def forward(
1477
+ self,
1478
+ input_ids: Optional[torch.Tensor] = None,
1479
+ attention_mask: Optional[torch.Tensor] = None,
1480
+ token_type_ids: Optional[torch.Tensor] = None,
1481
+ task_type_ids: Optional[torch.Tensor] = None,
1482
+ position_ids: Optional[torch.Tensor] = None,
1483
+ head_mask: Optional[torch.Tensor] = None,
1484
+ inputs_embeds: Optional[torch.Tensor] = None,
1485
+ labels: Optional[torch.Tensor] = None,
1486
+ output_attentions: Optional[bool] = None,
1487
+ output_hidden_states: Optional[bool] = None,
1488
+ return_dict: Optional[bool] = None,
1489
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1490
+ r"""
1491
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1492
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1493
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1494
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1495
+ """
1496
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1497
+
1498
+ outputs = self.ernie(
1499
+ input_ids,
1500
+ attention_mask=attention_mask,
1501
+ token_type_ids=token_type_ids,
1502
+ task_type_ids=task_type_ids,
1503
+ position_ids=position_ids,
1504
+ head_mask=head_mask,
1505
+ inputs_embeds=inputs_embeds,
1506
+ output_attentions=output_attentions,
1507
+ output_hidden_states=output_hidden_states,
1508
+ return_dict=return_dict,
1509
+ )
1510
+
1511
+ pooled_output = outputs[1]
1512
+
1513
+ pooled_output = self.dropout(pooled_output)
1514
+ logits = self.classifier(pooled_output)
1515
+
1516
+ loss = None
1517
+ if labels is not None:
1518
+ if self.config.problem_type is None:
1519
+ if self.num_labels == 1:
1520
+ self.config.problem_type = "regression"
1521
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1522
+ self.config.problem_type = "single_label_classification"
1523
+ else:
1524
+ self.config.problem_type = "multi_label_classification"
1525
+
1526
+ if self.config.problem_type == "regression":
1527
+ loss_fct = MSELoss()
1528
+ if self.num_labels == 1:
1529
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1530
+ else:
1531
+ loss = loss_fct(logits, labels)
1532
+ elif self.config.problem_type == "single_label_classification":
1533
+ loss_fct = CrossEntropyLoss()
1534
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1535
+ elif self.config.problem_type == "multi_label_classification":
1536
+ loss_fct = BCEWithLogitsLoss()
1537
+ loss = loss_fct(logits, labels)
1538
+ if not return_dict:
1539
+ output = (logits,) + outputs[2:]
1540
+ return ((loss,) + output) if loss is not None else output
1541
+
1542
+ return SequenceClassifierOutput(
1543
+ loss=loss,
1544
+ logits=logits,
1545
+ hidden_states=outputs.hidden_states,
1546
+ attentions=outputs.attentions,
1547
+ )
1548
+
1549
+
1550
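A sketch of the single-label branch above: with `num_labels=2` and integer labels, `problem_type` resolves to `single_label_classification` and the cross-entropy path is taken. The classification head is freshly initialized here, so the numbers are illustrative only.

```python
import torch
from transformers import AutoTokenizer, ErnieForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForSequenceClassification.from_pretrained("nghuyong/ernie-1.0-base-zh", num_labels=2)

inputs = tokenizer("这部电影很好看", return_tensors="pt")
outputs = model(**inputs, labels=torch.tensor([1]))  # long labels -> CrossEntropyLoss branch

print(outputs.logits.shape)  # (1, 2)
print(outputs.loss)          # scalar cross-entropy loss
```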
+ @add_start_docstrings(
1551
+ """
1552
+ Ernie Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1553
+ softmax) e.g. for RocStories/SWAG tasks.
1554
+ """,
1555
+ ERNIE_START_DOCSTRING,
1556
+ )
1557
+ class ErnieForMultipleChoice(ErniePreTrainedModel):
1558
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->Ernie,bert->ernie
1559
+ def __init__(self, config):
1560
+ super().__init__(config)
1561
+
1562
+ self.ernie = ErnieModel(config)
1563
+ classifier_dropout = (
1564
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1565
+ )
1566
+ self.dropout = nn.Dropout(classifier_dropout)
1567
+ self.classifier = nn.Linear(config.hidden_size, 1)
1568
+
1569
+ # Initialize weights and apply final processing
1570
+ self.post_init()
1571
+
1572
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1573
+ @add_code_sample_docstrings(
1574
+ checkpoint=_CHECKPOINT_FOR_DOC,
1575
+ output_type=MultipleChoiceModelOutput,
1576
+ config_class=_CONFIG_FOR_DOC,
1577
+ )
1578
+ def forward(
1579
+ self,
1580
+ input_ids: Optional[torch.Tensor] = None,
1581
+ attention_mask: Optional[torch.Tensor] = None,
1582
+ token_type_ids: Optional[torch.Tensor] = None,
1583
+ task_type_ids: Optional[torch.Tensor] = None,
1584
+ position_ids: Optional[torch.Tensor] = None,
1585
+ head_mask: Optional[torch.Tensor] = None,
1586
+ inputs_embeds: Optional[torch.Tensor] = None,
1587
+ labels: Optional[torch.Tensor] = None,
1588
+ output_attentions: Optional[bool] = None,
1589
+ output_hidden_states: Optional[bool] = None,
1590
+ return_dict: Optional[bool] = None,
1591
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1592
+ r"""
1593
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1594
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1595
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1596
+ `input_ids` above)
1597
+ """
1598
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1599
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1600
+
1601
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1602
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1603
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1604
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1605
+ inputs_embeds = (
1606
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1607
+ if inputs_embeds is not None
1608
+ else None
1609
+ )
1610
+
1611
+ outputs = self.ernie(
1612
+ input_ids,
1613
+ attention_mask=attention_mask,
1614
+ token_type_ids=token_type_ids,
1615
+ task_type_ids=task_type_ids,
1616
+ position_ids=position_ids,
1617
+ head_mask=head_mask,
1618
+ inputs_embeds=inputs_embeds,
1619
+ output_attentions=output_attentions,
1620
+ output_hidden_states=output_hidden_states,
1621
+ return_dict=return_dict,
1622
+ )
1623
+
1624
+ pooled_output = outputs[1]
1625
+
1626
+ pooled_output = self.dropout(pooled_output)
1627
+ logits = self.classifier(pooled_output)
1628
+ reshaped_logits = logits.view(-1, num_choices)
1629
+
1630
+ loss = None
1631
+ if labels is not None:
1632
+ loss_fct = CrossEntropyLoss()
1633
+ loss = loss_fct(reshaped_logits, labels)
1634
+
1635
+ if not return_dict:
1636
+ output = (reshaped_logits,) + outputs[2:]
1637
+ return ((loss,) + output) if loss is not None else output
1638
+
1639
+ return MultipleChoiceModelOutput(
1640
+ loss=loss,
1641
+ logits=reshaped_logits,
1642
+ hidden_states=outputs.hidden_states,
1643
+ attentions=outputs.attentions,
1644
+ )
1645
+
1646
+
1647
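The forward above flattens `(batch, num_choices, seq_len)` inputs before the encoder; a minimal sketch of building that shape (the choice head is randomly initialized, so the selected answer is arbitrary).

```python
import torch
from transformers import AutoTokenizer, ErnieForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForMultipleChoice.from_pretrained("nghuyong/ernie-1.0-base-zh")

prompt = "天空是什么颜色的?"
choices = ["蓝色。", "三角形。"]

# Pair the prompt with every choice, then add the batch dimension expected by forward().
encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # (1, num_choices, seq_len)

outputs = model(**inputs, labels=torch.tensor([0]))
print(outputs.logits)  # (1, num_choices), one score per choice
```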
+ @add_start_docstrings(
1648
+ """
1649
+ Ernie Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1650
+ Named-Entity-Recognition (NER) tasks.
1651
+ """,
1652
+ ERNIE_START_DOCSTRING,
1653
+ )
1654
+ class ErnieForTokenClassification(ErniePreTrainedModel):
1655
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->Ernie,bert->ernie
1656
+ def __init__(self, config):
1657
+ super().__init__(config)
1658
+ self.num_labels = config.num_labels
1659
+
1660
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
1661
+ classifier_dropout = (
1662
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1663
+ )
1664
+ self.dropout = nn.Dropout(classifier_dropout)
1665
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1666
+
1667
+ # Initialize weights and apply final processing
1668
+ self.post_init()
1669
+
1670
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1671
+ def forward(
1672
+ self,
1673
+ input_ids: Optional[torch.Tensor] = None,
1674
+ attention_mask: Optional[torch.Tensor] = None,
1675
+ token_type_ids: Optional[torch.Tensor] = None,
1676
+ task_type_ids: Optional[torch.Tensor] = None,
1677
+ position_ids: Optional[torch.Tensor] = None,
1678
+ head_mask: Optional[torch.Tensor] = None,
1679
+ inputs_embeds: Optional[torch.Tensor] = None,
1680
+ labels: Optional[torch.Tensor] = None,
1681
+ output_attentions: Optional[bool] = None,
1682
+ output_hidden_states: Optional[bool] = None,
1683
+ return_dict: Optional[bool] = None,
1684
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1685
+ r"""
1686
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1687
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1688
+ """
1689
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1690
+
1691
+ outputs = self.ernie(
1692
+ input_ids,
1693
+ attention_mask=attention_mask,
1694
+ token_type_ids=token_type_ids,
1695
+ task_type_ids=task_type_ids,
1696
+ position_ids=position_ids,
1697
+ head_mask=head_mask,
1698
+ inputs_embeds=inputs_embeds,
1699
+ output_attentions=output_attentions,
1700
+ output_hidden_states=output_hidden_states,
1701
+ return_dict=return_dict,
1702
+ )
1703
+
1704
+ sequence_output = outputs[0]
1705
+
1706
+ sequence_output = self.dropout(sequence_output)
1707
+ logits = self.classifier(sequence_output)
1708
+
1709
+ loss = None
1710
+ if labels is not None:
1711
+ loss_fct = CrossEntropyLoss()
1712
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1713
+
1714
+ if not return_dict:
1715
+ output = (logits,) + outputs[2:]
1716
+ return ((loss,) + output) if loss is not None else output
1717
+
1718
+ return TokenClassifierOutput(
1719
+ loss=loss,
1720
+ logits=logits,
1721
+ hidden_states=outputs.hidden_states,
1722
+ attentions=outputs.attentions,
1723
+ )
1724
+
1725
+
1726
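A per-token tagging sketch; `num_labels=5` is an arbitrary placeholder for an NER label set, and the untrained head makes the predictions meaningless beyond showing shapes.

```python
import torch
from transformers import AutoTokenizer, ErnieForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForTokenClassification.from_pretrained("nghuyong/ernie-1.0-base-zh", num_labels=5)

inputs = tokenizer("华盛顿是美国的首都", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, sequence_length, num_labels)

print(logits.argmax(dim=-1))  # one label id per token
```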
+ @add_start_docstrings(
1727
+ """
1728
+ Ernie Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1729
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1730
+ """,
1731
+ ERNIE_START_DOCSTRING,
1732
+ )
1733
+ class ErnieForQuestionAnswering(ErniePreTrainedModel):
1734
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->Ernie,bert->ernie
1735
+ def __init__(self, config):
1736
+ super().__init__(config)
1737
+ self.num_labels = config.num_labels
1738
+
1739
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
1740
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1741
+
1742
+ # Initialize weights and apply final processing
1743
+ self.post_init()
1744
+
1745
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1746
+ def forward(
1747
+ self,
1748
+ input_ids: Optional[torch.Tensor] = None,
1749
+ attention_mask: Optional[torch.Tensor] = None,
1750
+ token_type_ids: Optional[torch.Tensor] = None,
1751
+ task_type_ids: Optional[torch.Tensor] = None,
1752
+ position_ids: Optional[torch.Tensor] = None,
1753
+ head_mask: Optional[torch.Tensor] = None,
1754
+ inputs_embeds: Optional[torch.Tensor] = None,
1755
+ start_positions: Optional[torch.Tensor] = None,
1756
+ end_positions: Optional[torch.Tensor] = None,
1757
+ output_attentions: Optional[bool] = None,
1758
+ output_hidden_states: Optional[bool] = None,
1759
+ return_dict: Optional[bool] = None,
1760
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1761
+ r"""
1762
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1763
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1764
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1765
+ are not taken into account for computing the loss.
1766
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1767
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1768
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1769
+ are not taken into account for computing the loss.
1770
+ """
1771
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1772
+
1773
+ outputs = self.ernie(
1774
+ input_ids,
1775
+ attention_mask=attention_mask,
1776
+ token_type_ids=token_type_ids,
1777
+ task_type_ids=task_type_ids,
1778
+ position_ids=position_ids,
1779
+ head_mask=head_mask,
1780
+ inputs_embeds=inputs_embeds,
1781
+ output_attentions=output_attentions,
1782
+ output_hidden_states=output_hidden_states,
1783
+ return_dict=return_dict,
1784
+ )
1785
+
1786
+ sequence_output = outputs[0]
1787
+
1788
+ logits = self.qa_outputs(sequence_output)
1789
+ start_logits, end_logits = logits.split(1, dim=-1)
1790
+ start_logits = start_logits.squeeze(-1).contiguous()
1791
+ end_logits = end_logits.squeeze(-1).contiguous()
1792
+
1793
+ total_loss = None
1794
+ if start_positions is not None and end_positions is not None:
1795
+ # If we are on multi-GPU, split add a dimension
1796
+ if len(start_positions.size()) > 1:
1797
+ start_positions = start_positions.squeeze(-1)
1798
+ if len(end_positions.size()) > 1:
1799
+ end_positions = end_positions.squeeze(-1)
1800
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1801
+ ignored_index = start_logits.size(1)
1802
+ start_positions = start_positions.clamp(0, ignored_index)
1803
+ end_positions = end_positions.clamp(0, ignored_index)
1804
+
1805
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1806
+ start_loss = loss_fct(start_logits, start_positions)
1807
+ end_loss = loss_fct(end_logits, end_positions)
1808
+ total_loss = (start_loss + end_loss) / 2
1809
+
1810
+ if not return_dict:
1811
+ output = (start_logits, end_logits) + outputs[2:]
1812
+ return ((total_loss,) + output) if total_loss is not None else output
1813
+
1814
+ return QuestionAnsweringModelOutput(
1815
+ loss=total_loss,
1816
+ start_logits=start_logits,
1817
+ end_logits=end_logits,
1818
+ hidden_states=outputs.hidden_states,
1819
+ attentions=outputs.attentions,
1820
+ )
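An extractive-QA sketch over the span head above; without fine-tuning, the decoded span is arbitrary, so this only demonstrates the start/end-logits API.

```python
import torch
from transformers import AutoTokenizer, ErnieForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForQuestionAnswering.from_pretrained("nghuyong/ernie-1.0-base-zh")

question, context = "中国的首都是哪里?", "中国的首都是北京。"
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Most likely start and end positions, decoded back to text.
start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```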
venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/configuration_mgp_str.cpython-310.pyc ADDED
Binary file (5.03 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/modeling_mgp_str.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mgp_str/processing_mgp_str.py ADDED
@@ -0,0 +1,230 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Processor class for MGP-STR."""
16
+
17
+ import warnings
18
+
19
+ from transformers import AutoTokenizer
20
+ from transformers.utils import is_torch_available
21
+ from transformers.utils.generic import ExplicitEnum
22
+
23
+ from ...processing_utils import ProcessorMixin
24
+
25
+
26
+ if is_torch_available():
27
+ import torch
28
+
29
+
30
+ class DecodeType(ExplicitEnum):
31
+ CHARACTER = "char"
32
+ BPE = "bpe"
33
+ WORDPIECE = "wp"
34
+
35
+
36
+ SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
37
+
38
+
39
+ class MgpstrProcessor(ProcessorMixin):
40
+ r"""
41
+ Constructs a MGP-STR processor which wraps an image processor and MGP-STR tokenizers into a single processor.
42
+
43
+ [`MgpstrProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`MgpstrTokenizer`]. See the
44
+ [`~MgpstrProcessor.__call__`] and [`~MgpstrProcessor.batch_decode`] for more information.
45
+
46
+ Args:
47
+ image_processor (`ViTImageProcessor`, *optional*):
48
+ An instance of `ViTImageProcessor`. The image processor is a required input.
49
+ tokenizer ([`MgpstrTokenizer`], *optional*):
50
+ The tokenizer is a required input.
51
+ """
52
+
53
+ attributes = ["image_processor", "char_tokenizer"]
54
+ image_processor_class = "ViTImageProcessor"
55
+ char_tokenizer_class = "MgpstrTokenizer"
56
+
57
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
58
+ feature_extractor = None
59
+ if "feature_extractor" in kwargs:
60
+ warnings.warn(
61
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
62
+ " instead.",
63
+ FutureWarning,
64
+ )
65
+ feature_extractor = kwargs.pop("feature_extractor")
66
+
67
+ image_processor = image_processor if image_processor is not None else feature_extractor
68
+ if image_processor is None:
69
+ raise ValueError("You need to specify an `image_processor`.")
70
+ if tokenizer is None:
71
+ raise ValueError("You need to specify a `tokenizer`.")
72
+
73
+ self.char_tokenizer = tokenizer
74
+ self.bpe_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
75
+ self.wp_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
76
+
77
+ super().__init__(image_processor, tokenizer)
78
+
79
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
80
+ """
81
+ When used in normal mode, this method forwards all its arguments to ViTImageProcessor's
82
+ [`~ViTImageProcessor.__call__`] and returns its output. This method also forwards the `text` and `kwargs`
83
+ arguments to MgpstrTokenizer's [`~MgpstrTokenizer.__call__`] if `text` is not `None` to encode the text. Please
84
+ refer to the docstring of the above methods for more information.
85
+ """
86
+ if images is None and text is None:
87
+ raise ValueError("You need to specify either an `images` or `text` input to process.")
88
+
89
+ if images is not None:
90
+ inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
91
+ if text is not None:
92
+ encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
93
+
94
+ if text is None:
95
+ return inputs
96
+ elif images is None:
97
+ return encodings
98
+ else:
99
+ inputs["labels"] = encodings["input_ids"]
100
+ return inputs
101
+
102
+ def batch_decode(self, sequences):
103
+ """
104
+ Convert a list of lists of token ids into a list of strings by calling decode.
105
+
106
+ Args:
107
+ sequences (`torch.Tensor`):
108
+ List of tokenized input ids.
109
+
110
+ Returns:
111
+ `Dict[str, any]`: Dictionary of all the outputs of the decoded results.
112
+ generated_text (`List[str]`): The final results after fusion of char, bpe, and wp. scores
113
+ (`List[float]`): The final scores after fusion of char, bpe, and wp. char_preds (`List[str]`): The list
114
+ of character decoded sentences. bpe_preds (`List[str]`): The list of bpe decoded sentences. wp_preds
115
+ (`List[str]`): The list of wp decoded sentences.
116
+
117
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
118
+ refer to the docstring of this method for more information.
119
+ """
120
+ char_preds, bpe_preds, wp_preds = sequences
121
+ batch_size = char_preds.size(0)
122
+
123
+ char_strs, char_scores = self._decode_helper(char_preds, "char")
124
+ bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
125
+ wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
126
+
127
+ final_strs = []
128
+ final_scores = []
129
+ for i in range(batch_size):
130
+ scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
131
+ strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
132
+ max_score_index = scores.index(max(scores))
133
+ final_strs.append(strs[max_score_index])
134
+ final_scores.append(scores[max_score_index])
135
+
136
+ out = {}
137
+ out["generated_text"] = final_strs
138
+ out["scores"] = final_scores
139
+ out["char_preds"] = char_strs
140
+ out["bpe_preds"] = bpe_strs
141
+ out["wp_preds"] = wp_strs
142
+ return out
143
+
144
+ def _decode_helper(self, pred_logits, format):
145
+ """
146
+ Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer.
147
+
148
+ Args:
149
+ pred_logits (`torch.Tensor`):
150
+ List of model prediction logits.
151
+ format (`Union[DecoderType, str]`):
152
+ Type of model prediction. Must be one of ['char', 'bpe', 'wp'].
153
+ Returns:
154
+ `tuple`:
155
+ dec_strs(`str`): The decode strings of model prediction. conf_scores(`List[float]`): The confidence
156
+ score of model prediction.
157
+ """
158
+ if format == DecodeType.CHARACTER:
159
+ decoder = self.char_decode
160
+ eos_token = 1
161
+ eos_str = "[s]"
162
+ elif format == DecodeType.BPE:
163
+ decoder = self.bpe_decode
164
+ eos_token = 2
165
+ eos_str = "#"
166
+ elif format == DecodeType.WORDPIECE:
167
+ decoder = self.wp_decode
168
+ eos_token = 102
169
+ eos_str = "[SEP]"
170
+ else:
171
+ raise ValueError(f"Format {format} is not supported.")
172
+
173
+ dec_strs, conf_scores = [], []
174
+ batch_size = pred_logits.size(0)
175
+ batch_max_length = pred_logits.size(1)
176
+ _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
177
+ preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
178
+ preds_str = decoder(preds_index)
179
+ preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
180
+ preds_max_prob = preds_max_prob[:, 1:]
181
+
182
+ for index in range(batch_size):
183
+ pred_eos = preds_str[index].find(eos_str)
184
+ pred = preds_str[index][:pred_eos]
185
+ pred_index = preds_index[index].cpu().tolist()
186
+ pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
187
+ pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
188
+ confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
189
+ dec_strs.append(pred)
190
+ conf_scores.append(confidence_score)
191
+
192
+ return dec_strs, conf_scores
193
+
194
+ def char_decode(self, sequences):
195
+ """
196
+ Convert a list of lists of char token ids into a list of strings by calling char tokenizer.
197
+
198
+ Args:
199
+ sequences (`torch.Tensor`):
200
+ List of tokenized input ids.
201
+ Returns:
202
+ `List[str]`: The list of char decoded sentences.
203
+ """
204
+ decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
205
+ return decode_strs
206
+
207
+ def bpe_decode(self, sequences):
208
+ """
209
+ Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer.
210
+
211
+ Args:
212
+ sequences (`torch.Tensor`):
213
+ List of tokenized input ids.
214
+ Returns:
215
+ `List[str]`: The list of bpe decoded sentences.
216
+ """
217
+ return self.bpe_tokenizer.batch_decode(sequences)
218
+
219
+ def wp_decode(self, sequences):
220
+ """
221
+ Convert a list of lists of word piece token ids into a list of strings by calling word piece tokenizer.
222
+
223
+ Args:
224
+ sequences (`torch.Tensor`):
225
+ List of tokenized input ids.
226
+ Returns:
227
+ `List[str]`: The list of wp decoded sentences.
228
+ """
229
+ decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
230
+ return decode_strs
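A usage sketch of the processor end to end: the model emits a `(char, bpe, wp)` logits tuple and `batch_decode` fuses them as shown above. The `alibaba-damo/mgp-str-base` checkpoint and the image URL are assumptions here, not taken from this file; any scene-text image works.

```python
import requests
import torch
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")           # assumed checkpoint
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"  # placeholder scene-text image
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    logits = model(pixel_values).logits  # tuple: (char_logits, bpe_logits, wp_logits)

decoded = processor.batch_decode(logits)
print(decoded["generated_text"])  # highest-confidence string after fusing the three heads
```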
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__init__.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_recurrent_gemma": ["RecurrentGemmaConfig"],
25
+ }
26
+
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_recurrent_gemma"] = [
35
+ "RecurrentGemmaForCausalLM",
36
+ "RecurrentGemmaModel",
37
+ "RecurrentGemmaPreTrainedModel",
38
+ ]
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_recurrent_gemma import RecurrentGemmaConfig
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ from .modeling_recurrent_gemma import (
51
+ RecurrentGemmaForCausalLM,
52
+ RecurrentGemmaModel,
53
+ RecurrentGemmaPreTrainedModel,
54
+ )
55
+
56
+ else:
57
+ import sys
58
+
59
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
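A small sketch of what the `_LazyModule` indirection buys: the submodules (and torch) are only imported when an attribute is first accessed.

```python
import transformers.models.recurrent_gemma as rg

# The package object is a _LazyModule; nothing heavy has been imported yet.
print(type(rg).__name__)  # _LazyModule

# First attribute access pulls in configuration_recurrent_gemma behind the scenes.
config = rg.RecurrentGemmaConfig()
print(config.model_type)  # "recurrent_gemma"
```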
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (868 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/configuration_recurrent_gemma.cpython-310.pyc ADDED
Binary file (6.71 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/convert_recurrent_gemma_to_hf.cpython-310.pyc ADDED
Binary file (5.66 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/modeling_recurrent_gemma.cpython-310.pyc ADDED
Binary file (31.4 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py ADDED
@@ -0,0 +1,158 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ RecurrentGemma model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class RecurrentGemmaConfig(PretrainedConfig):
25
+ r"""
26
+ This is the configuration class to store the configuration of a [`RecurrentGemmaModel`]. It is used to instantiate a RecurrentGemma
27
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
28
+ defaults will yield a similar configuration to that of the RecurrentGemma-7B.
29
+
30
+ e.g. [google/recurrentgemma-2b](https://huggingface.co/google/recurrentgemma-2b)
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+
36
+ Args:
37
+ num_hidden_layers (`int`, *optional*, defaults to 26):
38
+ The number of hidden layers in the model.
39
+ vocab_size (`int`, *optional*, defaults to 256000):
40
+ Vocabulary size of the RecurrentGemma model. Defines the number of
41
+ different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`RecurrentGemmaModel`]
43
+ hidden_size (`int`, *optional*, defaults to 2560):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 7680):
46
+ Dimension of the MLP representations.
47
+ num_attention_heads (`int`, *optional*, defaults to 10):
48
+ The number of heads for the attention block and the number of
49
+ heads/blocks for the block-diagonal layers used in the RG-LRU gates.
50
+ This number must divide `hidden_size` and `lru_width`.
51
+ lru_width (`int` or `None`, *optional*):
52
+ Dimension of the hidden representations of the RG-LRU. If `None`
53
+ this will be set to `hidden_size`.
54
+ Whether to scale the output of the embeddings by `sqrt(hidden_size)`.
55
+ attention_window_size (`int`, *optional*, defaults to 2048):
56
+ The size of the attention window used in the attention block.
57
+ conv1d_width (`int`, *optional*, defaults to 4):
58
+ The kernel size of conv1d layers used in the recurrent blocks.
59
+ logits_soft_cap (`float`, *optional*, defaults to 30.0):
60
+ The value at which the logits should be soft-capped to after the transformer and LM-head computation in the Causal LM architecture.
61
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
62
+ The epsilon used by the rms normalization layers.
63
+ use_cache (`bool`, *optional*, defaults to `True`):
64
+ Whether the model should return the last key/values
65
+ attentions (not used by all models). Only
66
+ relevant if `config.is_decoder=True`.
67
+ pad_token_id (`int`, *optional*, defaults to 0):
68
+ Padding token id.
69
+ eos_token_id (`int`, *optional*, defaults to 1):
70
+ End of stream token id.
71
+ bos_token_id (`int`, *optional*, defaults to 2):
72
+ Beginning of stream token id.
73
+ hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
74
+ The hidden activation used in the recurrent block as well as the MLP layer of the decoder layers.
75
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
76
+ The partial rotary factor used in the initialization of the rotary embeddings.
77
+ rope_theta (`float`, *optional*, defaults to 10000.0):
78
+ The base period of the RoPE embeddings.
79
+ block_types (`List[str]`, *optional*, defaults to `('recurrent', 'recurrent', 'attention')`):
80
+ List of aleternating blocks that will be repeated to initialize the `temporal_block` layer.
81
+ attention_dropout (`float`, *optional*, defaults to 0.0): dropout value to use after the attention softmax.
82
+ num_key_value_heads (`16`, *optional*, defaults to 16): Number of key value heads to use GQA.
83
+ attention_bias (`bool`, *optional*, defaults to `False`): whether or not the linear q,k,v of the Attention layer should have bias
84
+ w_init_variance_scale (`float`, *optional*, defaults to 0.01): weight initialization variance.
85
+ ```python
86
+ >>> from transformers import RecurrentGemmaModel, RecurrentGemmaConfig
87
+
88
+ >>> # Initializing a RecurrentGemma recurrentgemma-2b style configuration
89
+ >>> configuration = RecurrentGemmaConfig()
90
+
91
+ >>> # Initializing a model from the recurrentgemma-2b style configuration
92
+ >>> model = RecurrentGemmaModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "recurrent_gemma"
99
+
100
+ def __init__(
101
+ self,
102
+ num_hidden_layers=26,
103
+ vocab_size=256000,
104
+ hidden_size=2560,
105
+ intermediate_size=3 * 2560,
106
+ num_attention_heads=10,
107
+ lru_width=None,
108
+ attention_window_size=2048,
109
+ conv1d_width=4,
110
+ logits_soft_cap=30.0,
111
+ rms_norm_eps=1e-6,
112
+ use_cache=True,
113
+ pad_token_id=0,
114
+ eos_token_id=1,
115
+ bos_token_id=2,
116
+ hidden_activation="gelu_pytorch_tanh",
117
+ partial_rotary_factor=0.5,
118
+ rope_theta=10000.0,
119
+ block_types=("recurrent", "recurrent", "attention"),
120
+ attention_dropout=0.0,
121
+ num_key_value_heads=None,
122
+ attention_bias=False,
123
+ w_init_variance_scale=0.01,
124
+ **kwargs,
125
+ ):
126
+ self.num_hidden_layers = num_hidden_layers
127
+ self.vocab_size = vocab_size
128
+ self.hidden_size = hidden_size
129
+ self.intermediate_size = intermediate_size
130
+ self.num_attention_heads = num_attention_heads
131
+ self.lru_width = lru_width if lru_width is not None else hidden_size
132
+ self.attention_window_size = attention_window_size
133
+ self.conv1d_width = conv1d_width
134
+ self.logits_soft_cap = logits_soft_cap
135
+ self.rms_norm_eps = rms_norm_eps
136
+ self.use_cache = use_cache
137
+ self.rope_theta = rope_theta
138
+ self.partial_rotary_factor = partial_rotary_factor
139
+ self.block_types = list(block_types)
140
+ self.hidden_activation = hidden_activation
141
+ self.head_dim = self.hidden_size // self.num_attention_heads
142
+ self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads
143
+ if self.num_key_value_heads > self.num_attention_heads:
144
+ raise ValueError("The number of `num_key_value_heads` must be smaller than `num_attention_heads`")
145
+ self.attention_dropout = attention_dropout
146
+ self.attention_bias = attention_bias
147
+ self.w_init_variance_scale = w_init_variance_scale
148
+ self.final_w_init_variance_scale = 2.0 / self.num_hidden_layers
149
+ super().__init__(
150
+ pad_token_id=pad_token_id,
151
+ bos_token_id=bos_token_id,
152
+ eos_token_id=eos_token_id,
153
+ **kwargs,
154
+ )
155
+
156
+ @property
157
+ def layers_block_type(self):
158
+ return (self.block_types * 100)[: self.num_hidden_layers]
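A quick check of how the config tiles `block_types` across the layers and derives `head_dim`:

```python
from transformers import RecurrentGemmaConfig

config = RecurrentGemmaConfig(num_hidden_layers=5)

# The default ('recurrent', 'recurrent', 'attention') pattern is repeated and truncated.
print(config.layers_block_type)
# ['recurrent', 'recurrent', 'attention', 'recurrent', 'recurrent']

print(config.head_dim)  # hidden_size // num_attention_heads = 2560 // 10 = 256
```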
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/convert_recurrent_gemma_to_hf.py ADDED
@@ -0,0 +1,222 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import os
16
+ import warnings
17
+
18
+ import torch
19
+ from accelerate import init_empty_weights
20
+
21
+ from transformers import GemmaTokenizer, RecurrentGemmaConfig, RecurrentGemmaForCausalLM
22
+
23
+
24
+ try:
25
+ from transformers import GemmaTokenizerFast
26
+ except ImportError as e:
27
+ warnings.warn(e)
28
+ warnings.warn(
29
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
30
+ )
31
+ GemmaTokenizerFast = None
32
+
33
+ import regex as re
34
+
35
+
36
+ """
37
+ Sample usage:
38
+
39
+ ```
40
+ python src/transformers/models/gemma/convert_gemma_weights_to_hf.py \
41
+ --input_dir /path/to/downloaded/gemma/weights --model_size 7B --output_dir /output/path
42
+ ```
43
+
44
+ Thereafter, models can be loaded via:
45
+
46
+ ```py
47
+ from transformers import GemmaForCausalLM, GemmaTokenizerFast
48
+
49
+ model = GemmaForCausalLM.from_pretrained("/output/path")
50
+ tokenizer = GemmaTokenizerFast.from_pretrained("/output/path")
51
+ ```
52
+
53
+ Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
54
+ come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
55
+ """
56
+
57
+ gemma_2b_config = RecurrentGemmaConfig(
58
+ num_attention_heads=10,
59
+ num_key_value_heads=1,
60
+ hidden_size=2560,
61
+ intermediate_size=15360,
62
+ vocab_size=256000,
63
+ num_hidden_layers=26,
64
+ )
65
+
66
+ gemma_7b_config = RecurrentGemmaConfig()
67
+
68
+ CONFIG_MAPPING = {"2B": gemma_2b_config, "7B": gemma_7b_config}
69
+ LAYER_NAME_MAPPING = {"embedder.weight": "model.embed_tokens.weight"}
70
+
71
+
72
+ def write_model(save_path, input_base_path, config, safe_serialization=True, push_to_hub=False, dtype=torch.float32):
73
+ print(f"Fetching all parameters from the checkpoint at '{input_base_path}'")
74
+ model_state_dict = torch.load(input_base_path, map_location="cpu")
75
+
76
+ REPLACEMENT = {
77
+ "blocks.": "layers.",
78
+ ".ffw_down.b": ".down_proj.b",
79
+ ".ffw_down.w": ".down_proj.w",
80
+ ".ffw_up.b": ".up_proj.bias",
81
+ ".ffw_up.w": ".up_proj.weight",
82
+ "recurrent_block": "temporal_block",
83
+ "attention_block": "temporal_block",
84
+ "temporal_block.proj_final": "temporal_block.out_proj",
85
+ "norm.scale": "norm.weight",
86
+ ".proj_k": ".k_proj",
87
+ ".proj_q": ".q_proj",
88
+ ".proj_v": ".v_proj",
89
+ ".proj_final": ".o_proj",
90
+ "embedder.input_embedding": "embed_tokens.weight",
91
+ "conv_1d.w": "conv_1d.weight",
92
+ "conv_1d.b": "conv_1d.bias",
93
+ "input_gate.w": "input_gate.weight",
94
+ "input_gate.b": "input_gate.bias",
95
+ "a_param": "recurrent_param",
96
+ "a_gate.b": "recurrent_gate.bias",
97
+ "a_gate.w": "recurrent_gate.weight",
98
+ }
99
+
100
+ state_dict = {}
101
+ for k, v in model_state_dict.items():
102
+ k = "model." + k
103
+ pattern = re.compile("|".join(map(re.escape, REPLACEMENT.keys())))
104
+ key = pattern.sub(lambda match: REPLACEMENT[match.group(0)], k)
105
+ if "conv_1d.weight" in key:
106
+ v = v[:, None, :].transpose(0, 2)
107
+ if "up_proj.weight" in key:
108
+ state_dict[key.replace("up_proj", "gate_proj")] = v[0].T.contiguous()
109
+ v = v[1].T.contiguous()
110
+ if "up_proj.bias" in key:
111
+ state_dict[key.replace("up_proj", "gate_proj")] = v[0, 0, 0].clone()
112
+ v = v[1, 0, 0].contiguous()
113
+ if "recurrent_gate.bias" in key:
114
+ state_dict[key.replace("gate.", "gate_")] = v.contiguous().clone()
115
+ elif "recurrent_gate.weight" in key:
116
+ state_dict[key.replace("gate.", "gate_")] = v.contiguous().clone()
117
+ elif "input_gate.b" in key:
118
+ state_dict[key.replace("gate.", "gate_")] = v.contiguous().clone()
119
+ elif "input_gate.w" in key:
120
+ state_dict[key.replace("gate.", "gate_")] = v.contiguous().clone()
121
+ elif "embed_tokens" in key:
122
+ state_dict[key] = v[: config.vocab_size, :].contiguous().clone()
123
+ state_dict["lm_head.weight"] = v[: config.vocab_size, :].contiguous().clone()
124
+ else:
125
+ state_dict[key] = v.contiguous()
126
+
127
+ torch.set_default_dtype(dtype)
128
+
129
+ print("Loading the checkpoint in a Gemma model.")
130
+ with init_empty_weights():
131
+ model = RecurrentGemmaForCausalLM(config)
132
+ model.load_state_dict(state_dict, assign=True, strict=True)
133
+
134
+ model.config.torch_dtype = torch.float32
135
+ del model.config._name_or_path
136
+ print("Saving in the Transformers format.")
137
+
138
+ if push_to_hub:
139
+ print(f"pushing the model to {save_path}")
140
+ else:
141
+ model.save_pretrained(save_path, safe_serialization=safe_serialization)
142
+
143
+
144
+ def write_tokenizer(input_tokenizer_path, save_path, push_to_hub=False):
145
+ # Initialize the tokenizer based on the `spm` model
146
+ tokenizer_class = GemmaTokenizer if GemmaTokenizerFast is None else GemmaTokenizerFast
147
+ print(f"Saving a {tokenizer_class.__name__} to {save_path}.")
148
+ tokenizer = tokenizer_class(input_tokenizer_path)
149
+ if push_to_hub:
150
+ tokenizer.push_to_hub(save_path)
151
+ else:
152
+ tokenizer.save_pretrained(save_path)
153
+
154
+
155
+ def main():
156
+ parser = argparse.ArgumentParser()
157
+ parser.add_argument(
158
+ "--input_checkpoint",
159
+ help="Absolute path to the target Gemma weights.",
160
+ default="/home/arthur/transformers_recurrentgemma/google/recurrent-gemma-2b-it/ToBeDeleted/2b-it.pt",
161
+ )
162
+ parser.add_argument(
163
+ "--tokenizer_checkpoint",
164
+ help="Location of Gemma tokenizer model",
165
+ )
166
+ parser.add_argument(
167
+ "--model_size",
168
+ default="2B",
169
+ choices=["2B", "7B", "tokenizer_only"],
170
+ help="'f' models correspond to the finetuned versions, and are specific to the Gemma2 official release. For more details on Gemma2, checkout the original repo: https://huggingface.co/google/gemma-7b",
171
+ )
172
+ parser.add_argument(
173
+ "--output_dir",
174
+ default="google/recurrent-gemma-2b-it-hf",
175
+ help="Location to write HF model and tokenizer",
176
+ )
177
+ parser.add_argument(
178
+ "--pickle_serialization",
179
+ help="Whether or not to save using `safetensors`.",
180
+ action="store_true",
181
+ default=False,
182
+ )
183
+ parser.add_argument(
184
+ "--convert_tokenizer",
185
+ help="Whether or not to convert the tokenizer as well.",
186
+ action="store_true",
187
+ default=False,
188
+ )
189
+ parser.add_argument(
190
+ "--push_to_hub",
191
+ help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally.",
192
+ action="store_true",
193
+ default=False,
194
+ )
195
+ parser.add_argument(
196
+ "--dtype",
197
+ default="float32",
198
+ help="Target dtype of the converted model",
199
+ )
200
+ args = parser.parse_args()
201
+
202
+ if args.convert_tokenizer:
203
+ if args.tokenizer_checkpoint is None:
204
+ raise ValueError("Path to the tokenizer is required when passing --convert_tokenizer")
205
+
206
+ spm_path = os.path.join(args.tokenizer_checkpoint)
207
+ write_tokenizer(spm_path, args.output_dir, args.push_to_hub)
208
+
209
+ config = CONFIG_MAPPING[args.model_size]
210
+ dtype = getattr(torch, args.dtype)
211
+ write_model(
212
+ config=config,
213
+ input_base_path=args.input_checkpoint,
214
+ save_path=args.output_dir,
215
+ safe_serialization=not args.pickle_serialization,
216
+ push_to_hub=args.push_to_hub,
217
+ dtype=dtype,
218
+ )
219
+
220
+
221
+ if __name__ == "__main__":
222
+ main()
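
For readers skimming the conversion script above: the key-renaming pass in `write_model` relies on a single compiled alternation built from the `REPLACEMENT` table. A minimal, self-contained sketch of that idiom (the mapping and checkpoint key below are made up for illustration) is:

```python
import regex as re

# Hypothetical subset of the converter's REPLACEMENT table.
replacement = {"blocks.": "layers.", "attention_block": "temporal_block", ".proj_q": ".q_proj"}

# One pattern that matches any of the original substrings.
pattern = re.compile("|".join(map(re.escape, replacement.keys())))

old_key = "model.blocks.0.attention_block.proj_q.weight"
new_key = pattern.sub(lambda m: replacement[m.group(0)], old_key)
print(new_key)  # model.layers.0.temporal_block.q_proj.weight
```

The same substitution runs once per checkpoint entry, so every renaming rule is applied in a single pass over each key.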
venv/lib/python3.10/site-packages/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py ADDED
@@ -0,0 +1,942 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch RecurrentGemma model."""
17
+
18
+ import math
19
+ from typing import Dict, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
28
+ from ...modeling_outputs import BaseModelOutputWithNoAttention, CausalLMOutput
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS
31
+ from ...utils import (
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ replace_return_docstrings,
36
+ )
37
+ from .configuration_recurrent_gemma import RecurrentGemmaConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+ _CONFIG_FOR_DOC = "RecurrentGemmaConfig"
42
+ _MAX_SQRT_GRADIENT = 1000.0
43
+
44
+
45
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRMSNorm with Gemma->RecurrentGemma
46
+ class RecurrentGemmaRMSNorm(nn.Module):
47
+ def __init__(self, dim: int, eps: float = 1e-6):
48
+ super().__init__()
49
+ self.eps = eps
50
+ self.weight = nn.Parameter(torch.zeros(dim))
51
+
52
+ def _norm(self, x):
53
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
54
+
55
+ def forward(self, x):
56
+ output = self._norm(x.float())
57
+ # Llama does x.to(float16) * w whilst RecurrentGemma is (x * w).to(float16)
58
+ # See https://github.com/huggingface/transformers/pull/29402
59
+ output = output * (1.0 + self.weight.float())
60
+ return output.type_as(x)
61
+
62
+
63
+ ALL_LAYERNORM_LAYERS.append(RecurrentGemmaRMSNorm)
64
+
65
+
66
+ class RecurrentGemmaRotaryEmbedding(nn.Module):
67
+ def __init__(self, dim, base=10000, device=None):
68
+ super().__init__()
69
+ self.dim = dim
70
+ self.base = base
71
+ self.register_buffer("inv_freq", None, persistent=False)
72
+
73
+ @torch.no_grad()
74
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding.forward with Gemma->RecurrentGemma
75
+ def forward(self, x, position_ids, seq_len=None):
76
+ # x: [bs, num_attention_heads, seq_len, head_size]
77
+ if self.inv_freq is None:
78
+ self.inv_freq = 1.0 / (
79
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
80
+ )
81
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
82
+ position_ids_expanded = position_ids[:, None, :].float()
83
+ # Force float32 since bfloat16 loses precision on long contexts
84
+ # See https://github.com/huggingface/transformers/pull/29285
85
+ device_type = x.device.type
86
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
87
+ with torch.autocast(device_type=device_type, enabled=False):
88
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
89
+ emb = torch.cat((freqs, freqs), dim=-1)
90
+ cos = emb.cos()
91
+ sin = emb.sin()
92
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
93
+
94
+
95
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
96
+ def rotate_half(x):
97
+ """Rotates half the hidden dims of the input."""
98
+ x1 = x[..., : x.shape[-1] // 2]
99
+ x2 = x[..., x.shape[-1] // 2 :]
100
+ return torch.cat((-x2, x1), dim=-1)
101
+
102
+
103
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
104
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
105
+ """Applies Rotary Position Embedding to the query and key tensors.
106
+
107
+ Args:
108
+ q (`torch.Tensor`): The query tensor.
109
+ k (`torch.Tensor`): The key tensor.
110
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
111
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
112
+ position_ids (`torch.Tensor`, *optional*):
113
+ Deprecated and unused.
114
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
115
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
116
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
117
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
118
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
119
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
120
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
121
+ Returns:
122
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
123
+ """
124
+ cos = cos.unsqueeze(unsqueeze_dim)
125
+ sin = sin.unsqueeze(unsqueeze_dim)
126
+ q_embed = (q * cos) + (rotate_half(q) * sin)
127
+ k_embed = (k * cos) + (rotate_half(k) * sin)
128
+ return q_embed, k_embed
129
+
130
+
131
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
132
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
133
+ """
134
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
135
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
136
+ """
137
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
138
+ if n_rep == 1:
139
+ return hidden_states
140
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
141
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
142
+
143
+
144
+ class RecurrentGemmaSdpaAttention(nn.Module):
145
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
146
+
147
+ def __init__(self, config: RecurrentGemmaConfig):
148
+ super().__init__()
149
+ self.config = config
150
+ self.attention_dropout = config.attention_dropout
151
+ self.hidden_size = config.hidden_size
152
+ self.num_attention_heads = config.num_attention_heads
153
+ self.head_dim = config.head_dim
154
+ self.num_key_value_heads = config.num_key_value_heads
155
+ self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
156
+ self.partial_rotary_factor = config.partial_rotary_factor
157
+
158
+ self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=config.attention_bias)
159
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
160
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
161
+ self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=True)
162
+ self.rotary_emb = RecurrentGemmaRotaryEmbedding(
163
+ int(self.partial_rotary_factor * self.head_dim),
164
+ base=config.rope_theta,
165
+ )
166
+
167
+ def forward(
168
+ self,
169
+ hidden_states: torch.Tensor,
170
+ position_ids: Optional[torch.LongTensor] = None,
171
+ attention_mask: Optional[torch.Tensor] = None,
172
+ cache_position: Optional[torch.LongTensor] = None,
173
+ use_cache: bool = False,
174
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
175
+ bsz, q_len, _ = hidden_states.size()
176
+
177
+ query_states = self.q_proj(hidden_states)
178
+ key_states = self.k_proj(hidden_states)
179
+ value_states = self.v_proj(hidden_states)
180
+
181
+ query_states = query_states.view(bsz, q_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
182
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
183
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
184
+
185
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
186
+
187
+ # Partial rotary embedding
188
+ query_rot, query_pass = torch.chunk(query_states, int(1 / self.partial_rotary_factor), dim=-1)
189
+ key_rot, key_pass = torch.chunk(key_states, int(1 / self.partial_rotary_factor), dim=-1)
190
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
191
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
192
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
193
+
194
+ if use_cache and hasattr(self, "key_states"):
195
+ cache_kwargs = {"cache_position": cache_position}
196
+ key_states, value_states = self._update_cache(key_states, value_states, **cache_kwargs)
197
+
198
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
199
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
200
+
201
+ causal_mask = attention_mask
202
+ if attention_mask is not None:
203
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
204
+
205
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
206
+ query_states.contiguous(),
207
+ key_states.contiguous(),
208
+ value_states.contiguous(),
209
+ attn_mask=causal_mask, # pretty much a must for sliding window backend!
210
+ dropout_p=self.attention_dropout if self.training else 0.0,
211
+ scale=self.head_dim**-0.5,
212
+ )
213
+
214
+ attn_output = attn_output.transpose(1, 2).contiguous()
215
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
216
+ attn_output = self.o_proj(attn_output)
217
+ return attn_output
218
+
219
+ def _setup_cache(self, batch_size, device, dtype=None):
220
+ if dtype is None and self.config.torch_dtype is not None:
221
+ dtype = self.config.torch_dtype
222
+ dtype = dtype if dtype is not None else torch.float32
223
+ cache_shape = (batch_size, self.num_key_value_heads, self.config.attention_window_size, self.head_dim)
224
+ self.value_states = torch.zeros(cache_shape, dtype=dtype, device=device)
225
+ self.key_states = torch.zeros(cache_shape, dtype=dtype, device=device)
226
+
227
+ @torch.no_grad()
228
+ def _update_cache(self, key_states, value_states, **cache_kwargs):
229
+ """
230
+ torch.compile compatible sliding window.
231
+ Computes the `indices` based on `cache_position >= self.config.attention_window_size - 1`.
232
+ The `to_shift` is only true once we are above attention_window_size. Thus with `attention_window_size==64`:
233
+
234
+ indices = (slicing + to_shift[-1].int()-1) % self.config.attention_window_size
235
+ tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
236
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
237
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
238
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 0])
239
+
240
+ We overwrite the cache using these, then we always write at cache_position (clamped to `attention_window_size`)
241
+ """
242
+ cache_position = cache_kwargs.get("cache_position")
243
+ if cache_position.shape[0] > self.config.attention_window_size:
244
+ # int indexing -> device sync? in compile, use tensor
245
+ k_out = key_states[:, :, -self.config.attention_window_size :, :]
246
+ v_out = value_states[:, :, -self.config.attention_window_size :, :]
247
+ else:
248
+ slicing = torch.ones(
249
+ self.config.attention_window_size, dtype=torch.long, device=value_states.device
250
+ ).cumsum(0)
251
+ cache_position = cache_position.clamp(0, self.config.attention_window_size - 1)
252
+ to_shift = cache_position >= self.config.attention_window_size - 1
253
+ indices = (slicing + to_shift[-1].int() - 1) % self.config.attention_window_size
254
+
255
+ k_out, v_out = self.key_states.to(key_states.device), self.value_states.to(value_states.device)
256
+ k_out = k_out[:, :, indices]
257
+ v_out = v_out[:, :, indices]
258
+
259
+ k_out[:, :, cache_position] = key_states
260
+ v_out[:, :, cache_position] = value_states
261
+
262
+ self.key_states, self.value_states = k_out, v_out
263
+ return k_out, v_out
264
+
265
+
266
+ class SqrtBoundDerivative(torch.autograd.Function):
267
+ """Computes a square root with a gradient clipped at `_MAX_SQRT_GRADIENT`."""
268
+
269
+ @staticmethod
270
+ def forward(ctx, x: torch.Tensor) -> torch.Tensor:
271
+ """The forward pass, which is a normal `sqrt`."""
272
+ ctx.save_for_backward(x)
273
+ return torch.sqrt(x)
274
+
275
+ @staticmethod
276
+ def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
277
+ """The backward pass, which clips the `sqrt` gradient."""
278
+ (x,) = ctx.saved_tensors
279
+ clipped_x_times_4 = torch.clip(4.0 * x, min=1 / (_MAX_SQRT_GRADIENT**2))
280
+ return grad_output / torch.sqrt(clipped_x_times_4)
281
+
282
+
283
+ class RecurrentGemmaRglru(nn.Module):
284
+ """A Real-Gated Linear Recurrent Unit (RG-LRU) layer."""
285
+
286
+ def __init__(self, config):
287
+ super().__init__()
288
+ self.num_attention_heads = config.num_attention_heads
289
+ self.block_width = config.lru_width // self.num_attention_heads
290
+
291
+ self.recurrent_param = nn.Parameter(torch.empty([config.lru_width]))
292
+ self.input_gate_weight = nn.Parameter(
293
+ torch.empty([self.num_attention_heads, self.block_width, self.block_width])
294
+ )
295
+ self.input_gate_bias = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width]))
296
+
297
+ self.recurrent_gate_weight = nn.Parameter(
298
+ torch.empty([self.num_attention_heads, self.block_width, self.block_width])
299
+ )
300
+ self.recurrent_gate_bias = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width]))
301
+ self.recurrent_states = None
302
+
303
+ def forward(
304
+ self,
305
+ activations: torch.Tensor,
306
+ position_ids: torch.Tensor,
307
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
308
+ batch_size, seq_len, lru_width = activations.shape
309
+ reset = position_ids[:, :, None] == 0
310
+
311
+ reshape_act = activations.reshape(batch_size * seq_len, self.num_attention_heads, self.block_width)
312
+ reshape_act = reshape_act.permute(1, 0, 2)
313
+
314
+ res = torch.baddbmm(self.input_gate_bias[:, None, :], reshape_act, self.input_gate_weight)
315
+ input_gate = torch.sigmoid(res.transpose(0, 1).reshape(batch_size, seq_len, lru_width))
316
+
317
+ res = torch.baddbmm(self.recurrent_gate_bias[:, None, :], reshape_act, self.recurrent_gate_weight)
318
+ recurrent_gate = torch.sigmoid(res.transpose(0, 1).reshape(batch_size, seq_len, lru_width))
319
+
320
+ # Compute the parameter `A` of the recurrence.
321
+ log_recurrent_gate = -8.0 * recurrent_gate * nn.functional.softplus(self.recurrent_param)
322
+ recurrent_gate = torch.exp(log_recurrent_gate)
323
+ a_square = torch.exp(2 * log_recurrent_gate)
324
+
325
+ # Gate the input.
326
+ gated_inputs = activations * input_gate
327
+
328
+ # Apply gamma normalization to the input. We need to clip the derivatives of
329
+ # `sqrt` in order to prevent NaNs during training in bfloat16. TODO a bit annoying
330
+ multiplier = 1
331
+ tracing = isinstance(activations, torch.fx.Proxy) or (
332
+ hasattr(torch, "_dynamo") and torch._dynamo.is_compiling()
333
+ )
334
+ if not torch.jit.is_tracing() and not tracing:
335
+ multiplier = SqrtBoundDerivative.apply(1 - a_square)
336
+ multiplier = reset + ~reset * multiplier
337
+ normalized_x = gated_inputs * multiplier.type(activations.dtype)
338
+
339
+ hidden_states, recurrent_states = self._rnn_scan(
340
+ hidden_states=normalized_x,
341
+ recurrent_gate=recurrent_gate,
342
+ reset=reset,
343
+ recurrent_states=self.recurrent_states,
344
+ )
345
+ self.recurrent_states = recurrent_states
346
+ return hidden_states
347
+
348
+ # TODO refactor
349
+ def _rnn_scan(
350
+ self,
351
+ hidden_states: torch.Tensor,
352
+ recurrent_gate: torch.Tensor,
353
+ reset: torch.Tensor,
354
+ recurrent_states: Union[torch.Tensor, None],
355
+ acc_dtype: torch.dtype = torch.float32,
356
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
357
+ """Runs the recurrence of a linear RNN.
358
+
359
+ Args:
360
+ hidden_states: The input sequence.
361
+ recurrent_gate: The diagonal of the recurrence matrix `A`.
362
+ reset: Indicator of document boundaries, e.g. when to reset the hidden state
363
+ of the RNN.
364
+ recurrent_states: The initial hidden state.
365
+ acc_dtype: The data type for the accumulation.
366
+
367
+ Returns:
368
+ The output of the linear recurrence.
369
+ """
370
+ # Multiply `a` by the reset.
371
+ recurrent_gate = recurrent_gate * ~reset
372
+
373
+ if hidden_states.shape[1] == 1:
374
+ # Using scan in sampling mode.
375
+ if recurrent_states is None: # same here, when decoding you always have cache
376
+ return hidden_states, hidden_states[:, 0].type(acc_dtype)
377
+
378
+ else:
379
+ contextualized_states = recurrent_gate.type(acc_dtype) * recurrent_states[:, None].to(
380
+ recurrent_gate.device
381
+ )
382
+ contextualized_states += hidden_states.type(acc_dtype)
383
+ return contextualized_states.type(hidden_states.dtype), contextualized_states[:, -1]
384
+
385
+ else:
386
+ # Using scan in linear mode.
387
+ if recurrent_states is None:
388
+ recurrent_states = torch.zeros(hidden_states[:, 0].shape, dtype=acc_dtype, device=hidden_states.device)
389
+
390
+ contextualized_states = torch.zeros_like(hidden_states)
391
+ for t in range(hidden_states.shape[1]):
392
+ recurrent_states = recurrent_gate[:, t].type(acc_dtype) * recurrent_states.to(recurrent_gate.device)
393
+ recurrent_states = recurrent_states + hidden_states[:, t].type(acc_dtype)
394
+ contextualized_states[:, t] = recurrent_states.type(hidden_states.dtype)
395
+
396
+ return contextualized_states, recurrent_states
397
+
398
+
399
+ class RecurrentGemmaRecurrentBlock(nn.Module):
400
+ """Griffin and Hawk's recurrent block."""
401
+
402
+ def __init__(self, config):
403
+ super().__init__()
404
+ self.lru_width = config.lru_width
405
+ self.hidden_size = config.hidden_size
406
+ self.linear_y = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width)
407
+ self.linear_x = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width)
408
+ self.linear_out = nn.Linear(in_features=config.lru_width, out_features=config.hidden_size)
409
+ self.conv1d_width = config.conv1d_width
410
+ self.conv_1d = nn.Conv1d(
411
+ config.lru_width,
412
+ config.lru_width,
413
+ kernel_size=config.conv1d_width,
414
+ groups=config.lru_width,
415
+ padding=config.conv1d_width - 1,
416
+ )
417
+ self.rg_lru = RecurrentGemmaRglru(config)
418
+ self.act_fn = ACT2FN[config.hidden_activation]
419
+
420
+ self.conv1d_state = None
421
+
422
+ def forward(
423
+ self,
424
+ input_states: torch.Tensor,
425
+ position_ids: torch.Tensor,
426
+ attention_mask: torch.Tensor,
427
+ cache_position: torch.Tensor,
428
+ use_cache: bool = True,
429
+ ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
430
+ _, seq_len, _ = input_states.shape
431
+
432
+ y_branch = self.linear_y(input_states)
433
+ y_branch = self.act_fn(y_branch)
434
+
435
+ x_branch = self.linear_x(input_states)
436
+ x_branch = x_branch.transpose(1, 2)
437
+
438
+ if use_cache:
439
+ if cache_position.shape[0] != 1: # prefill
440
+ self.conv1d_state = nn.functional.pad(x_branch, (self.conv1d_width - x_branch.shape[-1] - 1, 0))
441
+ x_branch = self.conv_1d(x_branch)[..., :seq_len]
442
+ else: # decoding
443
+ conv_state = torch.cat((self.conv1d_state, x_branch), -1)
444
+ x_branch = torch.sum(conv_state * self.conv_1d.weight[:, 0, :], dim=-1) + self.conv_1d.bias
445
+ x_branch = x_branch.unsqueeze(-1)
446
+ self.conv1d_state = conv_state[:, :, 1:]
447
+ else:
448
+ x_branch = self.conv_1d(x_branch)[..., :seq_len]
449
+
450
+ x_branch = self.rg_lru(x_branch.transpose(1, 2), position_ids)
451
+
452
+ hidden_states = x_branch * y_branch
453
+ hidden_states = self.linear_out(hidden_states)
454
+ return hidden_states
455
+
456
+ def _setup_cache(self, batch, device, dtype):
457
+ # recurrent_states always computed in full precision
458
+ self.rg_lru.recurrent_states = torch.zeros((batch, self.lru_width), device=device, dtype=torch.float32)
459
+ self.conv1d_state = torch.zeros((batch, self.hidden_size, self.conv1d_width - 1), device=device, dtype=dtype)
460
+
461
+
462
+ TEMPORAL_BLOCK_CLASSES = {"recurrent": RecurrentGemmaRecurrentBlock, "attention": RecurrentGemmaSdpaAttention}
463
+
464
+
465
+ class RecurrentGemmaMlp(nn.Module):
466
+ def __init__(self, config):
467
+ super().__init__()
468
+ self.config = config
469
+ self.hidden_size = config.hidden_size
470
+ self.intermediate_size = config.intermediate_size // 2
471
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
472
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
473
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
474
+ self.act_fn = ACT2FN[config.hidden_activation]
475
+
476
+ def forward(self, hidden_states):
477
+ gate = self.act_fn(self.gate_proj(hidden_states))
478
+ return self.down_proj(gate * self.up_proj(hidden_states))
479
+
480
+
481
+ class RecurrentGemmaDecoderLayer(nn.Module):
482
+ """Griffin and Hawk's residual block."""
483
+
484
+ def __init__(self, config, layer_idx):
485
+ super().__init__()
486
+ self.temporal_pre_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
487
+ self.temporal_block = TEMPORAL_BLOCK_CLASSES[config.layers_block_type[layer_idx]](config)
488
+ self.channel_pre_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
489
+ self.mlp_block = RecurrentGemmaMlp(config)
490
+
491
+ def forward(
492
+ self,
493
+ activations: torch.Tensor,
494
+ position_ids: torch.Tensor,
495
+ attention_mask: torch.Tensor,
496
+ cache_position: torch.Tensor = None,
497
+ use_cache: bool = None,
498
+ ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
499
+ raw_activations = activations
500
+ inputs_normalized = self.temporal_pre_norm(raw_activations) # RMSNorm introduces slight differences
501
+
502
+ hidden_states = self.temporal_block(
503
+ inputs_normalized, position_ids, attention_mask, cache_position=cache_position, use_cache=use_cache
504
+ )
505
+
506
+ residual = hidden_states + raw_activations
507
+
508
+ hidden_states = self.channel_pre_norm(residual)
509
+ hidden_states = self.mlp_block(hidden_states)
510
+
511
+ hidden_states = hidden_states + residual
512
+ return hidden_states
513
+
514
+
515
+ RECURRENTGEMMA_START_DOCSTRING = r"""
516
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
517
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
518
+ etc.)
519
+
520
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
521
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
522
+ and behavior.
523
+
524
+ Parameters:
525
+ config ([`RecurrentGemmaConfig`]):
526
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
527
+ load the weights associated with the model, only the configuration. Check out the
528
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
529
+ """
530
+
531
+
532
+ @add_start_docstrings(
533
+ "The bare RecurrentGemma Model outputting raw hidden-states without any specific head on top.",
534
+ RECURRENTGEMMA_START_DOCSTRING,
535
+ )
536
+ class RecurrentGemmaPreTrainedModel(PreTrainedModel):
537
+ config_class = RecurrentGemmaConfig
538
+ base_model_prefix = "model"
539
+ supports_gradient_checkpointing = True
540
+ _no_split_modules = ["RecurrentGemmaDecoderLayer"]
541
+ _skip_keys_device_placement = ["cache"]
542
+ _supports_flash_attn_2 = False
543
+ _supports_sdpa = False # we can't compare with eager for now
544
+ _supports_cache_class = True
545
+
546
+ def _init_weights(self, module):
547
+ std = math.sqrt(self.config.w_init_variance_scale / self.config.conv1d_width)
548
+ if isinstance(module, nn.Conv1d):
549
+ torch.nn.init.normal_(module.weight, mean=0.0, std=std)
550
+ torch.nn.init.zeros_(module.bias)
551
+ elif isinstance(module, RecurrentGemmaSdpaAttention):
552
+ torch.nn.init.normal_(module.q_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
553
+ torch.nn.init.normal_(module.k_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
554
+ torch.nn.init.normal_(module.v_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
555
+
556
+ std = math.sqrt(self.config.final_w_init_variance_scale / self.config.hidden_size)
557
+ torch.nn.init.normal_(module.o_proj.weight, mean=0.0, std=std)
558
+ elif isinstance(module, RecurrentGemmaRecurrentBlock):
559
+ torch.nn.init.zeros_(module.linear_x.bias)
560
+ torch.nn.init.normal_(module.linear_x.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
561
+
562
+ torch.nn.init.zeros_(module.linear_y.bias)
563
+ torch.nn.init.normal_(module.linear_y.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
564
+
565
+ std = math.sqrt(self.config.final_w_init_variance_scale / self.config.lru_width)
566
+ torch.nn.init.normal_(module.linear_out.weight, mean=0.0, std=std)
567
+ torch.nn.init.zeros_(module.linear_out.bias)
568
+ elif isinstance(module, RecurrentGemmaRglru):
569
+ std = math.sqrt(
570
+ self.config.w_init_variance_scale / (self.config.lru_width // self.config.num_attention_heads)
571
+ )
572
+ torch.nn.init.normal_(module.input_gate_weight, mean=0.0, std=std)
573
+ torch.nn.init.normal_(module.recurrent_gate_weight, mean=0.0, std=std)
574
+ torch.nn.init.zeros_(module.input_gate_bias)
575
+ torch.nn.init.zeros_(module.recurrent_gate_bias)
576
+
577
+ module.recurrent_param.data.uniform_(0.9**2 + 1e-8, 0.999**2 + 1e-8)
578
+ module.recurrent_param.data.log_().mul_(0.5)
579
+ module.recurrent_param.data.neg_().exp_().sub_(1.0).log_()
580
+ elif isinstance(module, nn.Linear):
581
+ torch.nn.init.normal_(module.weight, mean=0.0, std=std)
582
+ if getattr(module, "bias", None) is not None:
583
+ torch.nn.init.zeros_(module.bias)
584
+
585
+ def _setup_cache(self, config, batch, device, dtype):
586
+ layers = getattr(self, "model", self).layers
587
+ for layer in layers:
588
+ layer.temporal_block._setup_cache(batch, device, dtype)
589
+
590
+ def reset_cache(self, batch, device, dtype):
591
+ pass
592
+
593
+
594
+ RECURRENTGEMMA_INPUTS_DOCSTRING = r"""
595
+ Args:
596
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
597
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
598
+ it.
599
+
600
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
601
+ [`PreTrainedTokenizer.__call__`] for details.
602
+
603
+ [What are input IDs?](../glossary#input-ids)
604
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
605
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
606
+
607
+ - 1 for tokens that are **not masked**,
608
+ - 0 for tokens that are **masked**.
609
+
610
+ [What are attention masks?](../glossary#attention-mask)
611
+
612
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
613
+ [`PreTrainedTokenizer.__call__`] for details.
614
+
615
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
616
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
617
+ config.n_positions - 1]`.
618
+
619
+ [What are position IDs?](../glossary#position-ids)
620
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
621
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
622
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
623
+ model's internal embedding lookup matrix.
624
+ use_cache (`bool`, *optional*):
625
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
626
+ `past_key_values`).
627
+ output_hidden_states (`bool`, *optional*):
628
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
629
+ more detail.
630
+ return_dict (`bool`, *optional*):
631
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
632
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
633
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
634
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
635
+ the complete sequence length.
636
+ """
637
+
638
+
639
+ @add_start_docstrings(
640
+ "The bare RecurrentGemma Model outputting raw hidden-states without any specific head on top.",
641
+ RECURRENTGEMMA_START_DOCSTRING,
642
+ )
643
+ class RecurrentGemmaModel(RecurrentGemmaPreTrainedModel):
644
+ """
645
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`RecurrentGemmaDecoderLayer`]
646
+
647
+ Args:
648
+ config: RecurrentGemmaConfig
649
+ """
650
+
651
+ def __init__(self, config: RecurrentGemmaConfig):
652
+ super().__init__(config)
653
+ self.padding_idx = config.pad_token_id
654
+ self.vocab_size = config.vocab_size
655
+
656
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
657
+ self.layers = nn.ModuleList(
658
+ [RecurrentGemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
659
+ )
660
+ self.final_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
661
+ self.gradient_checkpointing = False
662
+
663
+ self.register_buffer(
664
+ "normalizer", torch.tensor(self.config.hidden_size**0.5, dtype=torch.bfloat16), persistent=False
665
+ )
666
+ # Initialize weights and apply final processing
667
+ self.post_init()
668
+
669
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel.get_input_embeddings
670
+ def get_input_embeddings(self):
671
+ return self.embed_tokens
672
+
673
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel.set_input_embeddings
674
+ def set_input_embeddings(self, value):
675
+ self.embed_tokens = value
676
+
677
+ @add_start_docstrings_to_model_forward(RECURRENTGEMMA_INPUTS_DOCSTRING)
678
+ def forward(
679
+ self,
680
+ input_ids: torch.LongTensor = None,
681
+ position_ids: Optional[torch.LongTensor] = None,
682
+ attention_mask: Optional[torch.Tensor] = None,
683
+ cache_position: Optional[torch.LongTensor] = None,
684
+ inputs_embeds: Optional[torch.FloatTensor] = None,
685
+ use_cache: Optional[bool] = None,
686
+ output_hidden_states: Optional[bool] = None,
687
+ return_dict: Optional[bool] = None,
688
+ **kwargs,
689
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
690
+ output_hidden_states = (
691
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
692
+ )
693
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
694
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
695
+
696
+ if (input_ids is None) ^ (inputs_embeds is not None):
697
+ raise ValueError(
698
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
699
+ )
700
+
701
+ if self.gradient_checkpointing and self.training and use_cache:
702
+ logger.warning_once(
703
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
704
+ )
705
+ use_cache = False
706
+
707
+ if inputs_embeds is None:
708
+ inputs_embeds = self.embed_tokens(input_ids)
709
+
710
+ hidden_states = inputs_embeds
711
+
712
+ if use_cache and inputs_embeds.shape[1] != 1: # TODO let's maybe only call in the `generate`?
713
+ self._setup_cache(self.config, hidden_states.shape[0], hidden_states.device, hidden_states.dtype)
714
+
715
+ if cache_position is None:
716
+ cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
717
+ if position_ids is None:
718
+ position_ids = cache_position.unsqueeze(0)
719
+
720
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
721
+
722
+ hidden_states = hidden_states * self.normalizer.type(hidden_states.dtype)
723
+
724
+ all_hidden_states = () if output_hidden_states else None
725
+ for i, residual_block in enumerate(self.layers):
726
+ if output_hidden_states:
727
+ all_hidden_states += (hidden_states,)
728
+ if self.gradient_checkpointing and self.training:
729
+ hidden_states = self._gradient_checkpointing_func(
730
+ residual_block.__call__, hidden_states, position_ids, causal_mask, cache_position, use_cache
731
+ )
732
+ else:
733
+ hidden_states = residual_block(hidden_states, position_ids, causal_mask, cache_position, use_cache)
734
+
735
+ hidden_states = self.final_norm(hidden_states)
736
+
737
+ # add hidden states from the last decoder layer
738
+ if output_hidden_states:
739
+ all_hidden_states += (hidden_states,)
740
+
741
+ if not return_dict:
742
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
743
+
744
+ return BaseModelOutputWithNoAttention(
745
+ last_hidden_state=hidden_states,
746
+ hidden_states=all_hidden_states,
747
+ )
748
+
749
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
750
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
751
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
752
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
753
+ # Ignore copy
754
+ def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
755
+ dtype, device = input_tensor.dtype, input_tensor.device
756
+ min_dtype = torch.finfo(dtype).min
757
+ sequence_length = input_tensor.shape[1]
758
+ target_length = max(self.config.attention_window_size, sequence_length)
759
+
760
+ diagonal = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
761
+ causal_mask = diagonal
762
+ if sequence_length != 1:
763
+ causal_mask = torch.triu(diagonal, diagonal=-1)
764
+
765
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
766
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
767
+ if attention_mask is not None:
768
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
769
+ if attention_mask.dim() == 2:
770
+ mask_length = attention_mask.shape[-1]
771
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
772
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
773
+
774
+ if attention_mask is not None and attention_mask.device.type == "cuda":
775
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
776
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
777
+ # Details: https://github.com/pytorch/pytorch/issues/110213
778
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
779
+
780
+ return causal_mask
781
+
782
+
783
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->RECURRENTGEMMA,Llama->RecurrentGemma,llama->gemma
784
+ class RecurrentGemmaForCausalLM(RecurrentGemmaPreTrainedModel):
785
+ _tied_weights_keys = ["lm_head.weight"]
786
+
787
+ def __init__(self, config):
788
+ super().__init__(config)
789
+ self.model = RecurrentGemmaModel(config)
790
+ self.vocab_size = config.vocab_size
791
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
792
+
793
+ # Initialize weights and apply final processing
794
+ self.post_init()
795
+
796
+ def get_input_embeddings(self):
797
+ return self.model.embed_tokens
798
+
799
+ def set_input_embeddings(self, value):
800
+ self.model.embed_tokens = value
801
+
802
+ def get_output_embeddings(self):
803
+ return self.lm_head
804
+
805
+ def set_output_embeddings(self, new_embeddings):
806
+ self.lm_head = new_embeddings
807
+
808
+ def set_decoder(self, decoder):
809
+ self.model = decoder
810
+
811
+ def get_decoder(self):
812
+ return self.model
813
+
814
+ # Ignore copy
815
+ @add_start_docstrings_to_model_forward(RECURRENTGEMMA_INPUTS_DOCSTRING)
816
+ @replace_return_docstrings(output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC)
817
+ def forward(
818
+ self,
819
+ input_ids: Optional[torch.LongTensor] = None,
820
+ cache_position: Optional[torch.LongTensor] = None,
821
+ attention_mask: Optional[torch.Tensor] = None,
822
+ inputs_embeds: Optional[torch.FloatTensor] = None,
823
+ labels: Optional[torch.LongTensor] = None,
824
+ output_hidden_states: Optional[bool] = None,
825
+ return_dict: Optional[bool] = None,
826
+ use_cache: Optional[bool] = None,
827
+ **kwargs, # for now we need this for generation
828
+ ) -> Union[Tuple, CausalLMOutput]:
829
+ r"""
830
+ Args:
831
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
832
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
833
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
834
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
835
+
836
+ Returns:
837
+
838
+ Example:
839
+
840
+ ```python
841
+ >>> from transformers import AutoTokenizer, RecurrentGemmaForCausalLM
842
+
843
+ >>> model = RecurrentGemmaForCausalLM.from_pretrained("google/recurrentgemma-2b")
844
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/recurrentgemma-2b")
845
+
846
+ >>> prompt = "What is your favorite condiment?"
847
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
848
+
849
+ >>> # Generate
850
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
851
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
852
+ "What is your favorite condiment?"
853
+ ```"""
854
+ output_hidden_states = (
855
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
856
+ )
857
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
858
+ output_hidden_states = True
859
+ outputs = self.model(
860
+ input_ids=input_ids,
861
+ cache_position=cache_position,
862
+ attention_mask=attention_mask,
863
+ inputs_embeds=inputs_embeds,
864
+ use_cache=use_cache,
865
+ output_hidden_states=output_hidden_states,
866
+ return_dict=return_dict,
867
+ )
868
+
869
+ hidden_states = outputs[0]
870
+ logits = self.lm_head(hidden_states)
871
+
872
+ # Soft-cap the logits TODO remove if always done.
873
+ # if self.config.logits_soft_cap is not None:
874
+ cap = self.config.logits_soft_cap
875
+ logits = nn.functional.tanh(logits / cap) * cap
876
+
877
+ logits = logits.float()
878
+ loss = None
879
+ if labels is not None:
880
+ # Shift so that tokens < n predict n
881
+ shift_logits = logits[..., :-1, :].contiguous()
882
+ shift_labels = labels[..., 1:].contiguous()
883
+ # Flatten the tokens
884
+ loss_fct = CrossEntropyLoss()
885
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
886
+ shift_labels = shift_labels.view(-1)
887
+ # Enable model parallelism
888
+ shift_labels = shift_labels.to(shift_logits.device)
889
+ loss = loss_fct(shift_logits, shift_labels)
890
+
891
+ if not return_dict:
892
+ output = (logits,) + outputs[1:]
893
+ return (loss,) + output if loss is not None else output
894
+
895
+ return CausalLMOutput(
896
+ loss=loss,
897
+ logits=logits,
898
+ hidden_states=outputs.hidden_states,
899
+ )
900
+
901
+ # Ignore copy
902
+ def prepare_inputs_for_generation(
903
+ self, input_ids, attention_mask=None, inputs_embeds=None, cache_position=None, use_cache=None, **kwargs
904
+ ):
905
+ position_ids = kwargs.get("position_ids", None)
906
+ if attention_mask is not None and position_ids is None:
907
+ position_ids = attention_mask.long().cumsum(-1) - 1
908
+ position_ids.masked_fill_(attention_mask == 0, 1)
909
+
910
+ attention_mask = attention_mask[:, -self.config.attention_window_size :]
911
+
912
+ past_length = cache_position[0]
913
+ if past_length > 0:
914
+ position_ids = position_ids[:, past_length:]
915
+
916
+ if inputs_embeds is not None:
917
+ model_inputs = {"inputs_embeds": inputs_embeds[:, past_length:]}
918
+ else:
919
+ model_inputs = {"input_ids": input_ids[:, past_length:].contiguous()}
920
+
921
+ if cache_position is not None:
922
+ cache_position = cache_position[-position_ids.shape[1] :]
923
+
924
+ model_inputs.update(
925
+ {
926
+ "position_ids": position_ids,
927
+ "attention_mask": attention_mask,
928
+ "cache_position": cache_position,
929
+ "use_cache": use_cache,
930
+ }
931
+ )
932
+ return model_inputs
933
+
934
+ # Ignore copy
935
+ def _reorder_cache(self, past_key_values, beam_idx):
936
+ for layer in self.layers:
937
+ if hasattr(layer.temporal_block, "key_states"):
938
+ k_state = layer.temporal_block.key_states
939
+ v_state = layer.temporal_block.value_states
940
+ k_state = k_state.index_select(0, beam_idx.to(k_state.device))
941
+ v_state = v_state.index_select(0, beam_idx.to(v_state.device))
942
+ return None
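
A side note on the sliding-window cache documented in `RecurrentGemmaSdpaAttention._update_cache` above: the rolling-index computation can be reproduced in isolation. The snippet below is illustrative only, using a hypothetical window size of 8 and a 1-D stand-in for the key/value cache:

```python
import torch

attention_window_size = 8
cache = torch.arange(8)              # stand-in for 8 cached key/value slots
new_entry = torch.tensor([8])        # entry produced at the current decoding step
cache_position = torch.tensor([9])   # absolute position of that step (past the window)

slicing = torch.ones(attention_window_size, dtype=torch.long).cumsum(0)  # [1, 2, ..., 8]
clamped = cache_position.clamp(0, attention_window_size - 1)             # 7
to_shift = clamped >= attention_window_size - 1                          # True once the window is full
indices = (slicing + to_shift[-1].int() - 1) % attention_window_size     # [1, 2, ..., 7, 0]

rolled = cache[indices]              # shift the window left, dropping the oldest slot
rolled[clamped] = new_entry          # write the new entry into the freed last slot
print(rolled)                        # tensor([1, 2, 3, 4, 5, 6, 7, 8])
```

Before the window is full, `to_shift` is `False`, `indices` becomes `[0, 1, ..., 7]`, and the gather is a no-op, so the same code path also covers early decoding steps.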
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__init__.py ADDED
@@ -0,0 +1,111 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_seamless_m4t": ["SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4TConfig"],
27
+ "feature_extraction_seamless_m4t": ["SeamlessM4TFeatureExtractor"],
28
+ "processing_seamless_m4t": ["SeamlessM4TProcessor"],
29
+ }
30
+
31
+ try:
32
+ if not is_sentencepiece_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_seamless_m4t"] = ["SeamlessM4TTokenizer"]
38
+
39
+ try:
40
+ if not is_tokenizers_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["tokenization_seamless_m4t_fast"] = ["SeamlessM4TTokenizerFast"]
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ _import_structure["modeling_seamless_m4t"] = [
54
+ "SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST",
55
+ "SeamlessM4TForTextToSpeech",
56
+ "SeamlessM4TForSpeechToSpeech",
57
+ "SeamlessM4TForTextToText",
58
+ "SeamlessM4TForSpeechToText",
59
+ "SeamlessM4TModel",
60
+ "SeamlessM4TPreTrainedModel",
61
+ "SeamlessM4TCodeHifiGan",
62
+ "SeamlessM4THifiGan",
63
+ "SeamlessM4TTextToUnitForConditionalGeneration",
64
+ "SeamlessM4TTextToUnitModel",
65
+ ]
66
+
67
+ if TYPE_CHECKING:
68
+ from .configuration_seamless_m4t import SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4TConfig
69
+ from .feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor
70
+ from .processing_seamless_m4t import SeamlessM4TProcessor
71
+
72
+ try:
73
+ if not is_sentencepiece_available():
74
+ raise OptionalDependencyNotAvailable()
75
+ except OptionalDependencyNotAvailable:
76
+ pass
77
+ else:
78
+ from .tokenization_seamless_m4t import SeamlessM4TTokenizer
79
+
80
+ try:
81
+ if not is_tokenizers_available():
82
+ raise OptionalDependencyNotAvailable()
83
+ except OptionalDependencyNotAvailable:
84
+ pass
85
+ else:
86
+ from .tokenization_seamless_m4t_fast import SeamlessM4TTokenizerFast
87
+
88
+ try:
89
+ if not is_torch_available():
90
+ raise OptionalDependencyNotAvailable()
91
+ except OptionalDependencyNotAvailable:
92
+ pass
93
+ else:
94
+ from .modeling_seamless_m4t import (
95
+ SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST,
96
+ SeamlessM4TCodeHifiGan,
97
+ SeamlessM4TForSpeechToSpeech,
98
+ SeamlessM4TForSpeechToText,
99
+ SeamlessM4TForTextToSpeech,
100
+ SeamlessM4TForTextToText,
101
+ SeamlessM4THifiGan,
102
+ SeamlessM4TModel,
103
+ SeamlessM4TPreTrainedModel,
104
+ SeamlessM4TTextToUnitForConditionalGeneration,
105
+ SeamlessM4TTextToUnitModel,
106
+ )
107
+
108
+ else:
109
+ import sys
110
+
111
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
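
As a quick orientation on the lazy-import pattern used in this `__init__.py`: the final `_LazyModule` registration means none of the submodules listed in `_import_structure` are imported until their attributes are first accessed. A rough usage sketch (assuming the relevant optional dependencies are installed for whatever you access) is:

```python
# Importing the package only installs the lazy proxy; heavy submodules are not loaded yet.
from transformers.models import seamless_m4t

# The first attribute access triggers the real import declared in _import_structure.
config = seamless_m4t.SeamlessM4TConfig()
print(config.hidden_size)  # 1024 with the default configuration
```

Missing optional dependencies (for example `sentencepiece` for the slow tokenizer) simply leave the corresponding names out of the import structure rather than failing at import time.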
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.94 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/configuration_seamless_m4t.cpython-310.pyc ADDED
Binary file (19.6 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/feature_extraction_seamless_m4t.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc ADDED
Binary file (124 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/processing_seamless_m4t.cpython-310.pyc ADDED
Binary file (5.47 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/tokenization_seamless_m4t.cpython-310.pyc ADDED
Binary file (21.4 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/tokenization_seamless_m4t_fast.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/configuration_seamless_m4t.py ADDED
@@ -0,0 +1,416 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ SeamlessM4T model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class SeamlessM4TConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`~SeamlessM4TModel`]. It is used to instantiate an
30
+ SeamlessM4T model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the SeamlessM4T
32
+ ["facebook/hf-seamless-m4t-medium"](https://huggingface.co/"facebook/hf-seamless-m4t-medium") architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 256102):
40
+ Vocabulary size of the SeamlessM4T model. Defines the number of different tokens that can be represented by
41
+ the `inputs_ids` passed when calling [`~SeamlessM4TModel`], [`~SeamlessM4TForTextToSpeech`] or
42
+ [`~SeamlessM4TForTextToText`].
43
+ t2u_vocab_size (`int`, *optional*, defaults to 10082):
44
+ Unit vocabulary size of the SeamlessM4T model. Defines the number of different unit tokens that can be
45
+ represented by the `input_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4TModel`],
46
+ [`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
47
+
48
+ > Parameters shared across sub-models
49
+
50
+ hidden_size (`int`, *optional*, defaults to 1024):
51
+ Dimensionality of the "intermediate" layers in the architecture.
52
+ initializer_range (`float`, *optional*, defaults to 0.02):
53
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
54
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
55
+ The epsilon used by the layer normalization layers.
56
+ use_cache (`bool`, *optional*, defaults to `True`):
57
+ Whether or not the model should return the last key/values attentions (not used by all models).
58
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
59
+ The maximum sequence length that this model's text encoder and decoder might ever be used with. Typically set
60
+ this to something large just in case (e.g., 512 or 1024 or 2048).
61
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
62
+ Whether the model is used as an encoder/decoder or not.
63
+ encoder_layerdrop (`float`, *optional*, defaults to 0.05):
64
+ The LayerDrop probability for the encoders. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
65
+ for more details.
66
+ decoder_layerdrop (`float`, *optional*, defaults to 0.05):
67
+ The LayerDrop probability for the decoders. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
68
+ for more details.
69
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
70
+ The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
71
+ `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
72
+ dropout (`float`, *optional*, defaults to 0.1):
73
+ The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
74
+ attention_dropout (`float`, *optional*, defaults to 0.1):
75
+ The dropout probability for all attention layers.
76
+ activation_dropout (`float`, *optional*, defaults to 0.0):
77
+ The dropout probability for all activation layers in the model.
78
+ scale_embedding (`bool`, *optional*, defaults to `True`):
79
+ Scale embeddings by dividing by sqrt(d_model).
80
+
81
+ > Text encoder and text decoder specific parameters
82
+
83
+ encoder_layers (`int`, *optional*, defaults to 24):
84
+ Number of hidden layers in the Transformer text encoder.
85
+ encoder_ffn_dim (`int`, *optional*, defaults to 8192):
86
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
87
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
88
+ Number of attention heads for each attention layer in the Transformer text encoder.
89
+ decoder_layers (`int`, *optional*, defaults to 24):
90
+ Number of hidden layers in the Transformer text decoder.
91
+ decoder_ffn_dim (`int`, *optional*, defaults to 8192):
92
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
93
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
94
+ Number of attention heads for each attention layer in the Transformer text decoder.
95
+ decoder_start_token_id (`int`, *optional*, defaults to 3):
96
+ If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
97
+ applied in the text decoder.
98
+ max_new_tokens (`int`, *optional*, defaults to 256):
99
+ The maximum numbers of text tokens to generate, ignoring the number of tokens in the prompt.
100
+ pad_token_id (`int`, *optional*, defaults to 0):
101
+ The id of the _padding_ text token. Only applied to the text-decoder model.
102
+ bos_token_id (`int`, *optional*, defaults to 2):
103
+ The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
104
+ eos_token_id (`int`, *optional*, defaults to 3):
105
+ The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
106
+
107
+ > Speech encoder specific parameters
108
+
109
+ speech_encoder_layers (`int`, *optional*, defaults to 24):
110
+ Number of hidden layers in the Transformer speech encoder.
111
+ speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
112
+ Number of attention heads for each attention layer in the Transformer speech encoder.
113
+ speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
114
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
115
+ speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
116
+ The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
117
+ `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
118
+ speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
119
+ The dropout probability for all layers in the speech encoder.
120
+ add_adapter (`bool`, *optional*, defaults to `True`):
121
+ Add an adapter layer on top of the speech encoder.
122
+ speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
123
+ The LayerDrop probability for the speech encoder. See the [LayerDrop paper](
124
+ https://arxiv.org/abs/1909.11556) for more details.
125
+ feature_projection_input_dim (`int`, *optional*, defaults to 160):
126
+ Input dimension of the input feature projection of the speech encoder, i.e. the dimension after processing
127
+ input audios with [`SeamlessM4TFeatureExtractor`].
128
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
129
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
130
+ embeddings layer of the speech encoder.
131
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
132
+ Number of groups of 1D convolutional positional embeddings layer of the speech encoder.
133
+ adaptor_kernel_size (`int`, *optional*, defaults to 8):
134
+ Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
135
+ adaptor_stride (`int`, *optional*, defaults to 8):
136
+ Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
137
+ adaptor_dropout (`float`, *optional*, defaults to 0.1):
138
+ The dropout probability for all layers in the speech adapter.
139
+ num_adapter_layers (`int`, *optional*, defaults to 1):
140
+ Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
141
+ True`.
142
+ position_embeddings_type (`str`, *optional*, defaults to `"relative"`):
143
+ Can be set to `relative` or `rotary` for relative or rotary position embeddings respectively. If left
144
+ `None`, no relative position embedding is applied. Only applied to the speech encoder.
145
+ rotary_embedding_base (`int`, *optional*, defaults to 10000):
146
+ If `"rotary"` position embeddings are used, defines the size of the embedding base. Only applied to the
147
+ speech encoder.
148
+ max_source_positions (`int`, *optional*, defaults to 4096):
149
+ if `"relative"` position embeddings are used, defines the maximum source input positions. Only applied to
150
+ the speech encoder.
151
+ conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
152
+ Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
153
+
154
+ > Text-To-Unit (t2u) model specific parameters
155
+
156
+ t2u_bos_token_id (`int`, *optional*, defaults to 0):
157
+ The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
158
+ t2u_pad_token_id (`int`, *optional*, defaults to 1):
159
+ The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
160
+ t2u_eos_token_id (`int`, *optional*, defaults to 2):
161
+ The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
162
+ t2u_decoder_start_token_id (`int`, *optional*, defaults to 2):
163
+ If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
164
+ applied to the text-to-unit seq2seq model.
165
+ t2u_max_new_tokens (`int`, *optional*, defaults to 1024):
166
+ The maximum number of unit tokens to generate, ignoring the number of tokens in the prompt. Only applied
167
+ to the text-to-unit seq2seq model.
168
+ t2u_encoder_layers (`int`, *optional*, defaults to 6):
169
+ Number of hidden layers in the Transformer text-to-unit encoder.
170
+ t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
171
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
172
+ t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
173
+ Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
174
+ t2u_decoder_layers (`int`, *optional*, defaults to 6):
175
+ Number of hidden layers in the Transformer text-to-unit decoder.
176
+ t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
177
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
178
+ t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
179
+ Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
180
+ t2u_max_position_embeddings (`int`, *optional*, defaults to 2048):
181
+ The maximum sequence length that this model's text-to-unit component might ever be used with. Typically set
182
+ this to something large just in case (e.g., 512 or 1024 or 2048).
183
+
184
+ > Hifi-Gan Vocoder specific parameters
185
+
186
+ sampling_rate (`int`, *optional*, defaults to 16000):
187
+ The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
188
+ upsample_initial_channel (`int`, *optional*, defaults to 512):
189
+ The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
190
+ upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
191
+ A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
192
+ The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
193
+ *upsample_kernel_sizes*. Applies to the vocoder only.
194
+ upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
195
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
196
+ network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
197
+ the length of *upsample_rates*. Applies to the vocoder only.
198
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):
199
+ A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
200
+ field fusion (MRF) module. Applies to the vocoder only.
201
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
202
+ A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
203
+ the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
204
+ leaky_relu_slope (`float`, *optional*, defaults to 0.1):
205
+ The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
206
+ only.
207
+ unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
208
+ Vocabulary size of the SeamlessM4T vocoder. Defines the number of different unit tokens that can be
209
+ represented by the `input_ids` passed when calling the vocoder of [`~SeamlessM4TModel`],
210
+ [`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
211
+ unit_embed_dim (`int`, *optional*, defaults to 1280):
212
+ The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
213
+ lang_embed_dim (`int`, *optional*, defaults to 256):
214
+ The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
215
+ spkr_embed_dim (`int`, *optional*, defaults to 256):
216
+ The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
217
+ vocoder_num_langs (`int`, *optional*, defaults to 36):
218
+ Number of languages supported by the vocoder. Might be different from `t2u_num_langs`.
219
+ vocoder_num_spkrs (`int`, *optional*, defaults to 200):
220
+ Number of speakers supported by the vocoder.
221
+ variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
222
+ Kernel size of the duration predictor. Applies to the vocoder only.
223
+ var_pred_dropout (`float`, *optional*, defaults to 0.5):
224
+ The dropout probability of the duration predictor. Applies to the vocoder only.
225
+ vocoder_offset (`int`, *optional*, defaults to 4):
226
+ Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
227
+
228
+ ```python
229
+ >>> from transformers import SeamlessM4TModel, SeamlessM4TConfig
230
+
231
+ >>> # Initializing a SeamlessM4T "facebook/hf-seamless-m4t-medium" style configuration
232
+ >>> configuration = SeamlessM4TConfig()
233
+
234
+ >>> # Initializing a model from the "facebook/hf-seamless-m4t-medium" style configuration
235
+ >>> model = SeamlessM4TModel(configuration)
236
+
237
+ >>> # Accessing the model configuration
238
+ >>> configuration = model.config
239
+ ```"""
240
+
241
+ model_type = "seamless_m4t"
242
+
243
+ def __init__(
244
+ self,
245
+ vocab_size=256102,
246
+ t2u_vocab_size=10082,
247
+ # shared config
248
+ hidden_size=1024,
249
+ initializer_range=0.02,
250
+ layer_norm_eps=1e-5,
251
+ use_cache=True,
252
+ max_position_embeddings=1024,
253
+ is_encoder_decoder=True,
254
+ encoder_layerdrop=0.05,
255
+ decoder_layerdrop=0.05,
256
+ activation_function="relu",
257
+ dropout=0.1,
258
+ attention_dropout=0.1,
259
+ activation_dropout=0.0,
260
+ scale_embedding=True,
261
+ # text encoder|decoder
262
+ encoder_layers=24,
263
+ encoder_ffn_dim=8192,
264
+ encoder_attention_heads=16,
265
+ decoder_layers=24,
266
+ decoder_ffn_dim=8192,
267
+ decoder_attention_heads=16,
268
+ decoder_start_token_id=3,
269
+ max_new_tokens=256,
270
+ pad_token_id=0,
271
+ bos_token_id=2,
272
+ eos_token_id=3,
273
+ # speech_encoder
274
+ speech_encoder_layers=24,
275
+ speech_encoder_attention_heads=16,
276
+ speech_encoder_intermediate_size=4096,
277
+ speech_encoder_hidden_act="swish",
278
+ speech_encoder_dropout=0.0,
279
+ add_adapter=True,
280
+ speech_encoder_layerdrop=0.1,
281
+ feature_projection_input_dim=160,
282
+ num_conv_pos_embeddings=128,
283
+ num_conv_pos_embedding_groups=16,
284
+ adaptor_kernel_size=8,
285
+ adaptor_stride=8,
286
+ adaptor_dropout=0.1,
287
+ num_adapter_layers=1,
288
+ position_embeddings_type="relative",
289
+ rotary_embedding_base=10000,
290
+ max_source_positions=4096,
291
+ conv_depthwise_kernel_size=31,
292
+ # t2u config
293
+ t2u_bos_token_id=0,
294
+ t2u_pad_token_id=1,
295
+ t2u_eos_token_id=2,
296
+ t2u_decoder_start_token_id=2,
297
+ t2u_max_new_tokens=1024,
298
+ t2u_encoder_layers=6,
299
+ t2u_encoder_ffn_dim=8192,
300
+ t2u_encoder_attention_heads=16,
301
+ t2u_decoder_layers=6,
302
+ t2u_decoder_ffn_dim=8192,
303
+ t2u_decoder_attention_heads=16,
304
+ t2u_max_position_embeddings=2048,
305
+ # hifi-gan vocoder config
306
+ sampling_rate=16000,
307
+ upsample_initial_channel=512,
308
+ upsample_rates=[5, 4, 4, 2, 2],
309
+ upsample_kernel_sizes=[11, 8, 8, 4, 4],
310
+ resblock_kernel_sizes=[3, 7, 11],
311
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
312
+ leaky_relu_slope=0.1,
313
+ # specific to Code Hifi-Gan
314
+ unit_hifi_gan_vocab_size=10000,
315
+ unit_embed_dim=1280,
316
+ lang_embed_dim=256,
317
+ spkr_embed_dim=256,
318
+ vocoder_num_langs=36,
319
+ vocoder_num_spkrs=200,
320
+ variance_predictor_kernel_size=3,
321
+ var_pred_dropout=0.5,
322
+ vocoder_offset=4,
323
+ **kwargs,
324
+ ):
325
+ # overall_config
326
+ self.vocab_size = vocab_size
327
+ self.t2u_vocab_size = t2u_vocab_size
328
+ self.hidden_size = hidden_size
329
+ self.initializer_range = initializer_range
330
+ self.layer_norm_eps = layer_norm_eps
331
+ self.max_position_embeddings = max_position_embeddings
332
+ self.use_cache = use_cache
333
+ self.max_new_tokens = max_new_tokens
334
+ self.encoder_layerdrop = encoder_layerdrop
335
+ self.decoder_layerdrop = decoder_layerdrop
336
+ self.activation_function = activation_function
337
+ self.dropout = dropout
338
+ self.attention_dropout = attention_dropout
339
+ self.activation_dropout = activation_dropout
340
+ self.scale_embedding = scale_embedding
341
+ # for proper config init
342
+ self.num_attention_heads = decoder_attention_heads
343
+ self.num_hidden_layers = decoder_layers
344
+
345
+ # text|unit encoder|decoder
346
+ self.encoder_layers = encoder_layers
347
+ self.encoder_ffn_dim = encoder_ffn_dim
348
+ self.encoder_attention_heads = encoder_attention_heads
349
+ self.decoder_layers = decoder_layers
350
+ self.decoder_ffn_dim = decoder_ffn_dim
351
+ self.decoder_attention_heads = decoder_attention_heads
352
+
353
+ # speech_encoder
354
+ self.speech_encoder_layers = speech_encoder_layers
355
+ self.speech_encoder_hidden_act = speech_encoder_hidden_act
356
+ self.speech_encoder_dropout = speech_encoder_dropout
357
+ self.speech_encoder_attention_heads = speech_encoder_attention_heads
358
+ self.speech_encoder_layerdrop = speech_encoder_layerdrop
359
+ self.speech_encoder_intermediate_size = speech_encoder_intermediate_size
360
+ self.feature_projection_input_dim = feature_projection_input_dim
361
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
362
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
363
+ self.adaptor_kernel_size = adaptor_kernel_size
364
+ self.adaptor_stride = adaptor_stride
365
+ self.adaptor_dropout = adaptor_dropout
366
+ self.num_adapter_layers = num_adapter_layers
367
+ self.position_embeddings_type = position_embeddings_type
368
+ self.rotary_embedding_base = rotary_embedding_base
369
+ self.max_source_positions = max_source_positions
370
+ self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
371
+ self.add_adapter = add_adapter
372
+
373
+ # t2u config
374
+ self.t2u_bos_token_id = t2u_bos_token_id
375
+ self.t2u_pad_token_id = t2u_pad_token_id
376
+ self.t2u_eos_token_id = t2u_eos_token_id
377
+ self.t2u_decoder_start_token_id = t2u_decoder_start_token_id
378
+ self.t2u_max_new_tokens = t2u_max_new_tokens
379
+ self.t2u_encoder_layers = t2u_encoder_layers
380
+ self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
381
+ self.t2u_encoder_attention_heads = t2u_encoder_attention_heads
382
+ self.t2u_decoder_layers = t2u_decoder_layers
383
+ self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
384
+ self.t2u_decoder_attention_heads = t2u_decoder_attention_heads
385
+ self.t2u_max_position_embeddings = t2u_max_position_embeddings
386
+
387
+ # hifi-gan vocoder config
388
+ # original parameters specific to Hifi-Gan
389
+ self.sampling_rate = sampling_rate
390
+ self.upsample_initial_channel = upsample_initial_channel
391
+ self.upsample_rates = upsample_rates
392
+ self.upsample_kernel_sizes = upsample_kernel_sizes
393
+ self.resblock_kernel_sizes = resblock_kernel_sizes
394
+ self.resblock_dilation_sizes = resblock_dilation_sizes
395
+ self.leaky_relu_slope = leaky_relu_slope
396
+
397
+ # specific to Code Hifi-Gan
398
+ self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
399
+ self.unit_embed_dim = unit_embed_dim
400
+ self.lang_embed_dim = lang_embed_dim
401
+ self.spkr_embed_dim = spkr_embed_dim
402
+ self.vocoder_num_langs = vocoder_num_langs
403
+ self.vocoder_num_spkrs = vocoder_num_spkrs
404
+ self.variance_predictor_kernel_size = variance_predictor_kernel_size
405
+ self.var_pred_dropout = var_pred_dropout
406
+ self.vocoder_offset = vocoder_offset
407
+
408
+ super().__init__(
409
+ pad_token_id=pad_token_id,
410
+ bos_token_id=bos_token_id,
411
+ eos_token_id=eos_token_id,
412
+ decoder_start_token_id=decoder_start_token_id,
413
+ is_encoder_decoder=is_encoder_decoder,
414
+ max_position_embeddings=max_position_embeddings,
415
+ **kwargs,
416
+ )
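The configuration above exposes separate hyperparameters for the text, speech, text-to-unit and vocoder sub-models. As a minimal, hedged sketch (illustrative values only, using keyword arguments that appear in the `__init__` above), a scaled-down configuration could be built like this:

```python
# Minimal sketch: a scaled-down SeamlessM4TConfig built only from keyword
# arguments defined in the __init__ above; the values are illustrative.
from transformers import SeamlessM4TConfig

small_config = SeamlessM4TConfig(
    hidden_size=512,
    encoder_layers=6,
    decoder_layers=6,
    speech_encoder_layers=6,
    t2u_encoder_layers=2,
    t2u_decoder_layers=2,
)

# num_hidden_layers mirrors decoder_layers, as set in __init__ above.
print(small_config.hidden_size, small_config.num_hidden_layers)
```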
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py ADDED
@@ -0,0 +1,397 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Converting Meta SeamlessM4T checkpoints from seamless_communication to HF."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+ from pathlib import Path
21
+
22
+ import torch
23
+ from accelerate.utils.modeling import find_tied_parameters
24
+ from seamless_communication.models.inference.translator import Translator
25
+
26
+ from transformers import (
27
+ SeamlessM4TConfig,
28
+ SeamlessM4TFeatureExtractor,
29
+ SeamlessM4TModel,
30
+ SeamlessM4TProcessor,
31
+ SeamlessM4TTokenizer,
32
+ )
33
+ from transformers.utils import logging
34
+
35
+
36
+ UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ] # fmt: skip
37
+ VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",] # fmt: skip
38
+ MEDIUM_SUPPORTED_LANGUAGES = ["ace","ace_Latn","acm","acq","aeb","afr","ajp","aka","amh","apc","arb","ars","ary","arz","asm","ast","awa","ayr","azb","azj","bak","bam","ban","bel","bem","ben","bho","bjn","bjn_Latn","bod","bos","bug","bul","cat","ceb","ces","cjk","ckb","crh","cym","dan","deu","dik","dyu","dzo","ell","eng","epo","est","eus","ewe","fao","pes","fij","fin","fon","fra","fur","fuv","gla","gle","glg","grn","guj","hat","hau","heb","hin","hne","hrv","hun","hye","ibo","ilo","ind","isl","ita","jav","jpn","kab","kac","kam","kan","kas","kas_Deva","kat","knc","knc_Latn","kaz","kbp","kea","khm","kik","kin","kir","kmb","kon","kor","kmr","lao","lvs","lij","lim","lin","lit","lmo","ltg","ltz","lua","lug","luo","lus","mag","mai","mal","mar","min","mkd","plt","mlt","mni","khk","mos","mri","zsm","mya","nld","nno","nob","npi","nso","nus","nya","oci","gaz","ory","pag","pan","pap","pol","por","prs","pbt","quy","ron","run","rus","sag","san","sat","scn","shn","sin","slk","slv","smo","sna","snd","som","sot","spa","als","srd","srp","ssw","sun","swe","swh","szl","tam","tat","tel","tgk","tgl","tha","tir","taq","taq_Tfng","tpi","tsn","tso","tuk","tum","tur","twi","tzm","uig","ukr","umb","urd","uzn","vec","vie","war","wol","xho","ydd","yor","yue","cmn","cmn_Hant","zul",] # fmt: skip
39
+ LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",] # fmt: skip
40
+
41
+
42
+ def assert_param_count(model_1, model_2):
43
+ count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0])
44
+ count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0])
45
+ assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
46
+
47
+
48
+ def param_count(model):
49
+ return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
50
+
51
+
52
+ def _grab_best_device(use_gpu=True):
53
+ if torch.cuda.device_count() > 0 and use_gpu:
54
+ device = "cuda"
55
+ else:
56
+ device = "cpu"
57
+ return torch.device(device)
58
+
59
+
60
+ logging.set_verbosity_info()
61
+ logger = logging.get_logger(__name__)
62
+
63
+ vocoder_convert_list = [
64
+ ("ups", "hifi_gan.upsampler"),
65
+ ("conv_pre", "hifi_gan.conv_pre"),
66
+ ("resblocks", "hifi_gan.resblocks"),
67
+ ("conv_post", "hifi_gan.conv_post"),
68
+ ("lang", "language_embedding"),
69
+ ("spkr", "speaker_embedding"),
70
+ ("dict.", "unit_embedding."),
71
+ ("dur_predictor.conv1.0", "dur_predictor.conv1"),
72
+ ("dur_predictor.conv2.0", "dur_predictor.conv2"),
73
+ ]
74
+
75
+ # order is important
76
+ wav2vec_convert_list = [
77
+ ("speech_encoder_frontend.model_dim_proj", "feature_projection.projection"),
78
+ ("speech_encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"),
79
+ ("speech_encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"),
80
+ ("speech_encoder.inner.layers", "encoder.layers"),
81
+ ("speech_encoder.inner_layer_norm", "encoder.layer_norm"),
82
+ ("speech_encoder.adaptor_layers", "adapter.layers"),
83
+ ("inner_proj", "intermediate_dense"),
84
+ ("self_attn.output_proj", "self_attn.linear_out"),
85
+ ("output_proj", "output_dense"),
86
+ ("self_attn.k_proj", "self_attn.linear_k"),
87
+ ("self_attn.v_proj", "self_attn.linear_v"),
88
+ ("self_attn.q_proj", "self_attn.linear_q"),
89
+ ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"),
90
+ ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"),
91
+ ("self_attn.sdpa.r_proj", "self_attn.linear_pos"),
92
+ ("conv.pointwise_conv1", "conv_module.pointwise_conv1"),
93
+ ("conv.pointwise_conv2", "conv_module.pointwise_conv2"),
94
+ ("conv.depthwise_conv", "conv_module.depthwise_conv"),
95
+ ("conv.batch_norm", "conv_module.batch_norm"),
96
+ ("conv_layer_norm", "conv_module.layer_norm"),
97
+ ("speech_encoder.proj1", "intermediate_ffn.intermediate_dense"),
98
+ ("speech_encoder.proj2", "intermediate_ffn.output_dense"),
99
+ ("speech_encoder.layer_norm", "inner_layer_norm"),
100
+ ]
101
+
102
+ t2u_convert_list = [
103
+ ("t2u_model.final_proj", "lm_head"),
104
+ ("t2u_model.", "model."),
105
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
106
+ ("encoder_decoder_attn", "cross_attention"),
107
+ ("linear_k", "k_proj"),
108
+ ("linear_v", "v_proj"),
109
+ ("linear_q", "q_proj"),
110
+ ("ffn.inner_proj", "ffn.fc1"),
111
+ ("ffn.output_proj", "ffn.fc2"),
112
+ ("output_proj", "out_proj"),
113
+ ("decoder_frontend.embed", "decoder.embed_tokens"),
114
+ ]
115
+
116
+ text_convert_list = [
117
+ ("text_encoder.", ""),
118
+ ("text_decoder.", ""),
119
+ ("text_encoder_frontend.embed", "embed_tokens"),
120
+ ("text_decoder_frontend.embed", "embed_tokens"),
121
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
122
+ ("encoder_decoder_attn", "cross_attention"),
123
+ ("linear_k", "k_proj"),
124
+ ("linear_v", "v_proj"),
125
+ ("linear_q", "q_proj"),
126
+ ("ffn.inner_proj", "ffn.fc1"),
127
+ ("ffn.output_proj", "ffn.fc2"),
128
+ ("output_proj", "out_proj"),
129
+ ("final_proj", "lm_head"),
130
+ ]
131
+
132
+ CUR_PATH = os.path.dirname(os.path.abspath(__file__))
133
+ default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
134
+ CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "huggingface", "hub")
135
+
136
+
137
+ def _load_hf_config(model_type="medium"):
138
+ if model_type == "medium":
139
+ kwargs = {
140
+ "vocab_size": 256206,
141
+ "t2u_vocab_size": 10082,
142
+ "hidden_size": 1024,
143
+ "max_position_embeddings": 4096,
144
+ "encoder_layers": 12,
145
+ "decoder_layers": 12,
146
+ "encoder_ffn_dim": 4096,
147
+ "decoder_ffn_dim": 4096,
148
+ "t2u_encoder_layers": 4,
149
+ "t2u_decoder_layers": 4,
150
+ "speech_encoder_layers": 12,
151
+ }
152
+ return SeamlessM4TConfig(**kwargs)
153
+ else:
154
+ return SeamlessM4TConfig()
155
+
156
+
157
+ def _convert_model(
158
+ original_model,
159
+ hf_model,
160
+ convert_list,
161
+ device,
162
+ unwanted_prefix="model.",
163
+ filter_state_dict="speech",
164
+ exclude_state_dict=None,
165
+ ):
166
+ state_dict = original_model.state_dict()
167
+
168
+ # filter func
169
+ if isinstance(filter_state_dict, str):
170
+
171
+ def filter_func(x):
172
+ return filter_state_dict in x[0]
173
+
174
+ else:
175
+
176
+ def filter_func(item):
177
+ if exclude_state_dict is not None and exclude_state_dict in item[0]:
178
+ return False
179
+ for filter_el in filter_state_dict:
180
+ if filter_el in item[0]:
181
+ return True
182
+
183
+ return False
184
+
185
+ state_dict = dict(filter(filter_func, state_dict.items()))
186
+
187
+ for k, v in list(state_dict.items()):
188
+ new_k = k[len(unwanted_prefix) :]
189
+ for old_layer_name, new_layer_name in convert_list:
190
+ if old_layer_name in new_k:
191
+ new_k = new_k.replace(old_layer_name, new_layer_name)
192
+
193
+ # must do it by hand
194
+ if ".layer_norm" in new_k and new_k.split(".layer_norm")[0][-1].isnumeric():
195
+ new_k = new_k.replace("layer_norm", "final_layer_norm")
196
+
197
+ state_dict[new_k] = state_dict.pop(k)
198
+
199
+ extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
200
+ extra_keys = set(extra_keys)
201
+ missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
202
+ missing_keys = set({k for k in missing_keys if "final_logits_bias" not in k})
203
+ if len(extra_keys) != 0:
204
+ raise ValueError(f"extra keys found: {extra_keys}")
205
+ if len(missing_keys) != 0:
206
+ raise ValueError(f"missing keys: {missing_keys}")
207
+ hf_model.load_state_dict(state_dict, strict=False)
208
+ n_params = param_count(hf_model)
209
+
210
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params")
211
+
212
+ hf_model.eval()
213
+ hf_model.to(device)
214
+ del state_dict
215
+
216
+ return hf_model
217
+
218
+
219
+ def load_model(save_dir, model_type, repo_id):
220
+ """
221
+ Meta SeamlessM4T is made of 8 main components:
222
+ - speech_encoder (#1) and speech_encoder_frontend (#2)
223
+ - t2u_model (#3)
224
+ - text_encoder (#4) and text_encoder_frontend (#5)
225
+ - text_decoder (#6) [and text_decoder_frontend (#5), equal to text_encoder_frontend]
226
+ - final_proj (#7)
227
+ - vocoder (#8)
228
+ """
229
+ device = _grab_best_device()
230
+ if model_type == "medium":
231
+ name = "seamlessM4T_medium"
232
+ else:
233
+ name = "seamlessM4T_large"
234
+
235
+ original_model = Translator(name, "vocoder_36langs", device, torch.float32)
236
+
237
+ ######### TOKENIZER
238
+
239
+ langs = MEDIUM_SUPPORTED_LANGUAGES if model_type == "medium" else LARGE_SUPPORTED_LANGUAGES
240
+ langs = [f"__{lang}__" for lang in langs]
241
+ vocab_file = os.path.join(os.path.expanduser("~"), "tokenizer", model_type, "tokenizer.model")
242
+
243
+ save_dir = os.path.join(save_dir, name)
244
+ Path(save_dir).mkdir(exist_ok=True)
245
+
246
+ tokenizer = SeamlessM4TTokenizer(vocab_file, additional_special_tokens=langs)
247
+
248
+ sanity_check_lang_id = tokenizer.convert_tokens_to_ids("__fra__")
249
+
250
+ tokenizer.save_pretrained(save_dir)
251
+ tokenizer = SeamlessM4TTokenizer.from_pretrained(save_dir)
252
+
253
+ if sanity_check_lang_id != tokenizer.convert_tokens_to_ids("__fra__"):
254
+ raise ValueError(
255
+ f"Error in tokenizer saving/loading - __fra__ lang id is not coherent: {sanity_check_lang_id} vs {tokenizer.convert_tokens_to_ids('__fra__')}"
256
+ )
257
+
258
+ ####### get language to ids dict
259
+ text_decoder_lang_code_to_id = {lang.replace("__", ""): tokenizer.convert_tokens_to_ids(lang) for lang in langs}
260
+ # offset: vocoder unit vocab size + 5 (for EOS/PAD/BOS/UNK/MSK) + len(supported_languages)
261
+ t2u_lang_code_to_id = {
262
+ code.replace("__", ""): i + 10005 + len(UNIT_SUPPORTED_LANGUAGES)
263
+ for i, code in enumerate(UNIT_SUPPORTED_LANGUAGES)
264
+ }
265
+ vocoder_lang_code_to_id = {code.replace("__", ""): i for i, code in enumerate(VOCODER_SUPPORTED_LANGUAGES)}
266
+
267
+ ######### FE
268
+
269
+ fe = SeamlessM4TFeatureExtractor(language_code=langs)
270
+
271
+ fe.save_pretrained(save_dir)
272
+ fe = SeamlessM4TFeatureExtractor.from_pretrained(save_dir)
273
+
274
+ processor = SeamlessM4TProcessor(feature_extractor=fe, tokenizer=tokenizer)
275
+ processor.save_pretrained(save_dir)
276
+ processor.push_to_hub(repo_id=repo_id, create_pr=True)
277
+
278
+ processor = SeamlessM4TProcessor.from_pretrained(save_dir)
279
+
280
+ ######## Model
281
+
282
+ # init model
283
+ hf_config = _load_hf_config(model_type)
284
+ hf_model = SeamlessM4TModel(hf_config)
285
+
286
+ hf_model.generation_config.__setattr__("text_decoder_lang_to_code_id", text_decoder_lang_code_to_id)
287
+ hf_model.generation_config.__setattr__("t2u_lang_code_to_id", t2u_lang_code_to_id)
288
+ hf_model.generation_config.__setattr__("vocoder_lang_code_to_id", vocoder_lang_code_to_id)
289
+
290
+ # -1. take care of vocoder
291
+ # similarly to SpeechT5, weight norm must be applied before loading the weights and removed afterwards
292
+ hf_model.vocoder.apply_weight_norm()
293
+ hf_model.vocoder = _convert_model(
294
+ original_model,
295
+ hf_model.vocoder,
296
+ vocoder_convert_list,
297
+ device,
298
+ unwanted_prefix="vocoder.code_generator.",
299
+ filter_state_dict="vocoder",
300
+ )
301
+ hf_model.vocoder.remove_weight_norm()
302
+
303
+ # 1. take care of speech encoder
304
+ wav2vec = hf_model.speech_encoder
305
+ hf_model.speech_encoder = _convert_model(
306
+ original_model, wav2vec, wav2vec_convert_list, device, unwanted_prefix="model.", filter_state_dict="speech"
307
+ )
308
+
309
+ # 2. take care of t2u
310
+
311
+ hf_model.t2u_model = _convert_model(
312
+ original_model,
313
+ hf_model.t2u_model,
314
+ t2u_convert_list,
315
+ device,
316
+ unwanted_prefix="model.",
317
+ filter_state_dict="t2u_model",
318
+ )
319
+
320
+ # 3. take care of text encoder
321
+ hf_model.text_encoder = _convert_model(
322
+ original_model,
323
+ hf_model.text_encoder,
324
+ text_convert_list,
325
+ device,
326
+ unwanted_prefix="model.",
327
+ filter_state_dict=["model.text_encoder"],
328
+ exclude_state_dict="t2u_model",
329
+ )
330
+
331
+ # 4. take care of text decoder
332
+ hf_model.text_decoder = _convert_model(
333
+ original_model,
334
+ hf_model.text_decoder,
335
+ text_convert_list,
336
+ device,
337
+ unwanted_prefix="model.",
338
+ filter_state_dict=["model.text_decoder"],
339
+ exclude_state_dict="t2u_model",
340
+ )
341
+
342
+ # 5. take care of final proj
343
+ hf_model.lm_head = _convert_model(
344
+ original_model,
345
+ hf_model.lm_head,
346
+ [("final_proj.", "")],
347
+ device,
348
+ unwanted_prefix="model.",
349
+ filter_state_dict=["model.final_proj"],
350
+ exclude_state_dict="t2u_model",
351
+ )
352
+
353
+ # sanity check
354
+ print(find_tied_parameters(hf_model))
355
+
356
+ count_1 = param_count(hf_model)
357
+ count_2 = param_count(original_model)
358
+
359
+ print(f"HF MODEL:{count_1}, ORIGINAL_MODEL: {count_2}, diff:{count_1 - count_2}")
360
+ print(f"HF MODEL excluding embeddings:{hf_model.num_parameters(exclude_embeddings=True)}")
361
+
362
+ del original_model
363
+
364
+ hf_model.generation_config._from_model_config = False
365
+ hf_model.save_pretrained(save_dir)
366
+ hf_model.push_to_hub(repo_id=repo_id, create_pr=True)
367
+ hf_model = SeamlessM4TModel.from_pretrained(save_dir)
368
+
369
+
370
+ if __name__ == "__main__":
371
+ parser = argparse.ArgumentParser()
372
+ # Required parameters
373
+
374
+ parser.add_argument(
375
+ "--model_type",
376
+ default="medium",
377
+ type=str,
378
+ help="Model type.",
379
+ )
380
+
381
+ parser.add_argument(
382
+ "--save_dir",
383
+ default="/home/ubuntu/weights",
384
+ type=str,
385
+ help="Path to the output PyTorch model.",
386
+ )
387
+
388
+ parser.add_argument(
389
+ "--repo_id",
390
+ default="facebook/hf-seamless-m4t-medium",
391
+ type=str,
392
+ help="Repo ID.",
393
+ )
394
+
395
+ args = parser.parse_args()
396
+
397
+ load_model(args.save_dir, args.model_type, args.repo_id)
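The script above can also be driven programmatically through `load_model`. A hedged sketch follows; it assumes the `seamless_communication` and `accelerate` dependencies imported at the top of the script are installed and that the SentencePiece model sits where `load_model` expects it (`~/tokenizer/<model_type>/tokenizer.model`); the repo id is a placeholder.

```python
# Hedged sketch of calling the conversion entry point directly instead of the CLI.
# Assumes this file is importable as a local module and its dependencies are installed.
from convert_fairseq2_to_hf import load_model  # hypothetical local import of the script above

load_model(
    save_dir="/path/to/weights",            # where tokenizer, processor and model are saved
    model_type="medium",                    # "medium" selects the reduced _load_hf_config() layout
    repo_id="your-username/seamless-m4t",   # placeholder Hub repo that receives the pushed PRs
)
```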
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py ADDED
@@ -0,0 +1,306 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for SeamlessM4T
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...utils import is_torch_available
24
+
25
+
26
+ if is_torch_available():
27
+ import torch
28
+
29
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
30
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
31
+ from ...feature_extraction_utils import BatchFeature
32
+ from ...utils import PaddingStrategy, TensorType, logging
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+
38
+ class SeamlessM4TFeatureExtractor(SequenceFeatureExtractor):
39
+ r"""
40
+ Constructs a SeamlessM4T feature extractor.
41
+
42
+ This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
43
+ should refer to this superclass for more information regarding those methods.
44
+
45
+ This class extracts mel-filter bank features from raw speech.
46
+
47
+ Args:
48
+ feature_size (`int`, *optional*, defaults to 80):
49
+ The feature dimension of the extracted features.
50
+ sampling_rate (`int`, *optional*, defaults to 16000):
51
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
52
+ num_mel_bins (`int`, *optional*, defaults to 80):
53
+ Number of Mel-frequency bins.
54
+ padding_value (`float`, *optional*, defaults to 0.0):
55
+ The value that is used to fill the padding vectors.
56
+ stride (`int`, *optional*, defaults to 2):
57
+ Stride used to reshape audios from shape (batch_size, num_frames, num_mel_bins) to
58
+ (batch_size, num_frames // stride, num_mel_bins * stride).
59
+ """
60
+
61
+ model_input_names = ["input_features", "attention_mask"]
62
+
63
+ def __init__(
64
+ self,
65
+ feature_size=80,
66
+ sampling_rate=16000,
67
+ num_mel_bins=80,
68
+ padding_value=0.0,
69
+ stride=2,
70
+ **kwargs,
71
+ ):
72
+ self.num_mel_bins = num_mel_bins
73
+ self.return_attention_mask = True
74
+ self.stride = stride
75
+
76
+ mel_filters = mel_filter_bank(
77
+ num_frequency_bins=256,
78
+ num_mel_filters=self.num_mel_bins,
79
+ min_frequency=20,
80
+ max_frequency=sampling_rate // 2,
81
+ sampling_rate=sampling_rate,
82
+ norm=None,
83
+ mel_scale="kaldi",
84
+ triangularize_in_mel_space=True,
85
+ )
86
+
87
+ self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
88
+ self.window = window_function(400, "povey", periodic=False)
89
+
90
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
91
+
92
+ @staticmethod
93
+ # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
94
+ def zero_mean_unit_var_norm(
95
+ input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
96
+ ) -> List[np.ndarray]:
97
+ """
98
+ Every array in the list is normalized to have zero mean and unit variance
99
+ """
100
+ if attention_mask is not None:
101
+ attention_mask = np.array(attention_mask, np.int32)
102
+ normed_input_values = []
103
+
104
+ for vector, length in zip(input_values, attention_mask.sum(-1)):
105
+ normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
106
+ if length < normed_slice.shape[0]:
107
+ normed_slice[length:] = padding_value
108
+
109
+ normed_input_values.append(normed_slice)
110
+ else:
111
+ normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
112
+
113
+ return normed_input_values
114
+
115
+ def _extract_fbank_features(
116
+ self,
117
+ waveform: np.ndarray,
118
+ ) -> np.ndarray:
119
+ """
120
+ Get mel-filter bank features. The Kaldi-compliant extraction expects 16-bit signed integer scale inputs,
121
+ and hence the waveform should not be normalized before feature extraction.
122
+ """
123
+ # by default, it extracts the left channel if stereo
124
+ if len(waveform.shape) == 2:
125
+ waveform = waveform[0]
126
+
127
+ waveform = np.squeeze(waveform) * (2**15) # Kaldi compliance: 16-bit signed integers
128
+ features = spectrogram(
129
+ waveform,
130
+ self.window,
131
+ frame_length=400,
132
+ hop_length=160,
133
+ fft_length=512,
134
+ power=2.0,
135
+ center=False,
136
+ preemphasis=0.97,
137
+ mel_filters=self.mel_filters,
138
+ log_mel="log",
139
+ mel_floor=1.192092955078125e-07,
140
+ remove_dc_offset=True,
141
+ ).T
142
+ return features
143
+
144
+ def __call__(
145
+ self,
146
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
147
+ padding: Union[bool, str, PaddingStrategy] = True,
148
+ pad_to_multiple_of: Optional[int] = 2,
149
+ max_length: Optional[int] = None,
150
+ truncation: bool = False,
151
+ return_tensors: Optional[Union[str, TensorType]] = None,
152
+ sampling_rate: Optional[int] = None,
153
+ return_attention_mask: Optional[bool] = None,
154
+ do_normalize_per_mel_bins: Optional[bool] = True,
155
+ **kwargs,
156
+ ) -> BatchFeature:
157
+ """
158
+ Main method to featurize and prepare for the model one or several sequence(s).
159
+
160
+ Args:
161
+ raw_speech (`np.ndarray`, `torch.Tensor`, `List[float]`, `List[np.ndarray]`, `List[torch.Tensor]`,
162
+ `List[List[float]]`, `List[List[List[float]]]`):
163
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array,
164
+ a torch tensor, a list of float values, a list of numpy arrays, a list of torch tensors,
165
+ a list of list of float values or a list of a list of list of float values.
166
+ If `raw_speech` is a one-dimensional `np.ndarray`, `torch.Tensor` or a `List[float]`, `raw_speech` is
167
+ considered a single-channel, single-sample sound. In all other cases, the first dimension of
168
+ `raw_speech`, whether from an `np.ndarray`, a `torch.Tensor` or a `List[...]`,
169
+ corresponds to the number of samples in the batch, and the number of channels
170
+ (i.e. mono or stereo character) is derived from the other dimensions
171
+ (1D -> single-channel waveform batches; 2D-> stereo-channel waveform batches).
172
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
173
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
174
+ index) among:
175
+
176
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
177
+ sequence is provided).
178
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
179
+ acceptable input length for the model if that argument is not provided.
180
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
181
+ lengths).
182
+ pad_to_multiple_of (`int`, *optional*, defaults to 2):
183
+ If set will pad the sequence to a multiple of the provided value.
184
+
185
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
186
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
187
+ max_length (`int`, *optional*):
188
+ Maximum length of the returned list and optionally padding length (see above).
189
+ truncation (`bool`):
190
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
191
+ return_attention_mask (`bool`, *optional*):
192
+ Whether to return the attention mask. If left to the default, will return the attention mask according
193
+ to the specific feature_extractor's default.
194
+
195
+ [What are attention masks?](../glossary#attention-mask)
196
+
197
+ <Tip>
198
+
199
+ For SeamlessM4T models, `attention_mask` should always be passed for batched inference, to avoid subtle
200
+ bugs.
201
+
202
+ </Tip>
203
+
204
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
205
+ If set, will return tensors instead of list of python integers. Acceptable values are:
206
+
207
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
208
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
209
+ - `'np'`: Return Numpy `np.ndarray` objects.
210
+ sampling_rate (`int`, *optional*):
211
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
212
+ `sampling_rate` at the forward call to prevent silent errors.
213
+ do_normalize_per_mel_bins (`bool`, *optional*, defaults to `True`):
214
+ Whether or not to zero-mean unit-variance normalize the input per mel-channel.
215
+ kwargs (*optional*):
216
+ Remaining dictionary of keyword arguments that will be passed to the tokenizer or the feature
217
+ extractor.
218
+ """
219
+ if sampling_rate is not None:
220
+ if sampling_rate != self.sampling_rate:
221
+ raise ValueError(
222
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
223
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
224
+ f" {self.sampling_rate} and not {sampling_rate}."
225
+ )
226
+ else:
227
+ logger.warning(
228
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
229
+ "Failing to do so can result in silent errors that might be hard to debug."
230
+ )
231
+
232
+ return_attention_mask = (
233
+ return_attention_mask if return_attention_mask is not None else self.return_attention_mask
234
+ )
235
+
236
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
237
+ if is_batched_numpy and len(raw_speech.shape) > 3:
238
+ raise ValueError(f"Only mono-channel or stereo-channel audio is supported for input to {self}")
239
+
240
+ acceptable_types = (
241
+ (torch.Tensor, np.ndarray, tuple, list) if is_torch_available() else (np.ndarray, tuple, list)
242
+ )
243
+ is_batched = is_batched_numpy or (
244
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], acceptable_types))
245
+ )
246
+
247
+ if is_batched:
248
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
249
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
250
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
251
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
252
+ raw_speech = raw_speech.astype(np.float32)
253
+
254
+ # always return batch
255
+ if not is_batched:
256
+ raw_speech = [raw_speech]
257
+
258
+ # extract fbank features
259
+ features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
260
+
261
+ if do_normalize_per_mel_bins:
262
+ # torch defaults to ddof=1, and numpy defaults to ddof=0
263
+ features = [
264
+ (x - np.expand_dims(x.mean(0), 0)) / np.sqrt(np.expand_dims(x.var(0, ddof=1), 0) + 1e-7)
265
+ for x in features
266
+ ]
267
+
268
+ # convert into correct format for padding
269
+ encoded_inputs = BatchFeature({"input_features": features})
270
+
271
+ padded_inputs = self.pad(
272
+ encoded_inputs,
273
+ padding=padding,
274
+ max_length=max_length,
275
+ truncation=truncation,
276
+ pad_to_multiple_of=pad_to_multiple_of,
277
+ return_attention_mask=True,
278
+ return_tensors="np",
279
+ )
280
+
281
+ # SeamlessM4T needs to process extracted features
282
+ input_features = padded_inputs.get("input_features")
283
+ attention_mask = padded_inputs.pop("attention_mask")
284
+
285
+ batch_size, num_frames, num_channels = input_features.shape
286
+
287
+ remainder = num_frames % self.stride
288
+ if remainder != 0:
289
+ input_features = input_features[:, :num_frames, :]
290
+ attention_mask = attention_mask[:, :num_frames]
291
+
292
+ input_features = np.reshape(
293
+ input_features, (batch_size, num_frames // self.stride, num_channels * self.stride)
294
+ )
295
+
296
+ indices = np.arange(0, num_frames)
297
+ attention_mask = attention_mask[:, indices % self.stride == 1]
298
+
299
+ padded_inputs["input_features"] = input_features
300
+ if return_attention_mask:
301
+ padded_inputs["attention_mask"] = attention_mask
302
+
303
+ if return_tensors is not None:
304
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
305
+
306
+ return padded_inputs
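As a minimal sketch of the feature extractor's output layout (assuming a one-second mono waveform at 16 kHz; the silent placeholder audio is only illustrative), the stride-2 stacking described in the `stride` docstring yields `num_mel_bins * stride = 160` channels per frame:

```python
# Minimal sketch, assuming 16 kHz mono input; the zero waveform is a placeholder.
import numpy as np
from transformers import SeamlessM4TFeatureExtractor

feature_extractor = SeamlessM4TFeatureExtractor()  # defaults: 80 mel bins, stride 2
waveform = np.zeros(16000, dtype=np.float32)       # 1 second of silence

inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
# Frames are stacked pairwise (stride=2), so the last dimension is 80 * 2 = 160.
print(inputs["input_features"].shape, inputs["attention_mask"].shape)
```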
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/modeling_seamless_m4t.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/processing_seamless_m4t.py ADDED
@@ -0,0 +1,117 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Audio/Text processor class for SeamlessM4T
17
+ """
18
+
19
+ from ...processing_utils import ProcessorMixin
20
+
21
+
22
+ class SeamlessM4TProcessor(ProcessorMixin):
23
+ r"""
24
+ Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a
25
+ single processor.
26
+
27
+ [`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and
28
+ [`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for
29
+ more information.
30
+
31
+ Args:
32
+ feature_extractor ([`SeamlessM4TFeatureExtractor`]):
33
+ The audio processor is a required input.
34
+ tokenizer ([`SeamlessM4TTokenizerFast`]):
35
+ The tokenizer is a required input.
36
+ """
37
+
38
+ feature_extractor_class = "SeamlessM4TFeatureExtractor"
39
+ tokenizer_class = ("SeamlessM4TTokenizer", "SeamlessM4TTokenizerFast")
40
+
41
+ def __init__(self, feature_extractor, tokenizer):
42
+ super().__init__(feature_extractor, tokenizer)
43
+
44
+ def __call__(self, text=None, audios=None, src_lang=None, tgt_lang=None, **kwargs):
45
+ """
46
+ Main method to prepare for the model one or several sequence(s) and audio(s). This method forwards the `text`
47
+ and `kwargs` arguments to SeamlessM4TTokenizerFast's [`~SeamlessM4TTokenizerFast.__call__`] if `text` is not
48
+ `None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to
49
+ SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audios` is not `None`. Please refer
50
+ to the docstring of the above two methods for more information.
51
+
52
+ Args:
53
+ text (`str`, `List[str]`, `List[List[str]]`):
54
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
55
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
56
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
57
+ audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
58
+ The audio or batch of audios to be prepared. Each audio can be a NumPy array or a PyTorch tensor. In case
59
+ of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is the number of channels,
60
+ and T the sample length of the audio.
61
+ src_lang (`str`, *optional*):
62
+ The language code of the input texts/audios. If not specified, the last `src_lang` specified will be
63
+ used.
64
+ tgt_lang (`str`, *optional*):
65
+ The code of the target language. If not specified, the last `tgt_lang` specified will be used.
66
+ kwargs (*optional*):
67
+ Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the
68
+ tokenizer.
69
+ Returns:
70
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
71
+
72
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
73
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
74
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
75
+ `None`).
76
+ - **input_features** -- Audio input features to be fed to a model. Returned when `audios` is not `None`.
77
+ """
78
+ sampling_rate = kwargs.pop("sampling_rate", None)
79
+
80
+ if text is None and audios is None:
81
+ raise ValueError("You have to specify either text or audios. Both cannot be none.")
82
+ elif text is not None and audios is not None:
83
+ raise ValueError(
84
+ "Text and audios are mututally exclusive when passed to `SeamlessM4T`. Specify one or another."
85
+ )
86
+ elif text is not None:
87
+ if tgt_lang is not None:
88
+ self.tokenizer.tgt_lang = tgt_lang
89
+ if src_lang is not None:
90
+ self.tokenizer.src_lang = src_lang
91
+ encoding = self.tokenizer(text, **kwargs)
92
+
93
+ return encoding
94
+
95
+ else:
96
+ encoding = self.feature_extractor(audios, sampling_rate=sampling_rate, **kwargs)
97
+ return encoding
98
+
99
+ def batch_decode(self, *args, **kwargs):
100
+ """
101
+ This method forwards all its arguments to SeamlessM4TTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
102
+ Please refer to the docstring of this method for more information.
103
+ """
104
+ return self.tokenizer.batch_decode(*args, **kwargs)
105
+
106
+ def decode(self, *args, **kwargs):
107
+ """
108
+ This method forwards all its arguments to SeamlessM4TTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
109
+ refer to the docstring of this method for more information.
110
+ """
111
+ return self.tokenizer.decode(*args, **kwargs)
112
+
113
+ @property
114
+ def model_input_names(self):
115
+ tokenizer_input_names = self.tokenizer.model_input_names
116
+ feature_extractor_input_names = self.feature_extractor.model_input_names
117
+ return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
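To make the dispatch logic of `SeamlessM4TProcessor.__call__` above concrete, here is a minimal usage sketch. The checkpoint name and the synthetic waveform are illustrative assumptions; only the call pattern (text and audio are mutually exclusive, `src_lang`/`tgt_lang` and `sampling_rate` are forwarded) mirrors the code shown in this diff.

```python
import numpy as np
from transformers import SeamlessM4TProcessor

# Assumed checkpoint; any SeamlessM4T checkpoint bundling a feature extractor and tokenizer should work.
processor = SeamlessM4TProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")

# Text branch: forwarded to the tokenizer -> input_ids / attention_mask.
text_inputs = processor(text="Hello world", src_lang="eng", tgt_lang="fra", return_tensors="pt")

# Audio branch: forwarded to the feature extractor -> input_features.
waveform = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz (placeholder data)
audio_inputs = processor(audios=waveform, sampling_rate=16000, return_tensors="pt")

# Passing both `text` and `audios` at once raises a ValueError, as enforced in __call__ above.
```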
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/tokenization_seamless_m4t.py ADDED
@@ -0,0 +1,562 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for SeamlessM4T."""
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple, Union
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...convert_slow_tokenizer import import_protobuf
23
+ from ...tokenization_utils import (
24
+ BatchEncoding,
25
+ PreTokenizedInput,
26
+ PreTrainedTokenizer,
27
+ TextInput,
28
+ )
29
+ from ...tokenization_utils_base import AddedToken
30
+ from ...utils import PaddingStrategy, logging
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ SPIECE_UNDERLINE = "▁"
37
+
38
+
39
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
40
+
41
+
42
+ class SeamlessM4TTokenizer(PreTrainedTokenizer):
43
+ """
44
+ Construct a SeamlessM4T tokenizer.
45
+
46
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
47
+ [SentencePiece](https://github.com/google/sentencepiece).
48
+
49
+ The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language
50
+ code> <tokens> <eos>` for target language documents.
51
+
52
+ Examples:
53
+
54
+ ```python
55
+ >>> from transformers import SeamlessM4TTokenizer
56
+
57
+ >>> tokenizer = SeamlessM4TTokenizer.from_pretrained(
58
+ ... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
59
+ ... )
60
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
61
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
62
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
63
+ ```
64
+
65
+ Args:
66
+ vocab_file (`str`):
67
+ Path to the vocabulary file.
68
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
69
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
70
+
71
+ <Tip>
72
+
73
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
74
+ sequence. The token used is the `cls_token`.
75
+
76
+ </Tip>
77
+
78
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
79
+ The end of sequence token.
80
+
81
+ <Tip>
82
+
83
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
84
+ The token used is the `sep_token`.
85
+
86
+ </Tip>
87
+
88
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
89
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
90
+ sequence classification or for a text and a question for question answering. It is also used as the last
91
+ token of a sequence built with special tokens.
92
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
93
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
94
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
95
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
96
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
97
+ token instead.
98
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
99
+ The token used for padding, for example when batching sequences of different lengths.
100
+ tokenizer_file (`str`, *optional*):
101
+ The path to a tokenizer file to use instead of the vocab file.
102
+ src_lang (`str`, *optional*, defaults to `"eng"`):
103
+ The language to use as source language for translation.
104
+ tgt_lang (`str`, *optional*, defaults to `"fra"`):
105
+ The language to use as target language for translation.
106
+ sp_model_kwargs (`Dict[str, Any]`, *optional*):
107
+ Additional keyword arguments to pass to the `SentencePieceProcessor` initialization.
108
+ additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
109
+ A tuple or a list of additional special tokens. Can be used to specify the list of languages that will be
110
+ supported by the tokenizer.
111
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
112
+ Whether or not to add an initial space to the input. This allows the leading word to be treated like any
113
+ other word.
114
+ """
115
+
116
+ vocab_files_names = VOCAB_FILES_NAMES
117
+ model_input_names = ["input_ids", "attention_mask"]
118
+
119
+ prefix_tokens: List[int] = []
120
+ suffix_tokens: List[int] = []
121
+
122
+ def __init__(
123
+ self,
124
+ vocab_file,
125
+ bos_token="<s>",
126
+ eos_token="</s>",
127
+ sep_token="</s>",
128
+ cls_token="<s>",
129
+ unk_token="<unk>",
130
+ pad_token="<pad>",
131
+ tokenizer_file=None,
132
+ src_lang="eng",
133
+ tgt_lang="fra",
134
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
135
+ additional_special_tokens=None,
136
+ add_prefix_space=True,
137
+ **kwargs,
138
+ ):
139
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
140
+ # Add this unused argument to keep some important Copied from statements
141
+ self.legacy = False
142
+ self.vocab_file = vocab_file
143
+
144
+ self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
145
+
146
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
147
+ # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
148
+ # spm | '<unk>' | '<s>' | '</s>' | 'an' | 'en' | '_d' | 'er' | 'in' | '_s' | '_a'
149
+ # fairseq | '<pad>' | '<unk>' | '<s>' | '</s>' | 'an' | 'en' | '▁d' | 'er' | 'in' | '▁s'
150
+
151
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
152
+ self._added_tokens_decoder = {
153
+ 0: AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token,
154
+ 1: AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token,
155
+ 2: AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token,
156
+ 3: AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token,
157
+ }
158
+
159
+ # The first "real" token "an" has position 4 in the original fairseq vocab and position 3 in the spm vocab
160
+ self.fairseq_offset = 1
161
+
162
+ self.sp_model_size = len(self.sp_model)
163
+
164
+ self._src_lang = f"__{src_lang}__" if "__" not in src_lang else src_lang
165
+ self._tgt_lang = f"__{tgt_lang}__" if "__" not in tgt_lang else tgt_lang
166
+ self.add_prefix_space = add_prefix_space
167
+
168
+ super().__init__(
169
+ bos_token=bos_token,
170
+ eos_token=eos_token,
171
+ unk_token=unk_token,
172
+ sep_token=sep_token,
173
+ cls_token=cls_token,
174
+ pad_token=pad_token,
175
+ tokenizer_file=tokenizer_file,
176
+ src_lang=src_lang,
177
+ tgt_lang=tgt_lang,
178
+ additional_special_tokens=additional_special_tokens,
179
+ sp_model_kwargs=self.sp_model_kwargs,
180
+ add_prefix_space=add_prefix_space,
181
+ **kwargs,
182
+ )
183
+
184
+ self.set_src_lang_special_tokens(self._src_lang)
185
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
186
+
187
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.__getstate__
188
+ def __getstate__(self):
189
+ state = self.__dict__.copy()
190
+ state["sp_model"] = None
191
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
192
+ return state
193
+
194
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.__setstate__
195
+ def __setstate__(self, d):
196
+ self.__dict__ = d
197
+
198
+ # for backward compatibility
199
+ if not hasattr(self, "sp_model_kwargs"):
200
+ self.sp_model_kwargs = {}
201
+
202
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
203
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
204
+
205
+ @property
206
+ def vocab_size(self):
207
+ return len(self.sp_model)
208
+
209
+ def __call__(
210
+ self,
211
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
212
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
213
+ text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
214
+ text_pair_target: Optional[
215
+ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
216
+ ] = None,
217
+ padding: Union[bool, str, PaddingStrategy] = True,
218
+ pad_to_multiple_of: Optional[int] = 2,
219
+ src_lang: Optional[str] = None,
220
+ tgt_lang: Optional[str] = None,
221
+ **kwargs,
222
+ ):
223
+ """
224
+ Args:
225
+ text (`str`, `List[str]`, `List[List[str]]`, *optional*):
226
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
227
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
228
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
229
+ text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
230
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
231
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
232
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
233
+ text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
234
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
235
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
236
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
237
+ text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
238
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
239
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
240
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
241
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
242
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
243
+ index) among:
244
+
245
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
246
+ sequence is provided).
247
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
248
+ acceptable input length for the model if that argument is not provided.
249
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
250
+ lengths).
251
+ pad_to_multiple_of (`int`, *optional*):
252
+ If set will pad the sequence to a multiple of the provided value.
253
+
254
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
255
+ `>= 7.5` (Volta).
256
+ src_lang (`str`, *optional*):
257
+ A string representing the source language. If not specified, the last `src_lang` specified (either
258
+ during initialization or when calling this tokenizer) will be used.
259
+ tgt_lang (`str`, *optional*):
260
+ A string representing the target language. If not specified, the last `tgt_lang` specified (either
261
+ during initialization or when calling this tokenizer) will be used.
262
+ kwargs (*optional*):
263
+ Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizer.__call__`].
264
+ """
265
+ if src_lang is not None:
266
+ self.src_lang = src_lang
267
+ if tgt_lang is not None:
268
+ self.tgt_lang = tgt_lang
269
+
270
+ output = super().__call__(
271
+ text=text,
272
+ text_pair=text_pair,
273
+ text_target=text_target,
274
+ text_pair_target=text_pair_target,
275
+ padding=padding,
276
+ pad_to_multiple_of=pad_to_multiple_of,
277
+ **kwargs,
278
+ )
279
+
280
+ return BatchEncoding(output, tensor_type=kwargs.get("return_tensors"))
281
+
282
+ @property
283
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.src_lang
284
+ def src_lang(self) -> str:
285
+ return self._src_lang
286
+
287
+ @src_lang.setter
288
+ def src_lang(self, new_src_lang: str) -> None:
289
+ if "__" not in new_src_lang:
290
+ self._src_lang = f"__{new_src_lang}__"
291
+ else:
292
+ self._src_lang = new_src_lang
293
+ self.set_src_lang_special_tokens(self._src_lang)
294
+
295
+ @property
296
+ def tgt_lang(self) -> str:
297
+ return self._tgt_lang
298
+
299
+ @tgt_lang.setter
300
+ def tgt_lang(self, new_tgt_lang: str) -> None:
301
+ if "__" not in new_tgt_lang:
302
+ self._tgt_lang = f"__{new_tgt_lang}__"
303
+ else:
304
+ self._tgt_lang = new_tgt_lang
305
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
306
+
307
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.get_special_tokens_mask
308
+ def get_special_tokens_mask(
309
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
310
+ ) -> List[int]:
311
+ """
312
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
313
+ special tokens using the tokenizer `prepare_for_model` method.
314
+
315
+ Args:
316
+ token_ids_0 (`List[int]`):
317
+ List of IDs.
318
+ token_ids_1 (`List[int]`, *optional*):
319
+ Optional second list of IDs for sequence pairs.
320
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
321
+ Whether or not the token list is already formatted with special tokens for the model.
322
+
323
+ Returns:
324
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
325
+ """
326
+
327
+ if already_has_special_tokens:
328
+ return super().get_special_tokens_mask(
329
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
330
+ )
331
+
332
+ prefix_ones = [1] * len(self.prefix_tokens)
333
+ suffix_ones = [1] * len(self.suffix_tokens)
334
+ if token_ids_1 is None:
335
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
336
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
337
+
338
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.build_inputs_with_special_tokens
339
+ def build_inputs_with_special_tokens(
340
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
341
+ ) -> List[int]:
342
+ """
343
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
344
+ adding special tokens. A SeamlessM4T sequence has the following format, where `X` represents the sequence:
345
+
346
+ - `input_ids` (for encoder) `[src_lang_code] X [eos]`
347
+ - `decoder_input_ids`: (for decoder) `[eos, tgt_lang_code] X [eos]`
348
+
349
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
350
+ separator.
351
+
352
+ Args:
353
+ token_ids_0 (`List[int]`):
354
+ List of IDs to which the special tokens will be added.
355
+ token_ids_1 (`List[int]`, *optional*):
356
+ Optional second list of IDs for sequence pairs.
357
+
358
+ Returns:
359
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
360
+ """
361
+ if token_ids_1 is None:
362
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
363
+ # We don't expect to process pairs, but leave the pair logic for API consistency
364
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
365
+
366
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.create_token_type_ids_from_sequences
367
+ def create_token_type_ids_from_sequences(
368
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
369
+ ) -> List[int]:
370
+ """
371
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. SeamlessM4T does not
372
+ make use of token type ids, therefore a list of zeros is returned.
373
+
374
+ Args:
375
+ token_ids_0 (`List[int]`):
376
+ List of IDs.
377
+ token_ids_1 (`List[int]`, *optional*):
378
+ Optional second list of IDs for sequence pairs.
379
+
380
+ Returns:
381
+ `List[int]`: List of zeros.
382
+
383
+ """
384
+
385
+ sep = [self.sep_token_id]
386
+ cls = [self.cls_token_id]
387
+
388
+ if token_ids_1 is None:
389
+ return len(cls + token_ids_0 + sep) * [0]
390
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
391
+
392
+ def _build_translation_inputs(
393
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
394
+ ):
395
+ """Used by translation pipeline, to prepare inputs for the generate function"""
396
+ if src_lang is None or tgt_lang is None:
397
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model.")
398
+ self.src_lang = src_lang
399
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
400
+ if "__" not in tgt_lang:
401
+ tgt_lang = f"__{tgt_lang}__"
402
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
403
+ inputs["forced_bos_token_id"] = tgt_lang_id
404
+ return inputs
405
+
406
+ def get_vocab(self):
407
+ vocab = {
408
+ self.convert_ids_to_tokens(i): i for i in range(self.fairseq_offset, self.vocab_size + self.fairseq_offset)
409
+ }
410
+ vocab.update(self.added_tokens_encoder)
411
+ return vocab
412
+
413
+ @property
414
+ def unk_token_length(self):
415
+ return len(self.sp_model.encode(str(self.unk_token)))
416
+
417
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
418
+ def get_spm_processor(self, from_slow=False):
419
+ tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
420
+ if self.legacy or from_slow: # no dependency on protobuf
421
+ tokenizer.Load(self.vocab_file)
422
+ return tokenizer
423
+
424
+ with open(self.vocab_file, "rb") as f:
425
+ sp_model = f.read()
426
+ model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
427
+ model = model_pb2.ModelProto.FromString(sp_model)
428
+ normalizer_spec = model_pb2.NormalizerSpec()
429
+ normalizer_spec.add_dummy_prefix = False
430
+ model.normalizer_spec.MergeFrom(normalizer_spec)
431
+ sp_model = model.SerializeToString()
432
+ tokenizer.LoadFromSerializedProto(sp_model)
433
+ return tokenizer
434
+
435
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
436
+ def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
437
+ """
438
+ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
439
+ first token is special.
440
+ """
441
+ if self.legacy or len(text) == 0:
442
+ return super().tokenize(text, **kwargs)
443
+
444
+ text = text.replace(SPIECE_UNDERLINE, " ")
445
+ if self.add_prefix_space:
446
+ text = SPIECE_UNDERLINE + text
447
+
448
+ tokens = super().tokenize(text, **kwargs)
449
+
450
+ if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
451
+ tokens = tokens[1:]
452
+ return tokens
453
+
454
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
455
+ def _tokenize(self, text, **kwargs):
456
+ """
457
+ Returns a tokenized string.
458
+
459
+ We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
460
+ SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
461
+ `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
462
+ `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
463
+ `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
464
+ """
465
+ tokens = self.sp_model.encode(text, out_type=str)
466
+ if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
467
+ return tokens
468
+
469
+ # 1. Encode string + prefix ex: "<unk> Hey"
470
+ tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
471
+ # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
472
+ return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
473
+
474
+ def _convert_token_to_id(self, token):
475
+ """Converts a token (str) to an id using the vocab."""
476
+ spm_id = self.sp_model.PieceToId(token)
477
+
478
+ # Need to return unknown token if the SP model returned 0
479
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
480
+
481
+ def _convert_id_to_token(self, index):
482
+ """Converts an index (integer) to a token (str) using the vocab."""
483
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
484
+
485
+ def convert_tokens_to_string(self, tokens):
486
+ """Converts a sequence of tokens (strings for sub-words) into a single string."""
487
+ # since we manually add the prefix space, we have to remove it when decoding
488
+ if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
489
+ tokens[0] = tokens[0][1:]
490
+
491
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
492
+ return out_string
493
+
494
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.save_vocabulary
495
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
496
+ if not os.path.isdir(save_directory):
497
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
498
+ return
499
+ out_vocab_file = os.path.join(
500
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
501
+ )
502
+
503
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
504
+ copyfile(self.vocab_file, out_vocab_file)
505
+ elif not os.path.isfile(self.vocab_file):
506
+ with open(out_vocab_file, "wb") as fi:
507
+ content_spiece_model = self.sp_model.serialized_model_proto()
508
+ fi.write(content_spiece_model)
509
+
510
+ return (out_vocab_file,)
511
+
512
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.prepare_seq2seq_batch with eng_Latn->eng, fra_Latn->fra
513
+ def prepare_seq2seq_batch(
514
+ self,
515
+ src_texts: List[str],
516
+ src_lang: str = "eng",
517
+ tgt_texts: Optional[List[str]] = None,
518
+ tgt_lang: str = "fra",
519
+ **kwargs,
520
+ ) -> BatchEncoding:
521
+ self.src_lang = src_lang
522
+ self.tgt_lang = tgt_lang
523
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
524
+
525
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer._switch_to_input_mode
526
+ def _switch_to_input_mode(self):
527
+ return self.set_src_lang_special_tokens(self.src_lang)
528
+
529
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer._switch_to_target_mode
530
+ def _switch_to_target_mode(self):
531
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
532
+
533
+ def set_src_lang_special_tokens(self, src_lang) -> None:
534
+ """Reset the special tokens to the source lang setting.
535
+ Prefix=[src_lang_code], suffix = [eos]
536
+ """
537
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
538
+ self.init_kwargs["src_lang"] = src_lang
539
+
540
+ if self.cur_lang_code == self.unk_token_id:
541
+ logger.warning_once(
542
+ f"`src_lang={src_lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
543
+ )
544
+
545
+ self.prefix_tokens = [self.cur_lang_code]
546
+ self.suffix_tokens = [self.eos_token_id]
547
+
548
+ # https://github.com/facebookresearch/fairseq2/blob/c53f18e6be6b8b46b722f2249b8397b7eccd7ad3/src/fairseq2/models/nllb/tokenizer.py#L112-L116
549
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
550
+ """Reset the special tokens to the target lang setting.
551
+ Prefix=[eos, tgt_lang_code] and suffix=[eos].
552
+ """
553
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
554
+ self.init_kwargs["tgt_lang"] = lang
555
+
556
+ if self.cur_lang_code == self.unk_token_id:
557
+ logger.warning_once(
558
+ f"`tgt_lang={lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
559
+ )
560
+
561
+ self.prefix_tokens = [self.eos_token_id, self.cur_lang_code]
562
+ self.suffix_tokens = [self.eos_token_id]
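As a quick illustration of the source/target special-token layout produced by `set_src_lang_special_tokens` and `set_tgt_lang_special_tokens` above, the sketch below encodes a source sentence and a target sentence and inspects the surrounding tokens. This is a minimal sketch, assuming the checkpoint from the class docstring is available; exact token strings depend on the actual vocabulary.

```python
from transformers import SeamlessM4TTokenizer

tokenizer = SeamlessM4TTokenizer.from_pretrained(
    "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
)

# Source side: prefix=[__eng__], suffix=[</s>]  ->  "__eng__ <tokens> </s>"
src = tokenizer("UN Chief Says There Is No Military Solution in Syria", padding=False)
print(tokenizer.convert_ids_to_tokens(src["input_ids"]))

# Target side (text_target switches to target mode): prefix=[</s>, __fra__], suffix=[</s>]
tgt = tokenizer(text_target="Le chef de l'ONU affirme qu'il n'y a pas de solution militaire.", padding=False)
print(tokenizer.convert_ids_to_tokens(tgt["input_ids"]))
```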
venv/lib/python3.10/site-packages/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py ADDED
@@ -0,0 +1,446 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast Tokenization class for SeamlessM4T."""
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils import (
23
+ BatchEncoding,
24
+ PreTokenizedInput,
25
+ TextInput,
26
+ )
27
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
28
+ from ...utils import PaddingStrategy, is_sentencepiece_available, logging
29
+
30
+
31
+ if is_sentencepiece_available():
32
+ from .tokenization_seamless_m4t import SeamlessM4TTokenizer
33
+ else:
34
+ SeamlessM4TTokenizer = None
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
39
+
40
+
41
+ class SeamlessM4TTokenizerFast(PreTrainedTokenizerFast):
42
+ """
43
+ Construct a "fast" SeamlessM4T tokenizer (backed by HuggingFace's *tokenizers* library). Based on
44
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
45
+
46
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
47
+ refer to this superclass for more information regarding those methods.
48
+
49
+ The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language
50
+ code> <tokens> <eos>` for target language documents.
51
+
52
+ Examples:
53
+
54
+ ```python
55
+ >>> from transformers import SeamlessM4TTokenizerFast
56
+
57
+ >>> tokenizer = SeamlessM4TTokenizerFast.from_pretrained(
58
+ ... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
59
+ ... )
60
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
61
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
62
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
63
+ ```
64
+
65
+ Args:
66
+ vocab_file (`str`, *optional*):
67
+ Path to the vocabulary file.
68
+ tokenizer_file (`str`, *optional*):
69
+ The path to a tokenizer file to use instead of the vocab file.
70
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
71
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
72
+
73
+ <Tip>
74
+
75
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
76
+ sequence. The token used is the `cls_token`.
77
+
78
+ </Tip>
79
+
80
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
81
+ The end of sequence token.
82
+
83
+ <Tip>
84
+
85
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
86
+ The token used is the `sep_token`.
87
+
88
+ </Tip>
89
+
90
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
91
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
92
+ sequence classification or for a text and a question for question answering. It is also used as the last
93
+ token of a sequence built with special tokens.
94
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
95
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
96
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
97
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
98
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
99
+ token instead.
100
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
101
+ The token used for padding, for example when batching sequences of different lengths.
102
+ src_lang (`str`, *optional*, defaults to `"eng"`):
103
+ The language to use as source language for translation.
104
+ tgt_lang (`str`, *optional*, defaults to `"fra"`):
105
+ The language to use as target language for translation.
106
+ additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
107
+ A tuple or a list of additional special tokens.
108
+ """
109
+
110
+ vocab_files_names = VOCAB_FILES_NAMES
111
+ slow_tokenizer_class = SeamlessM4TTokenizer
112
+ model_input_names = ["input_ids", "attention_mask"]
113
+
114
+ prefix_tokens: List[int] = []
115
+ suffix_tokens: List[int] = []
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_file=None,
120
+ tokenizer_file=None,
121
+ bos_token="<s>",
122
+ eos_token="</s>",
123
+ sep_token="</s>",
124
+ cls_token="<s>",
125
+ unk_token="<unk>",
126
+ pad_token="<pad>",
127
+ src_lang="eng",
128
+ tgt_lang="fra",
129
+ additional_special_tokens=None,
130
+ **kwargs,
131
+ ):
132
+ super().__init__(
133
+ vocab_file=vocab_file,
134
+ tokenizer_file=tokenizer_file,
135
+ bos_token=bos_token,
136
+ eos_token=eos_token,
137
+ sep_token=sep_token,
138
+ cls_token=cls_token,
139
+ unk_token=unk_token,
140
+ pad_token=pad_token,
141
+ src_lang=src_lang,
142
+ tgt_lang=tgt_lang,
143
+ additional_special_tokens=additional_special_tokens,
144
+ **kwargs,
145
+ )
146
+
147
+ self.vocab_file = vocab_file
148
+ self._src_lang = f"__{src_lang}__" if "__" not in src_lang else src_lang
149
+ self._tgt_lang = f"__{tgt_lang}__" if "__" not in tgt_lang else tgt_lang
150
+ self.set_src_lang_special_tokens(self._src_lang)
151
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
152
+
153
+ @property
154
+ def can_save_slow_tokenizer(self) -> bool:
155
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
156
+
157
+ @property
158
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.src_lang
159
+ def src_lang(self) -> str:
160
+ return self._src_lang
161
+
162
+ @src_lang.setter
163
+ def src_lang(self, new_src_lang: str) -> None:
164
+ if "__" not in new_src_lang:
165
+ self._src_lang = f"__{new_src_lang}__"
166
+ else:
167
+ self._src_lang = new_src_lang
168
+ self.set_src_lang_special_tokens(self._src_lang)
169
+
170
+ @property
171
+ def tgt_lang(self) -> str:
172
+ return self._tgt_lang
173
+
174
+ @tgt_lang.setter
175
+ def tgt_lang(self, new_tgt_lang: str) -> None:
176
+ if "__" not in new_tgt_lang:
177
+ self._tgt_lang = f"__{new_tgt_lang}__"
178
+ else:
179
+ self._tgt_lang = new_tgt_lang
180
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
181
+
182
+ def build_inputs_with_special_tokens(
183
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
184
+ ) -> List[int]:
185
+ """
186
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
187
+ adding special tokens. The special tokens depend on calling set_lang.
188
+
189
+ A SeamlessM4T sequence has the following format, where `X` represents the sequence:
190
+
191
+ - `input_ids` (for encoder) `[src_lang_code] X [eos]`
192
+ - `decoder_input_ids`: (for decoder) `[eos, tgt_lang_code] X [eos]`
193
+
194
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
195
+ separator.
196
+
197
+ Args:
198
+ token_ids_0 (`List[int]`):
199
+ List of IDs to which the special tokens will be added.
200
+ token_ids_1 (`List[int]`, *optional*):
201
+ Optional second list of IDs for sequence pairs.
202
+
203
+ Returns:
204
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
205
+ """
206
+ if token_ids_1 is None:
207
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
208
+ # We don't expect to process pairs, but leave the pair logic for API consistency
209
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
210
+
211
+ # Copied from transformers.models.nllb.tokenization_nllb_fast.NllbTokenizerFast.create_token_type_ids_from_sequences
212
+ def create_token_type_ids_from_sequences(
213
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
214
+ ) -> List[int]:
215
+ """
216
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. SeamlessM4T does not
217
+ make use of token type ids, therefore a list of zeros is returned.
218
+
219
+ Args:
220
+ token_ids_0 (`List[int]`):
221
+ List of IDs.
222
+ token_ids_1 (`List[int]`, *optional*):
223
+ Optional second list of IDs for sequence pairs.
224
+
225
+ Returns:
226
+ `List[int]`: List of zeros.
227
+
228
+ """
229
+
230
+ sep = [self.sep_token_id]
231
+ cls = [self.cls_token_id]
232
+
233
+ if token_ids_1 is None:
234
+ return len(cls + token_ids_0 + sep) * [0]
235
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
236
+
237
+ def _build_translation_inputs(
238
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
239
+ ):
240
+ """Used by translation pipeline, to prepare inputs for the generate function"""
241
+ if src_lang is None or tgt_lang is None:
242
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
243
+ self.src_lang = src_lang
244
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
245
+ if "__" not in tgt_lang:
246
+ tgt_lang = f"__{tgt_lang}__"
247
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
248
+ inputs["forced_bos_token_id"] = tgt_lang_id
249
+ return inputs
250
+
251
+ # Copied from transformers.models.nllb.tokenization_nllb_fast.NllbTokenizerFast.prepare_seq2seq_batch with "fra_Latn"->"fra", "eng_Latn"->"eng"
252
+ def prepare_seq2seq_batch(
253
+ self,
254
+ src_texts: List[str],
255
+ src_lang: str = "eng",
256
+ tgt_texts: Optional[List[str]] = None,
257
+ tgt_lang: str = "fra",
258
+ **kwargs,
259
+ ) -> BatchEncoding:
260
+ self.src_lang = src_lang
261
+ self.tgt_lang = tgt_lang
262
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
263
+
264
+ # Copied from transformers.models.nllb.tokenization_nllb_fast.NllbTokenizerFast._switch_to_input_mode
265
+ def _switch_to_input_mode(self):
266
+ return self.set_src_lang_special_tokens(self.src_lang)
267
+
268
+ # Copied from transformers.models.nllb.tokenization_nllb_fast.NllbTokenizerFast._switch_to_target_mode
269
+ def _switch_to_target_mode(self):
270
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
271
+
272
+ def set_src_lang_special_tokens(self, src_lang) -> None:
273
+ """Reset the special tokens to the source lang setting.
274
+ Prefix=[src_lang_code], suffix = [eos]
275
+ """
276
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
277
+
278
+ if self.cur_lang_code == self.unk_token_id:
279
+ logger.warning_once(
280
+ f"`src_lang={src_lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
281
+ )
282
+
283
+ self.init_kwargs["src_lang"] = src_lang
284
+
285
+ self.prefix_tokens = [self.cur_lang_code]
286
+ self.suffix_tokens = [self.eos_token_id]
287
+
288
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
289
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
290
+
291
+ self._tokenizer.post_processor = processors.TemplateProcessing(
292
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
293
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
294
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
295
+ )
296
+
297
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
298
+ """Reset the special tokens to the target lang setting.
299
+ Prefix=[eos, tgt_lang_code] and suffix=[eos].
300
+ """
301
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
302
+
303
+ if self.cur_lang_code == self.unk_token_id:
304
+ logger.warning_once(
305
+ f"`tgt_lang={lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
306
+ )
307
+
308
+ self.init_kwargs["tgt_lang"] = lang
309
+
310
+ self.prefix_tokens = [self.eos_token_id, self.cur_lang_code]
311
+ self.suffix_tokens = [self.eos_token_id]
312
+
313
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
314
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
315
+
316
+ self._tokenizer.post_processor = processors.TemplateProcessing(
317
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
318
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
319
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
320
+ )
321
+
322
+ # Copied from transformers.models.nllb.tokenization_nllb_fast.NllbTokenizerFast.save_vocabulary
323
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
324
+ if not self.can_save_slow_tokenizer:
325
+ raise ValueError(
326
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
327
+ "tokenizer."
328
+ )
329
+
330
+ if not os.path.isdir(save_directory):
331
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
332
+ return
333
+ out_vocab_file = os.path.join(
334
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
335
+ )
336
+
337
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
338
+ copyfile(self.vocab_file, out_vocab_file)
339
+
340
+ return (out_vocab_file,)
341
+
342
+ @classmethod
343
+ def _from_pretrained(
344
+ cls,
345
+ resolved_vocab_files,
346
+ pretrained_model_name_or_path,
347
+ init_configuration,
348
+ *init_inputs,
349
+ token=None,
350
+ cache_dir=None,
351
+ local_files_only=False,
352
+ _commit_hash=None,
353
+ _is_local=False,
354
+ **kwargs,
355
+ ):
356
+ tokenizer = super()._from_pretrained(
357
+ resolved_vocab_files,
358
+ pretrained_model_name_or_path,
359
+ init_configuration,
360
+ *init_inputs,
361
+ token=token,
362
+ cache_dir=cache_dir,
363
+ local_files_only=local_files_only,
364
+ _commit_hash=_commit_hash,
365
+ _is_local=_is_local,
366
+ **kwargs,
367
+ )
368
+
369
+ # ensure also set after from pretrained
370
+ tokenizer.set_src_lang_special_tokens(tokenizer._src_lang)
371
+ tokenizer.set_tgt_lang_special_tokens(tokenizer._tgt_lang)
372
+
373
+ return tokenizer
374
+
375
+ def __call__(
376
+ self,
377
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
378
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
379
+ text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
380
+ text_pair_target: Optional[
381
+ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
382
+ ] = None,
383
+ padding: Union[bool, str, PaddingStrategy] = True,
384
+ pad_to_multiple_of: Optional[int] = 2,
385
+ src_lang: Optional[str] = None,
386
+ tgt_lang: Optional[str] = None,
387
+ **kwargs,
388
+ ):
389
+ """
390
+ Args:
391
+ text (`str`, `List[str]`, `List[List[str]]`, *optional*):
392
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
393
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
394
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
395
+ text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
396
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
397
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
398
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
399
+ text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
400
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
401
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
402
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
403
+ text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
404
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
405
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
406
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
407
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
408
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
409
+ index) among:
410
+
411
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
412
+ sequence if provided).
413
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
414
+ acceptable input length for the model if that argument is not provided.
415
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
416
+ lengths).
417
+ pad_to_multiple_of (`int`, *optional*):
418
+ If set will pad the sequence to a multiple of the provided value.
419
+
420
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
421
+ `>= 7.5` (Volta).
422
+ src_lang (`str`, *optional*):
423
+ A string representing the source language. If not specified, the last `src_lang` specified (either
424
+ during initialization or when calling this tokenizer) will be used.
425
+ tgt_lang (`str`, *optional*):
426
+ A string representing the target language. If not specified, the last `tgt_lang` specified (either
427
+ during initialization or when calling this tokenizer) will be used.
428
+ kwargs (*optional*):
429
+ Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizerFast.__call__`].
430
+ """
431
+ if src_lang is not None:
432
+ self.src_lang = src_lang
433
+ if tgt_lang is not None:
434
+ self.tgt_lang = tgt_lang
435
+
436
+ output = super().__call__(
437
+ text=text,
438
+ text_pair=text_pair,
439
+ text_target=text_target,
440
+ text_pair_target=text_pair_target,
441
+ padding=padding,
442
+ pad_to_multiple_of=pad_to_multiple_of,
443
+ **kwargs,
444
+ )
445
+
446
+ return output
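The fast tokenizer keeps the backend `tokenizers` post-processor in sync with the current language pair, and `_build_translation_inputs` is what the translation pipeline calls to also obtain `forced_bos_token_id` for generation. A minimal sketch, assuming the same checkpoint as in the docstring example:

```python
from transformers import SeamlessM4TTokenizerFast

tokenizer = SeamlessM4TTokenizerFast.from_pretrained("facebook/hf-seamless-m4t-medium")

# Assigning src_lang goes through the setter, which rebuilds the TemplateProcessing
# post-processor so the backend tokenizer injects "__eng__ ... </s>" itself.
tokenizer.src_lang = "eng"
encoded = tokenizer("Hello world", padding=False)
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))

# The translation pipeline path: encodes with the source language and attaches the
# target language id as forced_bos_token_id for generate().
pipeline_inputs = tokenizer._build_translation_inputs(
    "Hello world", return_tensors="pt", src_lang="eng", tgt_lang="fra"
)
print(pipeline_inputs["forced_bos_token_id"])
```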
venv/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.47 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc ADDED
Binary file (6.03 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/convert_vilt_original_to_pytorch.cpython-310.pyc ADDED
Binary file (8.54 kB). View file