applied-ai-018 committed
Commit 1bc5244 · verified · 1 Parent(s): 2d9dc5b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step20/mp_rank_00_model_states.pt +3 -0
  2. ckpts/universal/global_step20/mp_rank_06_model_states.pt +3 -0
  3. ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  4. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_2-v0-res.json +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-loglikelihood +1 -0
  6. lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_2-v0-res.json +1 -0
  7. lm-evaluation-harness/tests/testdata/blimp_wh_island-v0-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/coqa-v1-res.json +1 -0
  9. lm-evaluation-harness/tests/testdata/crows_pairs_english_disability-v0-res.json +1 -0
  10. lm-evaluation-harness/tests/testdata/hendrycksTest-prehistory-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/lambada_cloze-v0-loglikelihood +1 -0
  12. lm-evaluation-harness/tests/testdata/lambada_mt_de-v0-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/math_prealgebra-v1-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/mathqa-v0-res.json +1 -0
  15. lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v0-loglikelihood_rolling +1 -0
  16. lm-evaluation-harness/tests/testdata/wikitext-v1-res.json +1 -0
  17. lm-evaluation-harness/tests/testdata/wmt20-zh-en-v0-greedy_until +1 -0
  18. venv/lib/python3.10/site-packages/transformers/models/beit/__init__.py +112 -0
  19. venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py +231 -0
  26. venv/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py +374 -0
  27. venv/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py +33 -0
  28. venv/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py +531 -0
  29. venv/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py +1425 -0
  30. venv/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py +948 -0
  31. venv/lib/python3.10/site-packages/transformers/models/bert/__init__.py +197 -0
  32. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/configuration_bert.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf2_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_pytorch_checkpoint_to_original_tf.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_bert.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_flax_bert.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_tf_bert.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_fast.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_tf.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py +245 -0
  45. venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py +63 -0
  46. venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py +187 -0
  47. venv/lib/python3.10/site-packages/transformers/models/bert/modeling_bert.py +1867 -0
  48. venv/lib/python3.10/site-packages/transformers/models/bert/modeling_flax_bert.py +1713 -0
  49. venv/lib/python3.10/site-packages/transformers/models/bert/modeling_tf_bert.py +2114 -0
  50. venv/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert.py +500 -0
ckpts/universal/global_step20/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47c4fd3935126d180959d8110cd061dbb13a2144994ed75933dc9d05bec308f7
+ size 4230084
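The checkpoint shards in this commit are stored as Git LFS pointers: each text stub records only the LFS spec version, the SHA-256 `oid` of the real object, and its `size` in bytes. Below is a minimal, hedged sketch of how such a pointer could be checked against a locally resolved blob; the helper names and example paths are illustrative and not part of the repository.

```python
import hashlib


def parse_lfs_pointer(pointer_path):
    """Read a Git LFS pointer file into a dict of its 'key value' lines."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            if line.strip():
                key, _, value = line.strip().partition(" ")
                fields[key] = value
    return fields


def verify_blob(pointer_path, blob_path):
    """Check a downloaded blob against the oid/size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size


# Hypothetical usage: the pointer as committed vs. the resolved checkpoint file.
# print(verify_blob("mp_rank_00_model_states.pt.pointer", "mp_rank_00_model_states.pt"))
```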
ckpts/universal/global_step20/mp_rank_06_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:703865275d6419eab2b5f48649dbab58108bcbb91d05fba8c72ee5d3a5f2a7c8
+ size 4230084
ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1bc07a27766fecfe0e8ab3fbed3c142f2f4750ddcad6990fadce473d8452c58
+ size 50332843
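The shards themselves are DeepSpeed-style checkpoint pieces: per-rank model states plus per-parameter optimizer statistics such as `exp_avg_sq` under `zero/`. Assuming they are ordinary `torch.save` payloads, a quick inspection sketch could look like the following; the payload layout is an assumption, so the code only prints what it finds.

```python
import torch

# Load a shard on CPU without assuming any GPU is available.
# On recent torch versions, pickled checkpoints like this may additionally
# require weights_only=False.
state = torch.load(
    "ckpts/universal/global_step20/mp_rank_00_model_states.pt",
    map_location="cpu",
)

# The structure of the payload is an assumption; inspect the top-level keys
# before relying on any of them.
if isinstance(state, dict):
    print(list(state.keys()))
else:
    print(type(state))
```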
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_with_adj_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adj_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7084358b1b7dd7fb5ead1a58f4b499d6f7610eca897bfac25a986d0f9a91aa5d
lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_irregular_plural_subject_verb_agreement_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_irregular_plural_subject_verb_agreement_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_wh_island-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_island": 0}}
lm-evaluation-harness/tests/testdata/coqa-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"coqa": {"em": 0.0, "em_stderr": 0.0, "f1": 0.0, "f1_stderr": 0.0}}, "versions": {"coqa": 1}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_disability-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_english_disability": {"likelihood_difference": 0.3148684792547637, "likelihood_difference_stderr": 0.02800803147051987, "pct_stereotype": 0.36923076923076925, "pct_stereotype_stderr": 0.06032456592830047}}, "versions": {"crows_pairs_english_disability": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-prehistory-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-prehistory": {"acc": 0.2623456790123457, "acc_norm": 0.26851851851851855, "acc_norm_stderr": 0.024659685185967277, "acc_stderr": 0.02447722285613511}}, "versions": {"hendrycksTest-prehistory": 0}}
lm-evaluation-harness/tests/testdata/lambada_cloze-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7655e748b63ae7e9911411d2d2a2577221d6c861ca4448509992541294d689f3
lm-evaluation-harness/tests/testdata/lambada_mt_de-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_mt_de": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_de": 0}}
lm-evaluation-harness/tests/testdata/math_prealgebra-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_prealgebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_prealgebra": 1}}
lm-evaluation-harness/tests/testdata/mathqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"mathqa": {"acc": 0.20770519262981574, "acc_norm": 0.2050251256281407, "acc_norm_stderr": 0.007390619359738901, "acc_stderr": 0.007426217631188539}}, "versions": {"mathqa": 0}}
lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ d5b7967c0ece8b816f3921a8bd0fad23365349e935b491595e2ad1135af42da6
lm-evaluation-harness/tests/testdata/wikitext-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wikitext": {"bits_per_byte": 3.202519859941674e-05, "byte_perplexity": 1.0000221984224973, "word_perplexity": 1.000118710696617}}, "versions": {"wikitext": 1}}
lm-evaluation-harness/tests/testdata/wmt20-zh-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 07dbadfd6f2b2b9462ab6187dbfaabae6e5192ab89a8e4ede9237834b9364dd1
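The `tests/testdata` files added above come in two flavors: `*-res.json` files holding the expected metrics for an lm-evaluation-harness task, and extension-less files (`*-loglikelihood`, `*-greedy_until`, `*-loglikelihood_rolling`) holding a single SHA-256 digest of the expected request stream. A small sketch for inspecting one of the JSON result files follows; it relies only on the `results`/`versions` structure visible in the diffs above.

```python
import json

path = "lm-evaluation-harness/tests/testdata/blimp_wh_island-v0-res.json"
with open(path, "r", encoding="utf-8") as f:
    data = json.load(f)

# Each file stores per-task metrics plus the task version used to produce them.
for task, metrics in data["results"].items():
    print(task, "version", data["versions"][task])
    for name, value in metrics.items():
        print(f"  {name}: {value}")
```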
venv/lib/python3.10/site-packages/transformers/models/beit/__init__.py ADDED
@@ -0,0 +1,112 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_torch_available,
+     is_vision_available,
+ )
+
+
+ _import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
+     _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_beit"] = [
+         "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "BeitForImageClassification",
+         "BeitForMaskedImageModeling",
+         "BeitForSemanticSegmentation",
+         "BeitModel",
+         "BeitPreTrainedModel",
+         "BeitBackbone",
+     ]
+
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flax_beit"] = [
+         "FlaxBeitForImageClassification",
+         "FlaxBeitForMaskedImageModeling",
+         "FlaxBeitModel",
+         "FlaxBeitPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_beit import BeitFeatureExtractor
+         from .image_processing_beit import BeitImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_beit import (
+             BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             BeitBackbone,
+             BeitForImageClassification,
+             BeitForMaskedImageModeling,
+             BeitForSemanticSegmentation,
+             BeitModel,
+             BeitPreTrainedModel,
+         )
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flax_beit import (
+             FlaxBeitForImageClassification,
+             FlaxBeitForMaskedImageModeling,
+             FlaxBeitModel,
+             FlaxBeitPreTrainedModel,
+         )
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
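This `__init__.py` registers every submodule in `_import_structure` and, outside of type checking, swaps the package object for a `_LazyModule`, so torch-, flax-, and vision-specific code is imported only when one of the exported names is actually touched. A hedged sketch of what that means for a caller; it assumes a working `transformers` install, and the exact module states may differ by version.

```python
import importlib
import sys

# Importing the package itself is cheap: the beit subpackage stays a
# _LazyModule placeholder until an attribute such as BeitConfig is touched.
beit = importlib.import_module("transformers.models.beit")
print(type(beit).__name__)  # expected to be "_LazyModule" per the __init__ above

# First attribute access triggers the real import of configuration_beit only;
# modeling_beit (and therefore torch) should still be untouched at this point.
config_cls = beit.BeitConfig
print(config_cls.__module__)  # transformers.models.beit.configuration_beit
print("transformers.models.beit.modeling_beit" in sys.modules)  # likely False here
```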
venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc ADDED
Binary file (10.1 kB).
venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc ADDED
Binary file (10.9 kB).
venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc ADDED
Binary file (997 Bytes).
venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc ADDED
Binary file (18.7 kB).
venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc ADDED
Binary file (44.7 kB).
venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc ADDED
Binary file (28.3 kB).
venv/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py ADDED
@@ -0,0 +1,231 @@
1
+ # coding=utf-8
2
+ # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BEiT model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from packaging import version
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class BeitConfig(BackboneConfigMixin, PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate a BEiT
36
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
37
+ defaults will yield a similar configuration to that of the BEiT
38
+ [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 8192):
42
+ Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
43
+ pre-training.
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ num_hidden_layers (`int`, *optional*, defaults to 12):
47
+ Number of hidden layers in the Transformer encoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 12):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ intermediate_size (`int`, *optional*, defaults to 3072):
51
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
52
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
53
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
54
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
55
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
56
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
57
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
58
+ The dropout ratio for the attention probabilities.
59
+ initializer_range (`float`, *optional*, defaults to 0.02):
60
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
61
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
62
+ The epsilon used by the layer normalization layers.
63
+ image_size (`int`, *optional*, defaults to 224):
64
+ The size (resolution) of each image.
65
+ patch_size (`int`, *optional*, defaults to 16):
66
+ The size (resolution) of each patch.
67
+ num_channels (`int`, *optional*, defaults to 3):
68
+ The number of input channels.
69
+ use_mask_token (`bool`, *optional*, defaults to `False`):
70
+ Whether to use a mask token for masked image modeling.
71
+ use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
72
+ Whether to use BERT-style absolute position embeddings.
73
+ use_relative_position_bias (`bool`, *optional*, defaults to `False`):
74
+ Whether to use T5-style relative position embeddings in the self-attention layers.
75
+ use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
76
+ Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
77
+ layer_scale_init_value (`float`, *optional*, defaults to 0.1):
78
+ Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
79
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
80
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
81
+ use_mean_pooling (`bool`, *optional*, defaults to `True`):
82
+ Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
83
+ CLS token, before applying the classification head.
84
+ pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
85
+ Pooling scales used in Pooling Pyramid Module applied on the last feature map.
86
+ use_auxiliary_head (`bool`, *optional*, defaults to `True`):
87
+ Whether to use an auxiliary head during training.
88
+ auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
89
+ Weight of the cross-entropy loss of the auxiliary head.
90
+ auxiliary_channels (`int`, *optional*, defaults to 256):
91
+ Number of channels to use in the auxiliary head.
92
+ auxiliary_num_convs (`int`, *optional*, defaults to 1):
93
+ Number of convolutional layers to use in the auxiliary head.
94
+ auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
95
+ Whether to concatenate the output of the auxiliary head with the input before the classification layer.
96
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
97
+ The index that is ignored by the loss function of the semantic segmentation model.
98
+ out_features (`List[str]`, *optional*):
99
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
100
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
101
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
102
+ same order as defined in the `stage_names` attribute.
103
+ out_indices (`List[int]`, *optional*):
104
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
105
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
106
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
107
+ same order as defined in the `stage_names` attribute.
108
+ add_fpn (`bool`, *optional*, defaults to `False`):
109
+ Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`].
110
+ reshape_hidden_states (`bool`, *optional*, defaults to `True`):
111
+ Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
112
+ case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
113
+ seq_len, hidden_size)`. Only relevant for [`BeitBackbone`].
114
+
115
+ Example:
116
+
117
+ ```python
118
+ >>> from transformers import BeitConfig, BeitModel
119
+
120
+ >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
121
+ >>> configuration = BeitConfig()
122
+
123
+ >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
124
+ >>> model = BeitModel(configuration)
125
+
126
+ >>> # Accessing the model configuration
127
+ >>> configuration = model.config
128
+ ```"""
129
+
130
+ model_type = "beit"
131
+
132
+ def __init__(
133
+ self,
134
+ vocab_size=8192,
135
+ hidden_size=768,
136
+ num_hidden_layers=12,
137
+ num_attention_heads=12,
138
+ intermediate_size=3072,
139
+ hidden_act="gelu",
140
+ hidden_dropout_prob=0.0,
141
+ attention_probs_dropout_prob=0.0,
142
+ initializer_range=0.02,
143
+ layer_norm_eps=1e-12,
144
+ image_size=224,
145
+ patch_size=16,
146
+ num_channels=3,
147
+ use_mask_token=False,
148
+ use_absolute_position_embeddings=False,
149
+ use_relative_position_bias=False,
150
+ use_shared_relative_position_bias=False,
151
+ layer_scale_init_value=0.1,
152
+ drop_path_rate=0.1,
153
+ use_mean_pooling=True,
154
+ pool_scales=[1, 2, 3, 6],
155
+ use_auxiliary_head=True,
156
+ auxiliary_loss_weight=0.4,
157
+ auxiliary_channels=256,
158
+ auxiliary_num_convs=1,
159
+ auxiliary_concat_input=False,
160
+ semantic_loss_ignore_index=255,
161
+ out_features=None,
162
+ out_indices=None,
163
+ add_fpn=False,
164
+ reshape_hidden_states=True,
165
+ **kwargs,
166
+ ):
167
+ super().__init__(**kwargs)
168
+
169
+ self.vocab_size = vocab_size
170
+ self.hidden_size = hidden_size
171
+ self.num_hidden_layers = num_hidden_layers
172
+ self.num_attention_heads = num_attention_heads
173
+ self.intermediate_size = intermediate_size
174
+ self.hidden_act = hidden_act
175
+ self.hidden_dropout_prob = hidden_dropout_prob
176
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
177
+ self.initializer_range = initializer_range
178
+ self.layer_norm_eps = layer_norm_eps
179
+
180
+ self.image_size = image_size
181
+ self.patch_size = patch_size
182
+ self.num_channels = num_channels
183
+ self.use_mask_token = use_mask_token
184
+ self.use_absolute_position_embeddings = use_absolute_position_embeddings
185
+ self.use_relative_position_bias = use_relative_position_bias
186
+ self.use_shared_relative_position_bias = use_shared_relative_position_bias
187
+ self.layer_scale_init_value = layer_scale_init_value
188
+ self.drop_path_rate = drop_path_rate
189
+ self.use_mean_pooling = use_mean_pooling
190
+ # decode head attributes (semantic segmentation)
191
+ self.pool_scales = pool_scales
192
+ # auxiliary head attributes (semantic segmentation)
193
+ self.use_auxiliary_head = use_auxiliary_head
194
+ self.auxiliary_loss_weight = auxiliary_loss_weight
195
+ self.auxiliary_channels = auxiliary_channels
196
+ self.auxiliary_num_convs = auxiliary_num_convs
197
+ self.auxiliary_concat_input = auxiliary_concat_input
198
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
199
+
200
+ # handle backwards compatibility
201
+ if "segmentation_indices" in kwargs:
202
+ logger.warning(
203
+ "The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.",
205
+ )
206
+ out_indices = kwargs.pop("segmentation_indices")
207
+
208
+ # backbone attributes
209
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
210
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
211
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
212
+ )
213
+ self.add_fpn = add_fpn
214
+ self.reshape_hidden_states = reshape_hidden_states
215
+
216
+
217
+ # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
218
+ class BeitOnnxConfig(OnnxConfig):
219
+ torch_onnx_minimum_version = version.parse("1.11")
220
+
221
+ @property
222
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
223
+ return OrderedDict(
224
+ [
225
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
226
+ ]
227
+ )
228
+
229
+ @property
230
+ def atol_for_validation(self) -> float:
231
+ return 1e-4
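Beyond the docstring example embedded above, the same configuration drives backbone usage and ONNX export validation. The following is a small, hedged sketch exercising both pieces defined in this file; it assumes `transformers` is importable and simply echoes values that follow from the class definitions above.

```python
from transformers.models.beit.configuration_beit import BeitConfig, BeitOnnxConfig

# Default config mirrors microsoft/beit-base-patch16-224-pt22k.
config = BeitConfig()
print(config.hidden_size, config.num_hidden_layers)  # 768, 12

# When used as a backbone, stage names are "stem", "stage1", ..., "stage12";
# out_features and out_indices are aligned against that list.
backbone_config = BeitConfig(out_features=["stage4", "stage8", "stage12"])
print(backbone_config.out_features, backbone_config.out_indices)

# The ONNX config exposes the dynamic axes for pixel_values and the
# validation tolerance used when comparing exported and eager outputs.
onnx_config = BeitOnnxConfig(config)
print(dict(onnx_config.inputs))         # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
print(onnx_config.atol_for_validation)  # 1e-4
```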
venv/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py ADDED
@@ -0,0 +1,374 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BEiT checkpoints from the unilm repository."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from datasets import load_dataset
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import (
29
+ BeitConfig,
30
+ BeitForImageClassification,
31
+ BeitForMaskedImageModeling,
32
+ BeitForSemanticSegmentation,
33
+ BeitImageProcessor,
34
+ )
35
+ from transformers.image_utils import PILImageResampling
36
+ from transformers.utils import logging
37
+
38
+
39
+ logging.set_verbosity_info()
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ # here we list all keys to be renamed (original name on the left, our name on the right)
44
+ def create_rename_keys(config, has_lm_head=False, is_semantic=False):
45
+ prefix = "backbone." if is_semantic else ""
46
+
47
+ rename_keys = []
48
+ for i in range(config.num_hidden_layers):
49
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
50
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
51
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
52
+ rename_keys.append(
53
+ (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
54
+ )
55
+ rename_keys.append(
56
+ (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
57
+ )
58
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
59
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
60
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
61
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
62
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
63
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
64
+
65
+ # projection layer + position embeddings
66
+ rename_keys.extend(
67
+ [
68
+ (f"{prefix}cls_token", "beit.embeddings.cls_token"),
69
+ (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
70
+ (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
71
+ ]
72
+ )
73
+
74
+ if has_lm_head:
75
+ # mask token + shared relative position bias + layernorm
76
+ rename_keys.extend(
77
+ [
78
+ ("mask_token", "beit.embeddings.mask_token"),
79
+ (
80
+ "rel_pos_bias.relative_position_bias_table",
81
+ "beit.encoder.relative_position_bias.relative_position_bias_table",
82
+ ),
83
+ (
84
+ "rel_pos_bias.relative_position_index",
85
+ "beit.encoder.relative_position_bias.relative_position_index",
86
+ ),
87
+ ("norm.weight", "layernorm.weight"),
88
+ ("norm.bias", "layernorm.bias"),
89
+ ]
90
+ )
91
+ elif is_semantic:
92
+ # semantic segmentation classification heads
93
+ rename_keys.extend(
94
+ [
95
+ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
96
+ ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
97
+ ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
98
+ ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
99
+ ]
100
+ )
101
+ else:
102
+ # layernorm + classification head
103
+ rename_keys.extend(
104
+ [
105
+ ("fc_norm.weight", "beit.pooler.layernorm.weight"),
106
+ ("fc_norm.bias", "beit.pooler.layernorm.bias"),
107
+ ("head.weight", "classifier.weight"),
108
+ ("head.bias", "classifier.bias"),
109
+ ]
110
+ )
111
+
112
+ return rename_keys
113
+
114
+
115
+ # we split up the matrix of each encoder layer into queries, keys and values
116
+ def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
117
+ for i in range(config.num_hidden_layers):
118
+ prefix = "backbone." if is_semantic else ""
119
+ # queries, keys and values
120
+ in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
121
+ q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
122
+ v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
123
+
124
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
125
+ : config.hidden_size, :
126
+ ]
127
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
128
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
129
+ config.hidden_size : config.hidden_size * 2, :
130
+ ]
131
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
132
+ -config.hidden_size :, :
133
+ ]
134
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
135
+
136
+ # gamma_1 and gamma_2
137
+ # we call them lambda because otherwise they are renamed when using .from_pretrained
138
+ gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
139
+ gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
140
+
141
+ state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
142
+ state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
143
+
144
+ # relative_position bias table + index
145
+ if not has_lm_head:
146
+ # each layer has its own relative position bias
147
+ table = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_bias_table")
148
+ index = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_index")
149
+
150
+ state_dict[
151
+ f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"
152
+ ] = table
153
+ state_dict[
154
+ f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"
155
+ ] = index
156
+
157
+
158
+ def rename_key(dct, old, new):
159
+ val = dct.pop(old)
160
+ dct[new] = val
161
+
162
+
163
+ # We will verify our results on an image of cute cats
164
+ def prepare_img():
165
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
166
+ im = Image.open(requests.get(url, stream=True).raw)
167
+ return im
168
+
169
+
170
+ @torch.no_grad()
171
+ def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
172
+ """
173
+ Copy/paste/tweak model's weights to our BEiT structure.
174
+ """
175
+
176
+ # define default BEiT configuration
177
+ config = BeitConfig()
178
+ has_lm_head = False
179
+ is_semantic = False
180
+ repo_id = "huggingface/label-files"
181
+ # set config parameters based on URL
182
+ if checkpoint_url[-9:-4] == "pt22k":
183
+ # masked image modeling
184
+ config.use_shared_relative_position_bias = True
185
+ config.use_mask_token = True
186
+ has_lm_head = True
187
+ elif checkpoint_url[-9:-4] == "ft22k":
188
+ # intermediate fine-tuning on ImageNet-22k
189
+ config.use_relative_position_bias = True
190
+ config.num_labels = 21841
191
+ filename = "imagenet-22k-id2label.json"
192
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
193
+ id2label = {int(k): v for k, v in id2label.items()}
194
+ # this dataset contains 21843 labels but the model only has 21841
195
+ # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
196
+ del id2label[9205]
197
+ del id2label[15027]
198
+ config.id2label = id2label
199
+ config.label2id = {v: k for k, v in id2label.items()}
200
+ elif checkpoint_url[-8:-4] == "to1k":
201
+ # fine-tuning on ImageNet-1k
202
+ config.use_relative_position_bias = True
203
+ config.num_labels = 1000
204
+ filename = "imagenet-1k-id2label.json"
205
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
206
+ id2label = {int(k): v for k, v in id2label.items()}
207
+ config.id2label = id2label
208
+ config.label2id = {v: k for k, v in id2label.items()}
209
+ if "384" in checkpoint_url:
210
+ config.image_size = 384
211
+ if "512" in checkpoint_url:
212
+ config.image_size = 512
213
+ elif "ade20k" in checkpoint_url:
214
+ # fine-tuning
215
+ config.use_relative_position_bias = True
216
+ config.num_labels = 150
217
+ filename = "ade20k-id2label.json"
218
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
219
+ id2label = {int(k): v for k, v in id2label.items()}
220
+ config.id2label = id2label
221
+ config.label2id = {v: k for k, v in id2label.items()}
222
+ config.image_size = 640
223
+ is_semantic = True
224
+ else:
225
+ raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'")
226
+
227
+ # size of the architecture
228
+ if "base" in checkpoint_url:
229
+ pass
230
+ elif "large" in checkpoint_url:
231
+ config.hidden_size = 1024
232
+ config.intermediate_size = 4096
233
+ config.num_hidden_layers = 24
234
+ config.num_attention_heads = 16
235
+ if "ade20k" in checkpoint_url:
236
+ config.image_size = 640
237
+ config.out_indices = [7, 11, 15, 23]
238
+ else:
239
+ raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
240
+
241
+ # load state_dict of original model, remove and rename some keys
242
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)
243
+ state_dict = state_dict["model"] if "ade20k" not in checkpoint_url else state_dict["state_dict"]
244
+
245
+ rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic)
246
+ for src, dest in rename_keys:
247
+ rename_key(state_dict, src, dest)
248
+ read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic)
249
+ if is_semantic:
250
+ # add prefix to decoder keys
251
+ for key, val in state_dict.copy().items():
252
+ val = state_dict.pop(key)
253
+ if key.startswith("backbone.fpn"):
254
+ key = key.replace("backbone.fpn", "fpn")
255
+ state_dict[key] = val
256
+
257
+ # load HuggingFace model
258
+ if checkpoint_url[-9:-4] == "pt22k":
259
+ model = BeitForMaskedImageModeling(config)
260
+ elif "ade20k" in checkpoint_url:
261
+ model = BeitForSemanticSegmentation(config)
262
+ else:
263
+ model = BeitForImageClassification(config)
264
+ model.eval()
265
+ model.load_state_dict(state_dict)
266
+
267
+ # Check outputs on an image
268
+ if is_semantic:
269
+ image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
270
+ ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
271
+ image = Image.open(ds[0]["file"])
272
+ else:
273
+ image_processor = BeitImageProcessor(
274
+ size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
275
+ )
276
+ image = prepare_img()
277
+
278
+ encoding = image_processor(images=image, return_tensors="pt")
279
+ pixel_values = encoding["pixel_values"]
280
+
281
+ outputs = model(pixel_values)
282
+ logits = outputs.logits
283
+
284
+ # verify logits
285
+ expected_shape = torch.Size([1, 1000])
286
+ if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
287
+ expected_shape = torch.Size([1, 196, 8192])
288
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
289
+ expected_shape = torch.Size([1, 196, 8192])
290
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
291
+ expected_shape = torch.Size([1, 21841])
292
+ expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
293
+ expected_class_idx = 2397
294
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
295
+ expected_shape = torch.Size([1, 21841])
296
+ expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
297
+ expected_class_idx = 2396
298
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
299
+ expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
300
+ expected_class_idx = 285
301
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
302
+ expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
303
+ expected_class_idx = 281
304
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
305
+ expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
306
+ expected_class_idx = 761
307
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
308
+ expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
309
+ expected_class_idx = 761
310
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
311
+ expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
312
+ expected_class_idx = 761
313
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
314
+ expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
315
+ expected_class_idx = 761
316
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
317
+ expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
318
+ expected_class_idx = 761
319
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_640_pt22k_ft22ktoade20k"):
320
+ expected_shape = (1, 150, 160, 160)
321
+ expected_logits = torch.tensor(
322
+ [
323
+ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
324
+ [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
325
+ [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
326
+ ]
327
+ )
328
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_640_pt22k_ft22ktoade20k"):
329
+ expected_shape = (1, 150, 160, 160)
330
+ expected_logits = torch.tensor(
331
+ [
332
+ [[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]],
333
+ [[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]],
334
+ [[0.9895, 4.3467, 4.7663], [4.2476, 5.6830, 6.1518], [4.5550, 6.2495, 6.5154]],
335
+ ]
336
+ )
337
+ else:
338
+ raise ValueError("Can't verify logits as model is not supported")
339
+
340
+ if logits.shape != expected_shape:
341
+ raise ValueError(f"Shape of logits not as expected. {logits.shape=}, {expected_shape=}")
342
+ if not has_lm_head:
343
+ if is_semantic:
344
+ if not torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-3):
345
+ raise ValueError("First elements of logits not as expected")
346
+ else:
347
+ print("Predicted class idx:", logits.argmax(-1).item())
348
+
349
+ if not torch.allclose(logits[0, :3], expected_logits, atol=1e-3):
350
+ raise ValueError("First elements of logits not as expected")
351
+ if logits.argmax(-1).item() != expected_class_idx:
352
+ raise ValueError("Predicted class index not as expected")
353
+
354
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
355
+ print(f"Saving model to {pytorch_dump_folder_path}")
356
+ model.save_pretrained(pytorch_dump_folder_path)
357
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
358
+ image_processor.save_pretrained(pytorch_dump_folder_path)
359
+
360
+
361
+ if __name__ == "__main__":
362
+ parser = argparse.ArgumentParser()
363
+
364
+ parser.add_argument(
365
+ "--checkpoint_url",
366
+ default="https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth",
367
+ type=str,
368
+ help="URL to the original PyTorch checkpoint (.pth file).",
369
+ )
370
+ parser.add_argument(
371
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
372
+ )
373
+ args = parser.parse_args()
374
+ convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
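The least obvious step in this conversion script is `read_in_q_k_v`: the original checkpoints store a fused `qkv.weight` of shape `(3 * hidden_size, hidden_size)` plus separate `q_bias`/`v_bias` tensors (BEiT keys carry no bias), and the script slices that matrix into the separate query/key/value projections expected by `BeitModel`. Here is a toy sketch of the same slicing, independent of any real checkpoint.

```python
import torch

hidden_size = 8  # tiny stand-in for config.hidden_size
qkv_weight = torch.randn(3 * hidden_size, hidden_size)

# Same slicing as read_in_q_k_v: rows [0, H) -> query, [H, 2H) -> key, [2H, 3H) -> value.
query_w = qkv_weight[:hidden_size, :]
key_w = qkv_weight[hidden_size : hidden_size * 2, :]
value_w = qkv_weight[-hidden_size:, :]

# Concatenating the slices back recovers the fused matrix exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), qkv_weight)
print(query_w.shape, key_w.shape, value_w.shape)  # each (8, 8)
```

Running the script itself follows the argparse definition above, e.g. `python convert_beit_unilm_to_pytorch.py --checkpoint_url <url ending in pt22k/ft22k/to1k/ade20k> --pytorch_dump_folder_path ./beit-dump`.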
venv/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py ADDED
@@ -0,0 +1,33 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Feature extractor class for BEiT."""
+
+ import warnings
+
+ from ...utils import logging
+ from .image_processing_beit import BeitImageProcessor
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class BeitFeatureExtractor(BeitImageProcessor):
+     def __init__(self, *args, **kwargs) -> None:
+         warnings.warn(
+             "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+             " use BeitImageProcessor instead.",
+             FutureWarning,
+         )
+         super().__init__(*args, **kwargs)
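`BeitFeatureExtractor` is only a deprecation shim over `BeitImageProcessor`: constructing it warns and otherwise defers entirely to the parent class. A minimal check of that behaviour, assuming `transformers` with the vision extras installed:

```python
import warnings

from transformers import BeitFeatureExtractor, BeitImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = BeitFeatureExtractor()

# The shim subclasses the image processor and emits a FutureWarning on init.
print(isinstance(extractor, BeitImageProcessor))                    # True
print(any(issubclass(w.category, FutureWarning) for w in caught))   # True
```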
venv/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py ADDED
@@ -0,0 +1,531 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Beit."""
16
+
17
+ import warnings
18
+ from typing import Any, Dict, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
23
+ from ...image_transforms import resize, to_channel_dimension_format
24
+ from ...image_utils import (
25
+ IMAGENET_STANDARD_MEAN,
26
+ IMAGENET_STANDARD_STD,
27
+ ChannelDimension,
28
+ ImageInput,
29
+ PILImageResampling,
30
+ infer_channel_dimension_format,
31
+ is_scaled_image,
32
+ make_list_of_images,
33
+ to_numpy_array,
34
+ valid_images,
35
+ validate_kwargs,
36
+ validate_preprocess_arguments,
37
+ )
38
+ from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
39
+
40
+
41
+ if is_vision_available():
42
+ import PIL
43
+
44
+ if is_torch_available():
45
+ import torch
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ class BeitImageProcessor(BaseImageProcessor):
52
+ r"""
53
+ Constructs a BEiT image processor.
54
+
55
+ Args:
56
+ do_resize (`bool`, *optional*, defaults to `True`):
57
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
58
+ `do_resize` parameter in the `preprocess` method.
59
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
60
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
61
+ method.
62
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
63
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
64
+ `preprocess` method.
65
+ do_center_crop (`bool`, *optional*, defaults to `True`):
66
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
67
+ is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
68
+ `preprocess` method.
69
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
70
+ Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
71
+ Can be overridden by the `crop_size` parameter in the `preprocess` method.
72
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
73
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
74
+ `preprocess` method.
75
+ do_rescale (`bool`, *optional*, defaults to `True`):
76
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
77
+ parameter in the `preprocess` method.
78
+ do_normalize (`bool`, *optional*, defaults to `True`):
79
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
80
+ method.
81
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
82
+ The mean to use if normalizing the image. This is a float or list of floats of length of the number of
83
+ channels of the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
84
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
85
+ The standard deviation to use if normalizing the image. This is a float or list of floats of length of the
86
+ number of channels of the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
87
+ do_reduce_labels (`bool`, *optional*, defaults to `False`):
88
+ Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
89
+ used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
90
+ background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
91
+ `preprocess` method.
92
+ """
93
+
94
+ model_input_names = ["pixel_values"]
95
+
96
+ def __init__(
97
+ self,
98
+ do_resize: bool = True,
99
+ size: Dict[str, int] = None,
100
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
101
+ do_center_crop: bool = True,
102
+ crop_size: Dict[str, int] = None,
103
+ rescale_factor: Union[int, float] = 1 / 255,
104
+ do_rescale: bool = True,
105
+ do_normalize: bool = True,
106
+ image_mean: Optional[Union[float, List[float]]] = None,
107
+ image_std: Optional[Union[float, List[float]]] = None,
108
+ do_reduce_labels: bool = False,
109
+ **kwargs,
110
+ ) -> None:
111
+ if "reduce_labels" in kwargs:
112
+ warnings.warn(
113
+ "The `reduce_labels` parameter is deprecated and will be removed in a future version. Please use"
114
+ " `do_reduce_labels` instead.",
115
+ FutureWarning,
116
+ )
117
+ do_reduce_labels = kwargs.pop("reduce_labels")
118
+ super().__init__(**kwargs)
119
+ size = size if size is not None else {"height": 256, "width": 256}
120
+ size = get_size_dict(size)
121
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
122
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
123
+ self.do_resize = do_resize
124
+ self.size = size
125
+ self.resample = resample
126
+ self.do_center_crop = do_center_crop
127
+ self.crop_size = crop_size
128
+ self.do_rescale = do_rescale
129
+ self.rescale_factor = rescale_factor
130
+ self.do_normalize = do_normalize
131
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
132
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
133
+ self.do_reduce_labels = do_reduce_labels
134
+ self._valid_processor_keys = [
135
+ "images",
136
+ "segmentation_maps",
137
+ "do_resize",
138
+ "size",
139
+ "resample",
140
+ "do_center_crop",
141
+ "crop_size",
142
+ "do_rescale",
143
+ "rescale_factor",
144
+ "do_normalize",
145
+ "image_mean",
146
+ "image_std",
147
+ "do_reduce_labels",
148
+ "return_tensors",
149
+ "data_format",
150
+ "input_data_format",
151
+ ]
152
+
153
+ @classmethod
154
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
155
+ """
156
+ Overrides the `from_dict` method from the base class to make sure `reduce_labels` is updated if image processor
157
+ is created using from_dict and kwargs e.g. `BeitImageProcessor.from_pretrained(checkpoint, reduce_labels=True)`
158
+ """
159
+ image_processor_dict = image_processor_dict.copy()
160
+ if "reduce_labels" in kwargs:
161
+ image_processor_dict["reduce_labels"] = kwargs.pop("reduce_labels")
162
+ return super().from_dict(image_processor_dict, **kwargs)
163
+
164
+ def resize(
165
+ self,
166
+ image: np.ndarray,
167
+ size: Dict[str, int],
168
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
169
+ data_format: Optional[Union[str, ChannelDimension]] = None,
170
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
171
+ **kwargs,
172
+ ) -> np.ndarray:
173
+ """
174
+ Resize an image to (size["height"], size["width"]).
175
+
176
+ Args:
177
+ image (`np.ndarray`):
178
+ Image to resize.
179
+ size (`Dict[str, int]`):
180
+ Size of the output image.
181
+ resample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):
182
+ Resampling filter to use when resizing the image.
183
+ data_format (`str` or `ChannelDimension`, *optional*):
184
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
185
+ input_data_format (`str` or `ChannelDimension`, *optional*):
186
+ The channel dimension format of the input image. If not provided, it will be inferred.
187
+ """
188
+ size = get_size_dict(size, default_to_square=True, param_name="size")
189
+ if "height" not in size or "width" not in size:
190
+ raise ValueError(f"The `size` argument must contain `height` and `width` keys. Got {size.keys()}")
191
+ return resize(
192
+ image,
193
+ size=(size["height"], size["width"]),
194
+ resample=resample,
195
+ data_format=data_format,
196
+ input_data_format=input_data_format,
197
+ **kwargs,
198
+ )
199
+
200
+ def reduce_label(self, label: ImageInput) -> np.ndarray:
201
+ label = to_numpy_array(label)
202
+ # Avoid using underflow conversion
203
+ label[label == 0] = 255
204
+ label = label - 1
205
+ label[label == 254] = 255
206
+ return label
207
+
208
+ def _preprocess(
209
+ self,
210
+ image: ImageInput,
211
+ do_reduce_labels: bool = None,
212
+ do_resize: bool = None,
213
+ size: Dict[str, int] = None,
214
+ resample: PILImageResampling = None,
215
+ do_center_crop: bool = None,
216
+ crop_size: Dict[str, int] = None,
217
+ do_rescale: bool = None,
218
+ rescale_factor: float = None,
219
+ do_normalize: bool = None,
220
+ image_mean: Optional[Union[float, List[float]]] = None,
221
+ image_std: Optional[Union[float, List[float]]] = None,
222
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
223
+ ):
224
+ if do_reduce_labels:
225
+ image = self.reduce_label(image)
226
+
227
+ if do_resize:
228
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
229
+
230
+ if do_center_crop:
231
+ image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
232
+
233
+ if do_rescale:
234
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
235
+
236
+ if do_normalize:
237
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
238
+
239
+ return image
240
+
241
+ def _preprocess_image(
242
+ self,
243
+ image: ImageInput,
244
+ do_resize: bool = None,
245
+ size: Dict[str, int] = None,
246
+ resample: PILImageResampling = None,
247
+ do_center_crop: bool = None,
248
+ crop_size: Dict[str, int] = None,
249
+ do_rescale: bool = None,
250
+ rescale_factor: float = None,
251
+ do_normalize: bool = None,
252
+ image_mean: Optional[Union[float, List[float]]] = None,
253
+ image_std: Optional[Union[float, List[float]]] = None,
254
+ data_format: Optional[Union[str, ChannelDimension]] = None,
255
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
256
+ ) -> np.ndarray:
257
+ """Preprocesses a single image."""
258
+ # All transformations expect numpy arrays.
259
+ image = to_numpy_array(image)
260
+ if is_scaled_image(image) and do_rescale:
261
+ logger.warning_once(
262
+ "It looks like you are trying to rescale already rescaled images. If the input"
263
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
264
+ )
265
+ if input_data_format is None:
266
+ input_data_format = infer_channel_dimension_format(image)
267
+ image = self._preprocess(
268
+ image,
269
+ do_reduce_labels=False,
270
+ do_resize=do_resize,
271
+ size=size,
272
+ resample=resample,
273
+ do_center_crop=do_center_crop,
274
+ crop_size=crop_size,
275
+ do_rescale=do_rescale,
276
+ rescale_factor=rescale_factor,
277
+ do_normalize=do_normalize,
278
+ image_mean=image_mean,
279
+ image_std=image_std,
280
+ input_data_format=input_data_format,
281
+ )
282
+ if data_format is not None:
283
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
284
+ return image
285
+
286
+ def _preprocess_segmentation_map(
287
+ self,
288
+ segmentation_map: ImageInput,
289
+ do_resize: bool = None,
290
+ size: Dict[str, int] = None,
291
+ resample: PILImageResampling = None,
292
+ do_center_crop: bool = None,
293
+ crop_size: Dict[str, int] = None,
294
+ do_reduce_labels: bool = None,
295
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
296
+ ):
297
+ """Preprocesses a single segmentation map."""
298
+ # All transformations expect numpy arrays.
299
+ segmentation_map = to_numpy_array(segmentation_map)
300
+ # Add an axis to the segmentation maps for transformations.
301
+ if segmentation_map.ndim == 2:
302
+ segmentation_map = segmentation_map[None, ...]
303
+ added_dimension = True
304
+ input_data_format = ChannelDimension.FIRST
305
+ else:
306
+ added_dimension = False
307
+ if input_data_format is None:
308
+ input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
309
+ segmentation_map = self._preprocess(
310
+ image=segmentation_map,
311
+ do_reduce_labels=do_reduce_labels,
312
+ do_resize=do_resize,
313
+ resample=resample,
314
+ size=size,
315
+ do_center_crop=do_center_crop,
316
+ crop_size=crop_size,
317
+ do_normalize=False,
318
+ do_rescale=False,
319
+ input_data_format=ChannelDimension.FIRST,
320
+ )
321
+ # Remove extra axis if added
322
+ if added_dimension:
323
+ segmentation_map = np.squeeze(segmentation_map, axis=0)
324
+ segmentation_map = segmentation_map.astype(np.int64)
325
+ return segmentation_map
326
+
327
+ def __call__(self, images, segmentation_maps=None, **kwargs):
328
+ # Overrides the `__call__` method of the `Preprocessor` class such that the images and segmentation maps can both
329
+ # be passed in as positional arguments.
330
+ return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
331
+
332
+ def preprocess(
333
+ self,
334
+ images: ImageInput,
335
+ segmentation_maps: Optional[ImageInput] = None,
336
+ do_resize: bool = None,
337
+ size: Dict[str, int] = None,
338
+ resample: PILImageResampling = None,
339
+ do_center_crop: bool = None,
340
+ crop_size: Dict[str, int] = None,
341
+ do_rescale: bool = None,
342
+ rescale_factor: float = None,
343
+ do_normalize: bool = None,
344
+ image_mean: Optional[Union[float, List[float]]] = None,
345
+ image_std: Optional[Union[float, List[float]]] = None,
346
+ do_reduce_labels: Optional[bool] = None,
347
+ return_tensors: Optional[Union[str, TensorType]] = None,
348
+ data_format: ChannelDimension = ChannelDimension.FIRST,
349
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
350
+ **kwargs,
351
+ ) -> BatchFeature:
352
+ """
353
+ Preprocess an image or batch of images.
354
+
355
+ Args:
356
+ images (`ImageInput`):
357
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
358
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
359
+ segmentation_maps (`ImageInput`, *optional*):
360
+ Segmentation maps to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
361
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
362
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
363
+ Whether to resize the image.
364
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
365
+ Size of the image after resizing.
366
+ resample (`int`, *optional*, defaults to `self.resample`):
367
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
368
+ has an effect if `do_resize` is set to `True`.
369
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
370
+ Whether to center crop the image.
371
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
372
+ Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
373
+ padded with zeros and then cropped.
374
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
375
+ Whether to rescale the image pixel values to the [0, 1] range.
376
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
377
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
378
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
379
+ Whether to normalize the image.
380
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
381
+ Image mean.
382
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
383
+ Image standard deviation.
384
+ do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
385
+ Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
386
+ is used for background, and background itself is not included in all classes of a dataset (e.g.
387
+ ADE20k). The background label will be replaced by 255.
388
+ return_tensors (`str` or `TensorType`, *optional*):
389
+ The type of tensors to return. Can be one of:
390
+ - Unset: Return a list of `np.ndarray`.
391
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
392
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
393
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
394
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
395
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
396
+ The channel dimension format for the output image. Can be one of:
397
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
398
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
399
+ - Unset: Use the channel dimension format of the input image.
400
+ input_data_format (`ChannelDimension` or `str`, *optional*):
401
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
402
+ from the input image. Can be one of:
403
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
404
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
405
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
406
+ """
407
+ do_resize = do_resize if do_resize is not None else self.do_resize
408
+ size = size if size is not None else self.size
409
+ size = get_size_dict(size, default_to_square=True, param_name="size")
410
+ resample = resample if resample is not None else self.resample
411
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
412
+ crop_size = crop_size if crop_size is not None else self.crop_size
413
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
414
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
415
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
416
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
417
+ image_mean = image_mean if image_mean is not None else self.image_mean
418
+ image_std = image_std if image_std is not None else self.image_std
419
+ do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
420
+
421
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
422
+
423
+ images = make_list_of_images(images)
424
+
425
+ if segmentation_maps is not None:
426
+ segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
427
+
428
+ if segmentation_maps is not None and not valid_images(segmentation_maps):
429
+ raise ValueError(
430
+ "Invalid segmentation_maps type. Must be of type PIL.Image.Image, numpy.ndarray, "
431
+ "torch.Tensor, tf.Tensor or jax.ndarray."
432
+ )
433
+ if not valid_images(images):
434
+ raise ValueError(
435
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
436
+ "torch.Tensor, tf.Tensor or jax.ndarray."
437
+ )
438
+
439
+ validate_preprocess_arguments(
440
+ do_rescale=do_rescale,
441
+ rescale_factor=rescale_factor,
442
+ do_normalize=do_normalize,
443
+ image_mean=image_mean,
444
+ image_std=image_std,
445
+ do_center_crop=do_center_crop,
446
+ crop_size=crop_size,
447
+ do_resize=do_resize,
448
+ size=size,
449
+ resample=resample,
450
+ )
451
+
452
+ images = [
453
+ self._preprocess_image(
454
+ image=img,
455
+ do_resize=do_resize,
456
+ do_center_crop=do_center_crop,
457
+ do_rescale=do_rescale,
458
+ do_normalize=do_normalize,
459
+ resample=resample,
460
+ size=size,
461
+ rescale_factor=rescale_factor,
462
+ crop_size=crop_size,
463
+ image_mean=image_mean,
464
+ image_std=image_std,
465
+ data_format=data_format,
466
+ input_data_format=input_data_format,
467
+ )
468
+ for img in images
469
+ ]
470
+
471
+ data = {"pixel_values": images}
472
+
473
+ if segmentation_maps is not None:
474
+ segmentation_maps = [
475
+ self._preprocess_segmentation_map(
476
+ segmentation_map=segmentation_map,
477
+ do_reduce_labels=do_reduce_labels,
478
+ do_resize=do_resize,
479
+ resample=resample,
480
+ size=size,
481
+ do_center_crop=do_center_crop,
482
+ crop_size=crop_size,
483
+ )
484
+ for segmentation_map in segmentation_maps
485
+ ]
486
+ data["labels"] = segmentation_maps
487
+
488
+ return BatchFeature(data=data, tensor_type=return_tensors)
489
+
490
+ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
491
+ """
492
+ Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
493
+
494
+ Args:
495
+ outputs ([`BeitForSemanticSegmentation`]):
496
+ Raw outputs of the model.
497
+ target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
498
+ List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
499
+ predictions will not be resized.
500
+
501
+ Returns:
502
+ semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
503
+ segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
504
+ specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
505
+ """
506
+ # TODO: add support for other frameworks
507
+ logits = outputs.logits
508
+
509
+ # Resize logits and compute semantic segmentation maps
510
+ if target_sizes is not None:
511
+ if len(logits) != len(target_sizes):
512
+ raise ValueError(
513
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
514
+ )
515
+
516
+ if is_torch_tensor(target_sizes):
517
+ target_sizes = target_sizes.numpy()
518
+
519
+ semantic_segmentation = []
520
+
521
+ for idx in range(len(logits)):
522
+ resized_logits = torch.nn.functional.interpolate(
523
+ logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
524
+ )
525
+ semantic_map = resized_logits[0].argmax(dim=0)
526
+ semantic_segmentation.append(semantic_map)
527
+ else:
528
+ semantic_segmentation = logits.argmax(dim=1)
529
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
530
+
531
+ return semantic_segmentation
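# A rough end-to-end sketch (not part of the file above) tying `preprocess` and
# `post_process_semantic_segmentation` together; the ADE20k checkpoint name is an assumption.
import requests
import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")

inputs = processor(images=image, return_tensors="pt")  # resize + rescale + normalize -> pixel_values
with torch.no_grad():
    outputs = model(**inputs)

# Upsample the logits to the original (height, width) and take the per-pixel argmax.
segmentation = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # (height, width) of the input image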
venv/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py ADDED
@@ -0,0 +1,1425 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BEiT model."""
16
+
17
+
18
+ import collections.abc
19
+ import math
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import Tensor, nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_outputs import (
30
+ BackboneOutput,
31
+ BaseModelOutput,
32
+ BaseModelOutputWithPooling,
33
+ ImageClassifierOutput,
34
+ MaskedLMOutput,
35
+ SemanticSegmenterOutput,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel
38
+ from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
39
+ from ...utils import (
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from ...utils.backbone_utils import BackboneMixin
47
+ from .configuration_beit import BeitConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ # General docstring
53
+ _CONFIG_FOR_DOC = "BeitConfig"
54
+
55
+ # Base docstring
56
+ _CHECKPOINT_FOR_DOC = "microsoft/beit-base-patch16-224-pt22k"
57
+ _EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
58
+
59
+ # Image classification docstring
60
+ _IMAGE_CLASS_CHECKPOINT = "microsoft/beit-base-patch16-224"
61
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
62
+
63
+
64
+ from ..deprecated._archive_maps import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
65
+
66
+
67
+ @dataclass
68
+ class BeitModelOutputWithPooling(BaseModelOutputWithPooling):
69
+ """
70
+ Class for outputs of [`BeitModel`].
71
+
72
+ Args:
73
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
74
+ Sequence of hidden-states at the output of the last layer of the model.
75
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
76
+ Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
77
+ *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
78
+ will be returned.
79
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
80
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
81
+ shape `(batch_size, sequence_length, hidden_size)`.
82
+
83
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
84
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
85
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
86
+ sequence_length)`.
87
+
88
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
89
+ heads.
90
+ """
91
+
92
+
93
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
94
+ """
95
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
96
+
97
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
98
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
99
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
100
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
101
+ argument.
102
+ """
103
+ if drop_prob == 0.0 or not training:
104
+ return input
105
+ keep_prob = 1 - drop_prob
106
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
107
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
108
+ random_tensor.floor_() # binarize
109
+ output = input.div(keep_prob) * random_tensor
110
+ return output
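# A small sanity-check sketch (not from the file above): in training mode, `drop_path` keeps each
# sample with probability 1 - drop_prob and rescales survivors by 1 / (1 - drop_prob), so the
# expected output equals the input; in eval mode it is a no-op.
import torch

x = torch.ones(8, 4, 16)
out = drop_path(x, drop_prob=0.5, training=True)  # each sample is either all zeros or scaled to 2.0
assert torch.allclose(drop_path(x, drop_prob=0.5, training=False), x)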
111
+
112
+
113
+ class BeitDropPath(nn.Module):
114
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
115
+
116
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
117
+ super().__init__()
118
+ self.drop_prob = drop_prob
119
+
120
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
121
+ return drop_path(hidden_states, self.drop_prob, self.training)
122
+
123
+ def extra_repr(self) -> str:
124
+ return "p={}".format(self.drop_prob)
125
+
126
+
127
+ # Based on timm implementation, which can be found here:
128
+ # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
129
+ class BeitEmbeddings(nn.Module):
130
+ """
131
+ Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
132
+
133
+ """
134
+
135
+ def __init__(self, config: BeitConfig) -> None:
136
+ super().__init__()
137
+
138
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
139
+ if config.use_mask_token:
140
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
141
+ else:
142
+ self.mask_token = None
143
+ self.patch_embeddings = BeitPatchEmbeddings(config)
144
+ num_patches = self.patch_embeddings.num_patches
145
+ if config.use_absolute_position_embeddings:
146
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
147
+ else:
148
+ self.position_embeddings = None
149
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
150
+
151
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
152
+ embeddings, (patch_height, patch_width) = self.patch_embeddings(
153
+ pixel_values, self.position_embeddings[:, 1:, :] if self.position_embeddings is not None else None
154
+ )
155
+ batch_size, seq_len, _ = embeddings.size()
156
+
157
+ if bool_masked_pos is not None:
158
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
159
+ # replace the masked visual tokens by mask_tokens
160
+ w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
161
+ embeddings = embeddings * (1 - w) + mask_tokens * w
162
+
163
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
164
+ if self.position_embeddings is not None:
165
+ cls_tokens = cls_tokens + self.position_embeddings[:, :1, :]
166
+
167
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
168
+
169
+ embeddings = self.dropout(embeddings)
170
+
171
+ return embeddings, (patch_height, patch_width)
172
+
173
+
174
+ class BeitPatchEmbeddings(nn.Module):
175
+ """
176
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
177
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
178
+ Transformer.
179
+ """
180
+
181
+ def __init__(self, config):
182
+ super().__init__()
183
+ image_size, patch_size = config.image_size, config.patch_size
184
+ num_channels, hidden_size = config.num_channels, config.hidden_size
185
+
186
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
187
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
188
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
189
+ patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
190
+ self.image_size = image_size
191
+ self.patch_size = patch_size
192
+ self.num_channels = num_channels
193
+ self.num_patches = num_patches
194
+ self.patch_shape = patch_shape
195
+
196
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
197
+
198
+ def forward(self, pixel_values: torch.Tensor, position_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
199
+ batch_size, num_channels, height, width = pixel_values.shape
200
+ if num_channels != self.num_channels:
201
+ raise ValueError(
202
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
203
+ )
204
+
205
+ embeddings = self.projection(pixel_values)
206
+ patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
207
+
208
+ if position_embedding is not None:
209
+ # interpolate the position embedding to the corresponding size
210
+ position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(
211
+ 0, 3, 1, 2
212
+ )
213
+ position_embedding = nn.functional.interpolate(
214
+ position_embedding, size=(patch_height, patch_width), mode="bicubic"
215
+ )
216
+ embeddings = embeddings + position_embedding
217
+
218
+ embeddings = embeddings.flatten(2).transpose(1, 2)
219
+
220
+ return embeddings, (patch_height, patch_width)
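# A quick numeric sketch (not from the file above): with the default BEiT-base settings of a
# 224x224 image and 16x16 patches, the projection yields (224 // 16) ** 2 == 196 patch tokens;
# prepending the CLS token gives the sequence length 197 seen in _EXPECTED_OUTPUT_SHAPE.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
print(num_patches, num_patches + 1)  # 196 197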
221
+
222
+
223
+ class BeitSelfAttention(nn.Module):
224
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
225
+ super().__init__()
226
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
227
+ raise ValueError(
228
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
229
+ f"heads {config.num_attention_heads}."
230
+ )
231
+
232
+ self.num_attention_heads = config.num_attention_heads
233
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
234
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
235
+
236
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
237
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
238
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
239
+
240
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
241
+
242
+ if window_size:
243
+ self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
244
+ else:
245
+ self.relative_position_bias = None
246
+
247
+ def transpose_for_scores(self, x):
248
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
249
+ x = x.view(*new_x_shape)
250
+ return x.permute(0, 2, 1, 3)
251
+
252
+ def forward(
253
+ self,
254
+ hidden_states: torch.Tensor,
255
+ head_mask: Optional[torch.Tensor] = None,
256
+ output_attentions: bool = False,
257
+ relative_position_bias: Optional["BeitRelativePositionBias"] = None,
258
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
259
+ mixed_query_layer = self.query(hidden_states)
260
+
261
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
262
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
263
+ query_layer = self.transpose_for_scores(mixed_query_layer)
264
+
265
+ # Take the dot product between "query" and "key" to get the raw attention scores.
266
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
267
+
268
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
269
+
270
+ # Add relative position bias if present.
271
+ if self.relative_position_bias is not None:
272
+ attention_scores = attention_scores + self.relative_position_bias().unsqueeze(0)
273
+
274
+ # Add shared relative position bias if provided.
275
+ if relative_position_bias is not None:
276
+ attention_scores = attention_scores + relative_position_bias
277
+
278
+ # Normalize the attention scores to probabilities.
279
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
280
+
281
+ # This is actually dropping out entire tokens to attend to, which might
282
+ # seem a bit unusual, but is taken from the original Transformer paper.
283
+ attention_probs = self.dropout(attention_probs)
284
+
285
+ # Mask heads if we want to
286
+ if head_mask is not None:
287
+ attention_probs = attention_probs * head_mask
288
+
289
+ context_layer = torch.matmul(attention_probs, value_layer)
290
+
291
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
292
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
293
+ context_layer = context_layer.view(*new_context_layer_shape)
294
+
295
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
296
+
297
+ return outputs
298
+
299
+
300
+ class BeitSelfOutput(nn.Module):
301
+ """
302
+ The residual connection is defined in BeitLayer instead of here (as is the case with other models), due to the
303
+ layernorm applied before each block.
304
+ """
305
+
306
+ def __init__(self, config: BeitConfig) -> None:
307
+ super().__init__()
308
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
309
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
310
+
311
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
312
+ hidden_states = self.dense(hidden_states)
313
+ hidden_states = self.dropout(hidden_states)
314
+
315
+ return hidden_states
316
+
317
+
318
+ class BeitAttention(nn.Module):
319
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
320
+ super().__init__()
321
+ self.attention = BeitSelfAttention(config, window_size=window_size)
322
+ self.output = BeitSelfOutput(config)
323
+ self.pruned_heads = set()
324
+
325
+ def prune_heads(self, heads):
326
+ if len(heads) == 0:
327
+ return
328
+ heads, index = find_pruneable_heads_and_indices(
329
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
330
+ )
331
+
332
+ # Prune linear layers
333
+ self.attention.query = prune_linear_layer(self.attention.query, index)
334
+ self.attention.key = prune_linear_layer(self.attention.key, index)
335
+ self.attention.value = prune_linear_layer(self.attention.value, index)
336
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
337
+
338
+ # Update hyper params and store pruned heads
339
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
340
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
341
+ self.pruned_heads = self.pruned_heads.union(heads)
342
+
343
+ def forward(
344
+ self,
345
+ hidden_states: torch.Tensor,
346
+ head_mask: Optional[torch.Tensor] = None,
347
+ output_attentions: bool = False,
348
+ relative_position_bias: Optional["BeitRelativePositionBias"] = None,
349
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
350
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias)
351
+
352
+ attention_output = self.output(self_outputs[0], hidden_states)
353
+
354
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
355
+ return outputs
356
+
357
+
358
+ class BeitIntermediate(nn.Module):
359
+ def __init__(self, config: BeitConfig) -> None:
360
+ super().__init__()
361
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
362
+ if isinstance(config.hidden_act, str):
363
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
364
+ else:
365
+ self.intermediate_act_fn = config.hidden_act
366
+
367
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
368
+ hidden_states = self.dense(hidden_states)
369
+ hidden_states = self.intermediate_act_fn(hidden_states)
370
+
371
+ return hidden_states
372
+
373
+
374
+ class BeitOutput(nn.Module):
375
+ def __init__(self, config: BeitConfig) -> None:
376
+ super().__init__()
377
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
378
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
379
+
380
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
381
+ hidden_states = self.dense(hidden_states)
382
+ hidden_states = self.dropout(hidden_states)
383
+
384
+ return hidden_states
385
+
386
+
387
+ class BeitLayer(nn.Module):
388
+ """This corresponds to the Block class in the timm implementation."""
389
+
390
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0) -> None:
391
+ super().__init__()
392
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
393
+ self.seq_len_dim = 1
394
+ self.attention = BeitAttention(config, window_size=window_size)
395
+ self.intermediate = BeitIntermediate(config)
396
+ self.output = BeitOutput(config)
397
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
398
+ self.drop_path = BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
399
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
400
+
401
+ init_values = config.layer_scale_init_value
402
+ if init_values > 0:
403
+ self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
404
+ self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
405
+ else:
406
+ self.lambda_1, self.lambda_2 = None, None
407
+
408
+ def forward(
409
+ self,
410
+ hidden_states: torch.Tensor,
411
+ head_mask: Optional[torch.Tensor] = None,
412
+ output_attentions: bool = False,
413
+ relative_position_bias: Optional["BeitRelativePositionBias"] = None,
414
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
415
+ self_attention_outputs = self.attention(
416
+ self.layernorm_before(hidden_states), # in BEiT, layernorm is applied before self-attention
417
+ head_mask,
418
+ output_attentions=output_attentions,
419
+ relative_position_bias=relative_position_bias,
420
+ )
421
+ attention_output = self_attention_outputs[0]
422
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
423
+
424
+ # apply lambda_1 if present
425
+ if self.lambda_1 is not None:
426
+ attention_output = self.lambda_1 * attention_output
427
+
428
+ # first residual connection
429
+ hidden_states = self.drop_path(attention_output) + hidden_states
430
+
431
+ # in BEiT, layernorm is also applied after self-attention
432
+ layer_output = self.layernorm_after(hidden_states)
433
+
434
+ layer_output = self.intermediate(layer_output)
435
+ layer_output = self.output(layer_output)
436
+
437
+ if self.lambda_2 is not None:
438
+ layer_output = self.lambda_2 * layer_output
439
+
440
+ # second residual connection
441
+ layer_output = self.drop_path(layer_output) + hidden_states
442
+
443
+ outputs = (layer_output,) + outputs
444
+
445
+ return outputs
446
+
447
+
448
+ class BeitRelativePositionBias(nn.Module):
449
+ def __init__(self, config: BeitConfig, window_size: tuple) -> None:
450
+ super().__init__()
451
+ self.window_size = window_size
452
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
453
+ self.relative_position_bias_table = nn.Parameter(
454
+ torch.zeros(self.num_relative_distance, config.num_attention_heads)
455
+ ) # 2*Wh-1 * 2*Ww-1, nH
456
+ # cls to token & token 2 cls & cls to cls
457
+
458
+ # get pair-wise relative position index for each token inside the window
459
+ coords_h = torch.arange(window_size[0])
460
+ coords_w = torch.arange(window_size[1])
461
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) # 2, Wh, Ww
462
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
463
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
464
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
465
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
466
+ relative_coords[:, :, 1] += window_size[1] - 1
467
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
468
+ relative_position_index = torch.zeros(
469
+ size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
470
+ )
471
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
472
+ relative_position_index[0, 0:] = self.num_relative_distance - 3
473
+ relative_position_index[0:, 0] = self.num_relative_distance - 2
474
+ relative_position_index[0, 0] = self.num_relative_distance - 1
475
+
476
+ self.register_buffer("relative_position_index", relative_position_index, persistent=False)
477
+
478
+ def forward(self) -> torch.Tensor:
479
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
480
+ self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
481
+ ) # Wh*Ww,Wh*Ww,nH
482
+
483
+ return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
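# A standalone sketch (assumptions: a 2x2 patch grid and 3 attention heads) of the shapes this module
# produces: (2*Wh - 1) * (2*Ww - 1) learnable biases cover all patch-to-patch offsets, and 3 extra
# entries handle cls-to-token, token-to-cls and cls-to-cls.
from transformers import BeitConfig

bias_module = BeitRelativePositionBias(BeitConfig(num_attention_heads=3), window_size=(2, 2))
print(bias_module.relative_position_bias_table.shape)  # torch.Size([12, 3])
print(bias_module().shape)  # (num_heads, Wh*Ww + 1, Wh*Ww + 1) -> torch.Size([3, 5, 5])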
484
+
485
+
486
+ class BeitEncoder(nn.Module):
487
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
488
+ super().__init__()
489
+ self.config = config
490
+ if config.use_shared_relative_position_bias:
491
+ self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
492
+ else:
493
+ self.relative_position_bias = None
494
+
495
+ # stochastic depth decay rule
496
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
497
+ self.layer = nn.ModuleList(
498
+ [
499
+ BeitLayer(
500
+ config,
501
+ window_size=window_size if config.use_relative_position_bias else None,
502
+ drop_path_rate=dpr[i],
503
+ )
504
+ for i in range(config.num_hidden_layers)
505
+ ]
506
+ )
507
+ self.gradient_checkpointing = False
508
+
509
+ def forward(
510
+ self,
511
+ hidden_states: torch.Tensor,
512
+ head_mask: Optional[torch.Tensor] = None,
513
+ output_attentions: bool = False,
514
+ output_hidden_states: bool = False,
515
+ return_dict: bool = True,
516
+ ) -> Union[tuple, BaseModelOutput]:
517
+ all_hidden_states = () if output_hidden_states else None
518
+ all_self_attentions = () if output_attentions else None
519
+
520
+ for i, layer_module in enumerate(self.layer):
521
+ if output_hidden_states:
522
+ all_hidden_states = all_hidden_states + (hidden_states,)
523
+
524
+ layer_head_mask = head_mask[i] if head_mask is not None else None
525
+
526
+ if self.gradient_checkpointing and self.training:
527
+ layer_outputs = self._gradient_checkpointing_func(
528
+ layer_module.__call__,
529
+ hidden_states,
530
+ layer_head_mask,
531
+ output_attentions,
532
+ )
533
+ else:
534
+ relative_position_bias = (
535
+ self.relative_position_bias() if self.relative_position_bias is not None else None
536
+ )
537
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)
538
+
539
+ hidden_states = layer_outputs[0]
540
+
541
+ if output_attentions:
542
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
543
+
544
+ if output_hidden_states:
545
+ all_hidden_states = all_hidden_states + (hidden_states,)
546
+
547
+ if not return_dict:
548
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
549
+ return BaseModelOutput(
550
+ last_hidden_state=hidden_states,
551
+ hidden_states=all_hidden_states,
552
+ attentions=all_self_attentions,
553
+ )
554
+
555
+
556
+ class BeitPreTrainedModel(PreTrainedModel):
557
+ """
558
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
559
+ models.
560
+ """
561
+
562
+ config_class = BeitConfig
563
+ base_model_prefix = "beit"
564
+ main_input_name = "pixel_values"
565
+ supports_gradient_checkpointing = True
566
+
567
+ def _init_weights(self, module):
568
+ """Initialize the weights"""
569
+ if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
570
+ # Slightly different from the TF version which uses truncated_normal for initialization
571
+ # cf https://github.com/pytorch/pytorch/pull/5617
572
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
573
+ if module.bias is not None:
574
+ module.bias.data.zero_()
575
+ elif isinstance(module, nn.Embedding):
576
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
577
+ if module.padding_idx is not None:
578
+ module.weight.data[module.padding_idx].zero_()
579
+ elif isinstance(module, nn.LayerNorm):
580
+ module.bias.data.zero_()
581
+ module.weight.data.fill_(1.0)
582
+
583
+
584
+ BEIT_START_DOCSTRING = r"""
585
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
586
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
587
+ behavior.
588
+
589
+ Parameters:
590
+ config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
591
+ Initializing with a config file does not load the weights associated with the model, only the
592
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
593
+ """
594
+
595
+ BEIT_INPUTS_DOCSTRING = r"""
596
+ Args:
597
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
598
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
599
+ [`BeitImageProcessor.__call__`] for details.
600
+
601
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
602
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
603
+
604
+ - 1 indicates the head is **not masked**,
605
+ - 0 indicates the head is **masked**.
606
+
607
+ output_attentions (`bool`, *optional*):
608
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
609
+ tensors for more detail.
610
+ output_hidden_states (`bool`, *optional*):
611
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
612
+ more detail.
613
+ return_dict (`bool`, *optional*):
614
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
615
+ """
616
+
617
+
618
+ @add_start_docstrings(
619
+ "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
620
+ BEIT_START_DOCSTRING,
621
+ )
622
+ class BeitModel(BeitPreTrainedModel):
623
+ def __init__(self, config: BeitConfig, add_pooling_layer: bool = True) -> None:
624
+ super().__init__(config)
625
+ self.config = config
626
+
627
+ self.embeddings = BeitEmbeddings(config)
628
+ self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
629
+
630
+ self.layernorm = (
631
+ nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
632
+ )
633
+ self.pooler = BeitPooler(config) if add_pooling_layer else None
634
+
635
+ # Initialize weights and apply final processing
636
+ self.post_init()
637
+
638
+ def get_input_embeddings(self):
639
+ return self.embeddings.patch_embeddings
640
+
641
+ def _prune_heads(self, heads_to_prune):
642
+ """
643
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
644
+ class PreTrainedModel
645
+ """
646
+ for layer, heads in heads_to_prune.items():
647
+ self.encoder.layer[layer].attention.prune_heads(heads)
648
+
649
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
650
+ @add_code_sample_docstrings(
651
+ checkpoint=_CHECKPOINT_FOR_DOC,
652
+ output_type=BeitModelOutputWithPooling,
653
+ config_class=_CONFIG_FOR_DOC,
654
+ modality="vision",
655
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
656
+ )
657
+ def forward(
658
+ self,
659
+ pixel_values: Optional[torch.Tensor] = None,
660
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
661
+ head_mask: Optional[torch.Tensor] = None,
662
+ output_attentions: Optional[bool] = None,
663
+ output_hidden_states: Optional[bool] = None,
664
+ return_dict: Optional[bool] = None,
665
+ ) -> Union[tuple, BeitModelOutputWithPooling]:
666
+ r"""
667
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
668
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
669
+ """
670
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
671
+ output_hidden_states = (
672
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
673
+ )
674
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
675
+
676
+ if pixel_values is None:
677
+ raise ValueError("You have to specify pixel_values")
678
+
679
+ # Prepare head mask if needed
680
+ # 1.0 in head_mask indicate we keep the head
681
+ # attention_probs has shape bsz x n_heads x N x N
682
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
683
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
684
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
685
+
686
+ embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values, bool_masked_pos)
687
+
688
+ encoder_outputs = self.encoder(
689
+ embedding_output,
690
+ head_mask=head_mask,
691
+ output_attentions=output_attentions,
692
+ output_hidden_states=output_hidden_states,
693
+ return_dict=return_dict,
694
+ )
695
+ sequence_output = encoder_outputs[0]
696
+ sequence_output = self.layernorm(sequence_output)
697
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
698
+
699
+ if not return_dict:
700
+ head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
701
+ return head_outputs + encoder_outputs[1:]
702
+
703
+ return BeitModelOutputWithPooling(
704
+ last_hidden_state=sequence_output,
705
+ pooler_output=pooled_output,
706
+ hidden_states=encoder_outputs.hidden_states,
707
+ attentions=encoder_outputs.attentions,
708
+ )
709
+
710
+
711
+ class BeitPooler(nn.Module):
712
+ def __init__(self, config: BeitConfig) -> None:
713
+ super().__init__()
714
+ self.layernorm = (
715
+ nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
716
+ )
717
+
718
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
719
+ if self.layernorm is not None:
720
+ # Mean pool the final hidden states of the patch tokens
721
+ patch_tokens = hidden_states[:, 1:, :]
722
+ pooled_output = self.layernorm(patch_tokens.mean(1))
723
+ else:
724
+ # Pool by simply taking the final hidden state of the [CLS] token
725
+ pooled_output = hidden_states[:, 0]
726
+
727
+ return pooled_output
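# A tiny sketch (assuming `use_mean_pooling=True`) of what the pooler computes: the layer-normed mean
# of the patch tokens, i.e. everything after the CLS token at position 0.
import torch

hidden_states = torch.randn(2, 197, 768)          # (batch, 1 + num_patches, hidden_size)
patch_mean = hidden_states[:, 1:, :].mean(dim=1)  # (2, 768), before the final LayerNorm
print(patch_mean.shape)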
728
+
729
+
730
+ @add_start_docstrings(
731
+ """Beit Model transformer with a 'language' modeling head on top. BEiT does masked image modeling by predicting
732
+ visual tokens of a Vector-Quantized Variational Autoencoder (VQ-VAE), whereas other vision models like ViT and DeiT
733
+ predict RGB pixel values. As a result, this class is incompatible with [`AutoModelForMaskedImageModeling`], so you
734
+ will need to use [`BeitForMaskedImageModeling`] directly if you wish to do masked image modeling with BEiT.""",
735
+ BEIT_START_DOCSTRING,
736
+ )
737
+ class BeitForMaskedImageModeling(BeitPreTrainedModel):
738
+ def __init__(self, config: BeitConfig) -> None:
739
+ super().__init__(config)
740
+
741
+ self.num_labels = config.num_labels
742
+ self.beit = BeitModel(config, add_pooling_layer=False)
743
+
744
+ # Classifier head
745
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
746
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
747
+
748
+ # Initialize weights and apply final processing
749
+ self.post_init()
750
+
751
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
752
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
753
+ def forward(
754
+ self,
755
+ pixel_values: Optional[torch.Tensor] = None,
756
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
757
+ head_mask: Optional[torch.Tensor] = None,
758
+ labels: Optional[torch.Tensor] = None,
759
+ output_attentions: Optional[bool] = None,
760
+ output_hidden_states: Optional[bool] = None,
761
+ return_dict: Optional[bool] = None,
762
+ ) -> Union[tuple, MaskedLMOutput]:
763
+ r"""
764
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
765
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
766
+
767
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
768
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
769
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
770
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
771
+
772
+ Returns:
773
+
774
+ Examples:
775
+
776
+ ```python
777
+ >>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
778
+ >>> import torch
779
+ >>> from PIL import Image
780
+ >>> import requests
781
+
782
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
783
+ >>> image = Image.open(requests.get(url, stream=True).raw)
784
+
785
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
786
+ >>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
787
+
788
+ >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
789
+ >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
790
+ >>> # create random boolean mask of shape (batch_size, num_patches)
791
+ >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
792
+
793
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
794
+ >>> loss, logits = outputs.loss, outputs.logits
795
+ >>> list(logits.shape)
796
+ [1, 196, 8192]
797
+ ```"""
798
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
799
+
800
+ outputs = self.beit(
801
+ pixel_values,
802
+ bool_masked_pos=bool_masked_pos,
803
+ head_mask=head_mask,
804
+ output_attentions=output_attentions,
805
+ output_hidden_states=output_hidden_states,
806
+ return_dict=return_dict,
807
+ )
808
+
809
+ sequence_output = outputs[0]
810
+ sequence_output = self.layernorm(sequence_output)
811
+ prediction_scores = self.lm_head(sequence_output[:, 1:])
812
+
813
+ masked_lm_loss = None
814
+ if labels is not None:
815
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
816
+ masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels)
817
+
818
+ if not return_dict:
819
+ output = (prediction_scores,) + outputs[1:]
820
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
821
+
822
+ return MaskedLMOutput(
823
+ loss=masked_lm_loss,
824
+ logits=prediction_scores,
825
+ hidden_states=outputs.hidden_states,
826
+ attentions=outputs.attentions,
827
+ )
828
+
829
+
830
+ @add_start_docstrings(
831
+ """
832
+ Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
833
+ hidden states of the patch tokens) e.g. for ImageNet.
834
+ """,
835
+ BEIT_START_DOCSTRING,
836
+ )
837
+ class BeitForImageClassification(BeitPreTrainedModel):
838
+ def __init__(self, config: BeitConfig) -> None:
839
+ super().__init__(config)
840
+
841
+ self.num_labels = config.num_labels
842
+ self.beit = BeitModel(config, add_pooling_layer=True)
843
+
844
+ # Classifier head
845
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
846
+
847
+ # Initialize weights and apply final processing
848
+ self.post_init()
849
+
850
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
851
+ @add_code_sample_docstrings(
852
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
853
+ output_type=ImageClassifierOutput,
854
+ config_class=_CONFIG_FOR_DOC,
855
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
856
+ )
857
+ def forward(
858
+ self,
859
+ pixel_values: Optional[torch.Tensor] = None,
860
+ head_mask: Optional[torch.Tensor] = None,
861
+ labels: Optional[torch.Tensor] = None,
862
+ output_attentions: Optional[bool] = None,
863
+ output_hidden_states: Optional[bool] = None,
864
+ return_dict: Optional[bool] = None,
865
+ ) -> Union[tuple, ImageClassifierOutput]:
866
+ r"""
867
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
868
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
869
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
870
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
871
+ """
872
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
873
+ outputs = self.beit(
874
+ pixel_values,
875
+ head_mask=head_mask,
876
+ output_attentions=output_attentions,
877
+ output_hidden_states=output_hidden_states,
878
+ return_dict=return_dict,
879
+ )
880
+
881
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
882
+
883
+ logits = self.classifier(pooled_output)
884
+
885
+ loss = None
886
+ if labels is not None:
887
+ if self.config.problem_type is None:
888
+ if self.num_labels == 1:
889
+ self.config.problem_type = "regression"
890
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
891
+ self.config.problem_type = "single_label_classification"
892
+ else:
893
+ self.config.problem_type = "multi_label_classification"
894
+
895
+ if self.config.problem_type == "regression":
896
+ loss_fct = MSELoss()
897
+ if self.num_labels == 1:
898
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
899
+ else:
900
+ loss = loss_fct(logits, labels)
901
+ elif self.config.problem_type == "single_label_classification":
902
+ loss_fct = CrossEntropyLoss()
903
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
904
+ elif self.config.problem_type == "multi_label_classification":
905
+ loss_fct = BCEWithLogitsLoss()
906
+ loss = loss_fct(logits, labels)
907
+ if not return_dict:
908
+ output = (logits,) + outputs[2:]
909
+ return ((loss,) + output) if loss is not None else output
910
+
911
+ return ImageClassifierOutput(
912
+ loss=loss,
913
+ logits=logits,
914
+ hidden_states=outputs.hidden_states,
915
+ attentions=outputs.attentions,
916
+ )
917
+
918
+
919
+ class BeitConvModule(nn.Module):
920
+ """
921
+ A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
922
+ layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
923
+
924
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
925
+ """
926
+
927
+ def __init__(
928
+ self,
929
+ in_channels: int,
930
+ out_channels: int,
931
+ kernel_size: Union[int, Tuple[int, int]],
932
+ padding: Union[int, Tuple[int, int], str] = 0,
933
+ bias: bool = False,
934
+ dilation: Union[int, Tuple[int, int]] = 1,
935
+ ) -> None:
936
+ super().__init__()
937
+ self.conv = nn.Conv2d(
938
+ in_channels=in_channels,
939
+ out_channels=out_channels,
940
+ kernel_size=kernel_size,
941
+ padding=padding,
942
+ bias=bias,
943
+ dilation=dilation,
944
+ )
945
+ self.bn = nn.BatchNorm2d(out_channels)
946
+ self.activation = nn.ReLU()
947
+
948
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
949
+ output = self.conv(input)
950
+ output = self.bn(output)
951
+ output = self.activation(output)
952
+
953
+ return output
954
+
955
+
956
+ class BeitPyramidPoolingBlock(nn.Module):
957
+ def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
958
+ super().__init__()
959
+ self.layers = [
960
+ nn.AdaptiveAvgPool2d(pool_scale),
961
+ BeitConvModule(in_channels, channels, kernel_size=1),
962
+ ]
963
+ for i, layer in enumerate(self.layers):
964
+ self.add_module(str(i), layer)
965
+
966
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
967
+ hidden_state = input
968
+ for layer in self.layers:
969
+ hidden_state = layer(hidden_state)
970
+ return hidden_state
971
+
972
+
973
+ class BeitPyramidPoolingModule(nn.Module):
974
+ """
975
+ Pyramid Pooling Module (PPM) used in PSPNet.
976
+
977
+ Args:
978
+ pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
979
+ Module.
980
+ in_channels (int): Input channels.
981
+ channels (int): Channels after modules, before conv_seg.
982
+ align_corners (bool): align_corners argument of F.interpolate.
983
+
984
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
985
+ """
986
+
987
+ def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
988
+ super().__init__()
989
+ self.pool_scales = pool_scales
990
+ self.align_corners = align_corners
991
+ self.in_channels = in_channels
992
+ self.channels = channels
993
+ self.blocks = []
994
+ for i, pool_scale in enumerate(pool_scales):
995
+ block = BeitPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
996
+ self.blocks.append(block)
997
+ self.add_module(str(i), block)
998
+
999
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
1000
+ ppm_outs = []
1001
+ for ppm in self.blocks:
1002
+ ppm_out = ppm(x)
1003
+ upsampled_ppm_out = nn.functional.interpolate(
1004
+ ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
1005
+ )
1006
+ ppm_outs.append(upsampled_ppm_out)
1007
+ return ppm_outs
1008
+
1009
+
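+ # Sketch of what the Pyramid Pooling Module returns (illustrative only): one
+ # feature map per pooling scale, pooled to that scale, projected to `channels`,
+ # and upsampled back to the input resolution. Sizes below are hypothetical.
+ _ppm = BeitPyramidPoolingModule(pool_scales=(1, 2, 3, 6), in_channels=768, channels=512, align_corners=False)
+ _x = torch.randn(1, 768, 14, 14)
+ _outs = _ppm(_x)
+ assert len(_outs) == 4 and all(o.shape == (1, 512, 14, 14) for o in _outs)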
1010
+ class BeitUperHead(nn.Module):
1011
+ """
1012
+ Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
1013
+ [UPerNet](https://arxiv.org/abs/1807.10221).
1014
+
1015
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
1016
+ """
1017
+
1018
+ def __init__(self, config: BeitConfig) -> None:
1019
+ super().__init__()
1020
+
1021
+ self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
1022
+ self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
1023
+ self.channels = config.hidden_size
1024
+ self.align_corners = False
1025
+ self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
1026
+
1027
+ # PSP Module
1028
+ self.psp_modules = BeitPyramidPoolingModule(
1029
+ self.pool_scales,
1030
+ self.in_channels[-1],
1031
+ self.channels,
1032
+ align_corners=self.align_corners,
1033
+ )
1034
+ self.bottleneck = BeitConvModule(
1035
+ self.in_channels[-1] + len(self.pool_scales) * self.channels,
1036
+ self.channels,
1037
+ kernel_size=3,
1038
+ padding=1,
1039
+ )
1040
+ # FPN Module
1041
+ self.lateral_convs = nn.ModuleList()
1042
+ self.fpn_convs = nn.ModuleList()
1043
+ for in_channels in self.in_channels[:-1]: # skip the top layer
1044
+ l_conv = BeitConvModule(in_channels, self.channels, kernel_size=1)
1045
+ fpn_conv = BeitConvModule(self.channels, self.channels, kernel_size=3, padding=1)
1046
+ self.lateral_convs.append(l_conv)
1047
+ self.fpn_convs.append(fpn_conv)
1048
+
1049
+ self.fpn_bottleneck = BeitConvModule(
1050
+ len(self.in_channels) * self.channels,
1051
+ self.channels,
1052
+ kernel_size=3,
1053
+ padding=1,
1054
+ )
1055
+
1056
+ def psp_forward(self, inputs):
1057
+ x = inputs[-1]
1058
+ psp_outs = [x]
1059
+ psp_outs.extend(self.psp_modules(x))
1060
+ psp_outs = torch.cat(psp_outs, dim=1)
1061
+ output = self.bottleneck(psp_outs)
1062
+
1063
+ return output
1064
+
1065
+ def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
1066
+ # build laterals
1067
+ laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
1068
+
1069
+ laterals.append(self.psp_forward(encoder_hidden_states))
1070
+
1071
+ # build top-down path
1072
+ used_backbone_levels = len(laterals)
1073
+ for i in range(used_backbone_levels - 1, 0, -1):
1074
+ prev_shape = laterals[i - 1].shape[2:]
1075
+ laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
1076
+ laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
1077
+ )
1078
+
1079
+ # build outputs
1080
+ fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
1081
+ # append psp feature
1082
+ fpn_outs.append(laterals[-1])
1083
+
1084
+ for i in range(used_backbone_levels - 1, 0, -1):
1085
+ fpn_outs[i] = nn.functional.interpolate(
1086
+ fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
1087
+ )
1088
+ fpn_outs = torch.cat(fpn_outs, dim=1)
1089
+ output = self.fpn_bottleneck(fpn_outs)
1090
+ output = self.classifier(output)
1091
+
1092
+ return output
1093
+
1094
+
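+ # Sketch of how the UPerNet head above is used (illustrative only): it consumes
+ # one feature map per selected backbone stage and returns per-pixel class logits
+ # at the resolution of the finest map. The config values and feature sizes below
+ # are hypothetical (e.g. 150 ADE20k-style classes).
+ _config = BeitConfig(num_labels=150)
+ _head = BeitUperHead(_config)
+ _features = [
+     torch.randn(1, 768, 56, 56),  # after fpn1: 4x upsampling of the 14x14 patch grid
+     torch.randn(1, 768, 28, 28),  # after fpn2: 2x upsampling
+     torch.randn(1, 768, 14, 14),  # after fpn3: identity
+     torch.randn(1, 768, 7, 7),    # after fpn4: 2x downsampling
+ ]
+ assert _head(_features).shape == (1, 150, 56, 56)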
1095
+ class BeitFCNHead(nn.Module):
1096
+ """
1097
+ Fully Convolutional Networks for Semantic Segmentation. This head is the implementation of
1098
+ [FCNNet](https://arxiv.org/abs/1411.4038).
1099
+
1100
+ Args:
1101
+ config (BeitConfig): Configuration.
1102
+ in_channels (int): Number of input channels.
1103
+ kernel_size (int): The kernel size for convs in the head. Default: 3.
1104
+ dilation (int): The dilation rate for convs in the head. Default: 1.
1105
+
1106
+
1107
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
1108
+ """
1109
+
1110
+ def __init__(
1111
+ self, config: BeitConfig, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
1112
+ ) -> None:
1113
+ super().__init__()
1114
+ self.in_channels = config.hidden_size
1115
+ self.channels = config.auxiliary_channels
1116
+ self.num_convs = config.auxiliary_num_convs
1117
+ self.concat_input = config.auxiliary_concat_input
1118
+ self.in_index = in_index
1119
+
1120
+ conv_padding = (kernel_size // 2) * dilation
1121
+ convs = []
1122
+ convs.append(
1123
+ BeitConvModule(
1124
+ self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
1125
+ )
1126
+ )
1127
+ for i in range(self.num_convs - 1):
1128
+ convs.append(
1129
+ BeitConvModule(
1130
+ self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
1131
+ )
1132
+ )
1133
+ if self.num_convs == 0:
1134
+ self.convs = nn.Identity()
1135
+ else:
1136
+ self.convs = nn.Sequential(*convs)
1137
+ if self.concat_input:
1138
+ self.conv_cat = BeitConvModule(
1139
+ self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
1140
+ )
1141
+
1142
+ self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
1143
+
1144
+ def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
1145
+ # just take the relevant feature maps
1146
+ hidden_states = encoder_hidden_states[self.in_index]
1147
+ output = self.convs(hidden_states)
1148
+ if self.concat_input:
1149
+ output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
1150
+ output = self.classifier(output)
1151
+ return output
1152
+
1153
+
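+ # Sketch of the auxiliary FCN head above (illustrative only): it reads a single
+ # feature map, selected by `in_index`, and produces logits that only contribute
+ # to the auxiliary loss. Assumes the default auxiliary settings of BeitConfig
+ # (auxiliary_num_convs=1, auxiliary_channels=256, auxiliary_concat_input=False).
+ _aux_head = BeitFCNHead(BeitConfig(num_labels=150))
+ _feats = [torch.randn(1, 768, 14, 14) for _ in range(4)]
+ assert _aux_head(_feats).shape == (1, 150, 14, 14)  # uses _feats[2] by default (in_index=2)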
1154
+ @add_start_docstrings(
1155
+ """
1156
+ Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
1157
+ """,
1158
+ BEIT_START_DOCSTRING,
1159
+ )
1160
+ class BeitForSemanticSegmentation(BeitPreTrainedModel):
1161
+ def __init__(self, config: BeitConfig) -> None:
1162
+ super().__init__(config)
1163
+
1164
+ self.num_labels = config.num_labels
1165
+ self.beit = BeitModel(config, add_pooling_layer=False)
1166
+
1167
+ # FPNs
1168
+ if len(self.config.out_indices) != 4:
1169
+ raise ValueError(
1170
+ "BeitForSemanticSegmentation requires config.out_indices to be a list of 4 integers, "
1171
+ "specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
1172
+ "a base-sized architecture."
1173
+ )
1174
+ self.fpn1 = nn.Sequential(
1175
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
1176
+ nn.BatchNorm2d(config.hidden_size),
1177
+ nn.GELU(),
1178
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
1179
+ )
1180
+ self.fpn2 = nn.Sequential(
1181
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
1182
+ )
1183
+ self.fpn3 = nn.Identity()
1184
+ self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
1185
+
1186
+ # Semantic segmentation head(s)
1187
+ self.decode_head = BeitUperHead(config)
1188
+ self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None
1189
+
1190
+ # Initialize weights and apply final processing
1191
+ self.post_init()
1192
+
1193
+ def compute_loss(self, logits, auxiliary_logits, labels):
1194
+ # upsample logits to the images' original size
1195
+ upsampled_logits = nn.functional.interpolate(
1196
+ logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
1197
+ )
1198
+ if auxiliary_logits is not None:
1199
+ upsampled_auxiliary_logits = nn.functional.interpolate(
1200
+ auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
1201
+ )
1202
+ # compute weighted loss
1203
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
1204
+ main_loss = loss_fct(upsampled_logits, labels)
1205
+ loss = main_loss
1206
+ if auxiliary_logits is not None:
1207
+ auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
1208
+ loss += self.config.auxiliary_loss_weight * auxiliary_loss
1209
+
1210
+ return loss
1211
+
1212
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
1213
+ @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
1214
+ def forward(
1215
+ self,
1216
+ pixel_values: Optional[torch.Tensor] = None,
1217
+ head_mask: Optional[torch.Tensor] = None,
1218
+ labels: Optional[torch.Tensor] = None,
1219
+ output_attentions: Optional[bool] = None,
1220
+ output_hidden_states: Optional[bool] = None,
1221
+ return_dict: Optional[bool] = None,
1222
+ ) -> Union[tuple, SemanticSegmenterOutput]:
1223
+ r"""
1224
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
1225
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
1226
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
1227
+
1228
+ Returns:
1229
+
1230
+ Examples:
1231
+
1232
+ ```python
1233
+ >>> from transformers import AutoImageProcessor, BeitForSemanticSegmentation
1234
+ >>> from PIL import Image
1235
+ >>> import requests
1236
+
1237
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1238
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1239
+
1240
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
1241
+ >>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
1242
+
1243
+ >>> inputs = image_processor(images=image, return_tensors="pt")
1244
+ >>> outputs = model(**inputs)
1245
+ >>> # logits are of shape (batch_size, num_labels, height, width)
1246
+ >>> logits = outputs.logits
1247
+ ```"""
1248
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1249
+ output_hidden_states = (
1250
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1251
+ )
1252
+
1253
+ outputs = self.beit(
1254
+ pixel_values,
1255
+ head_mask=head_mask,
1256
+ output_attentions=output_attentions,
1257
+ output_hidden_states=True, # we need the intermediate hidden states
1258
+ return_dict=return_dict,
1259
+ )
1260
+
1261
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
1262
+
1263
+ # only keep certain features, and reshape
1264
+ # note that we do +1 as the encoder_hidden_states also includes the initial embeddings
1265
+ features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
1266
+ batch_size = pixel_values.shape[0]
1267
+ patch_resolution = self.config.image_size // self.config.patch_size
1268
+ features = [
1269
+ x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
1270
+ ]
1271
+
1272
+ # apply FPNs
1273
+ ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
1274
+ for i in range(len(features)):
1275
+ features[i] = ops[i](features[i])
1276
+
1277
+ logits = self.decode_head(features)
1278
+
1279
+ auxiliary_logits = None
1280
+ if self.auxiliary_head is not None:
1281
+ auxiliary_logits = self.auxiliary_head(features)
1282
+
1283
+ loss = None
1284
+ if labels is not None:
1285
+ if self.config.num_labels == 1:
1286
+ raise ValueError("The number of labels should be greater than one")
1287
+ else:
1288
+ loss = self.compute_loss(logits, auxiliary_logits, labels)
1289
+
1290
+ if not return_dict:
1291
+ if output_hidden_states:
1292
+ output = (logits,) + outputs[1:]
1293
+ else:
1294
+ output = (logits,) + outputs[2:]
1295
+ return ((loss,) + output) if loss is not None else output
1296
+
1297
+ return SemanticSegmenterOutput(
1298
+ loss=loss,
1299
+ logits=logits,
1300
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
1301
+ attentions=outputs.attentions,
1302
+ )
1303
+
1304
+
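+ # Standalone sketch of the weighted segmentation loss computed in compute_loss
+ # above (illustrative only; the tensor sizes are hypothetical and 0.4 / 255 are
+ # the config defaults for auxiliary_loss_weight / semantic_loss_ignore_index):
+ # both logit maps are bilinearly upsampled to the label resolution and combined
+ # as main_loss + auxiliary_loss_weight * auxiliary_loss.
+ _logits = torch.randn(1, 150, 160, 160)
+ _aux_logits = torch.randn(1, 150, 160, 160)
+ _labels = torch.randint(0, 150, (1, 640, 640))
+ _loss_fct = CrossEntropyLoss(ignore_index=255)
+ _up = nn.functional.interpolate(_logits, size=_labels.shape[-2:], mode="bilinear", align_corners=False)
+ _up_aux = nn.functional.interpolate(_aux_logits, size=_labels.shape[-2:], mode="bilinear", align_corners=False)
+ _loss = _loss_fct(_up, _labels) + 0.4 * _loss_fct(_up_aux, _labels)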
1305
+ @add_start_docstrings(
1306
+ """
1307
+ BEiT backbone, to be used with frameworks like DETR and MaskFormer.
1308
+ """,
1309
+ BEIT_START_DOCSTRING,
1310
+ )
1311
+ class BeitBackbone(BeitPreTrainedModel, BackboneMixin):
1312
+ def __init__(self, config):
1313
+ super().__init__(config)
1314
+ super()._init_backbone(config)
1315
+
1316
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
1317
+ self.embeddings = BeitEmbeddings(config)
1318
+ self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
1319
+
1320
+ if config.add_fpn:
1321
+ if len(self.config.out_indices) != 4:
1322
+ raise ValueError(
1323
+ "BeitBackbone requires config.out_indices to be a list of 4 integers, "
1324
+ "specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
1325
+ "a base-sized architecture."
1326
+ )
1327
+ hidden_size = config.hidden_size
1328
+ self.fpn1 = nn.Sequential(
1329
+ nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
1330
+ nn.BatchNorm2d(hidden_size, eps=config.batch_norm_eps),
1331
+ nn.GELU(),
1332
+ nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
1333
+ )
1334
+
1335
+ self.fpn2 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2))
1336
+ self.fpn3 = nn.Identity()
1337
+ self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
1338
+
1339
+ # initialize weights and apply final processing
1340
+ self.post_init()
1341
+
1342
+ def get_input_embeddings(self):
1343
+ return self.embeddings.patch_embeddings
1344
+
1345
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
1346
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
1347
+ def forward(
1348
+ self,
1349
+ pixel_values: Tensor,
1350
+ output_hidden_states: Optional[bool] = None,
1351
+ output_attentions: Optional[bool] = None,
1352
+ return_dict: Optional[bool] = None,
1353
+ ) -> BackboneOutput:
1354
+ """
1355
+ Returns:
1356
+
1357
+ Examples:
1358
+
1359
+ ```python
1360
+ >>> from transformers import AutoImageProcessor, AutoBackbone
1361
+ >>> import torch
1362
+ >>> from PIL import Image
1363
+ >>> import requests
1364
+
1365
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1366
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1367
+
1368
+ >>> processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
1369
+ >>> model = AutoBackbone.from_pretrained(
1370
+ ... "microsoft/beit-base-patch16-224", out_features=["stage1", "stage2", "stage3", "stage4"]
1371
+ ... )
1372
+
1373
+ >>> inputs = processor(image, return_tensors="pt")
1374
+
1375
+ >>> outputs = model(**inputs)
1376
+ >>> feature_maps = outputs.feature_maps
1377
+ >>> list(feature_maps[-1].shape)
1378
+ [1, 768, 14, 14]
1379
+ ```"""
1380
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1381
+ output_hidden_states = (
1382
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1383
+ )
1384
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1385
+
1386
+ batch_size = pixel_values.shape[0]
1387
+ embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values)
1388
+
1389
+ outputs = self.encoder(
1390
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
1391
+ )
1392
+
1393
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
1394
+
1395
+ feature_maps = ()
1396
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
1397
+ if stage in self.out_features:
1398
+ if self.config.reshape_hidden_states:
1399
+ hidden_state = hidden_state[:, 1:, :]
1400
+ hidden_state = hidden_state.permute(0, 2, 1)
1401
+ hidden_state = hidden_state.reshape(batch_size, -1, patch_height, patch_width)
1402
+
1403
+ feature_maps += (hidden_state,)
1404
+
1405
+ if self.config.add_fpn:
1406
+ feature_maps = [
1407
+ self.fpn1(feature_maps[0]),
1408
+ self.fpn2(feature_maps[1]),
1409
+ self.fpn3(feature_maps[2]),
1410
+ self.fpn4(feature_maps[3]),
1411
+ ]
1412
+ feature_maps = tuple(feature_maps)
1413
+
1414
+ if not return_dict:
1415
+ if output_hidden_states:
1416
+ output = (feature_maps,) + outputs[1:]
1417
+ else:
1418
+ output = (feature_maps,) + outputs[2:]
1419
+ return output
1420
+
1421
+ return BackboneOutput(
1422
+ feature_maps=feature_maps,
1423
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
1424
+ attentions=outputs.attentions,
1425
+ )
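+ # Sketch of the sequence-to-grid reshape used by both BeitForSemanticSegmentation
+ # and BeitBackbone above (illustrative only; sizes assume a 224px image with
+ # 16px patches): drop the [CLS] token, move the hidden dimension to the channel
+ # axis, and fold the patch sequence back into a (H/patch, W/patch) grid.
+ _hidden_state = torch.randn(2, 1 + 14 * 14, 768)  # [CLS] token + 196 patch tokens
+ _grid = _hidden_state[:, 1:, :].permute(0, 2, 1).reshape(2, 768, 14, 14)
+ assert _grid.shape == (2, 768, 14, 14)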
venv/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py ADDED
@@ -0,0 +1,948 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft Research and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from typing import Callable, List, Optional, Tuple
18
+
19
+ import flax
20
+ import flax.linen as nn
21
+ import jax
22
+ import jax.numpy as jnp
23
+ import numpy as np
24
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
25
+ from flax.linen.attention import dot_product_attention_weights
26
+ from flax.traverse_util import flatten_dict, unflatten_dict
27
+
28
+ from ...modeling_flax_outputs import (
29
+ FlaxBaseModelOutput,
30
+ FlaxBaseModelOutputWithPooling,
31
+ FlaxMaskedLMOutput,
32
+ FlaxSequenceClassifierOutput,
33
+ )
34
+ from ...modeling_flax_utils import (
35
+ ACT2FN,
36
+ FlaxPreTrainedModel,
37
+ append_replace_return_docstrings,
38
+ overwrite_call_docstring,
39
+ )
40
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
41
+ from .configuration_beit import BeitConfig
42
+
43
+
44
+ @flax.struct.dataclass
45
+ class FlaxBeitModelOutputWithPooling(FlaxBaseModelOutputWithPooling):
46
+ """
47
+ Class for outputs of [`FlaxBeitModel`].
48
+
49
+ Args:
50
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
51
+ Sequence of hidden-states at the output of the last layer of the model.
52
+ pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
53
+ Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
54
+ *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
55
+ will be returned.
56
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
57
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
58
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
59
+ the initial embedding outputs.
60
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
61
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
62
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
63
+ the self-attention heads.
64
+ """
65
+
66
+
67
+ BEIT_START_DOCSTRING = r"""
68
+
69
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
70
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
71
+
72
+ This model is also a
73
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
74
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
75
+ behavior.
76
+
77
+ Finally, this model supports inherent JAX features such as:
78
+
79
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
80
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
81
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
82
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
83
+
84
+ Parameters:
85
+ config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
86
+ Initializing with a config file does not load the weights associated with the model, only the
87
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
88
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
89
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
90
+ `jax.numpy.bfloat16` (on TPUs).
91
+
92
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
93
+ specified all the computation will be performed with the given `dtype`.
94
+
95
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
96
+ parameters.**
97
+
98
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
99
+ [`~FlaxPreTrainedModel.to_bf16`].
100
+ """
101
+
102
+ BEIT_INPUTS_DOCSTRING = r"""
103
+ Args:
104
+ pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
105
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
106
+ [`AutoImageProcessor.__call__`] for details.
107
+
108
+ output_attentions (`bool`, *optional*):
109
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
110
+ tensors for more detail.
111
+ output_hidden_states (`bool`, *optional*):
112
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
113
+ more detail.
114
+ return_dict (`bool`, *optional*):
115
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
116
+ """
117
+
118
+
119
+ def relative_position_index_init(window_size: Tuple[int, int]) -> jnp.ndarray:
120
+ """
121
+ get pair-wise relative position index for each token inside the window
122
+ """
123
+ num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
124
+
125
+ coords_h = np.arange(window_size[0])
126
+ coords_w = np.arange(window_size[1])
127
+ coords = np.stack(np.meshgrid(coords_h, coords_w, indexing="ij")) # 2, Wh, Ww
128
+ coords_flatten = np.reshape(coords, (2, -1))
129
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
130
+ relative_coords = np.transpose(relative_coords, (1, 2, 0)) # Wh*Ww, Wh*Ww, 2
131
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
132
+ relative_coords[:, :, 1] += window_size[1] - 1
133
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
134
+
135
+ relative_position_index = np.zeros(shape=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
136
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
137
+ relative_position_index[0, 0:] = num_relative_distance - 3
138
+ relative_position_index[0:, 0] = num_relative_distance - 2
139
+ relative_position_index[0, 0] = num_relative_distance - 1
140
+ return jnp.array(relative_position_index)
141
+
142
+
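+ # Quick illustration (not part of the upstream file): for a 2x2 window there are
+ # (2*2 - 1) * (2*2 - 1) + 3 = 12 relative-distance buckets, and the index table
+ # covers the 4 patch tokens plus the [CLS] token, i.e. a 5x5 matrix whose last
+ # three buckets encode cls-to-token, token-to-cls and cls-to-cls.
+ _idx = relative_position_index_init((2, 2))
+ assert _idx.shape == (5, 5)
+ assert int(_idx[0, 0]) == 11  # cls-to-cls bucket == num_relative_distance - 1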
143
+ def ones_with_scale(key, shape, scale, dtype=jnp.float32):
144
+ return jnp.ones(shape, dtype) * scale
145
+
146
+
147
+ class FlaxBeitDropPath(nn.Module):
148
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
149
+
150
+ rate: float
151
+
152
+ @nn.module.compact
153
+ def __call__(self, inputs, deterministic: Optional[bool] = True):
154
+ if self.rate == 0.0:
155
+ return inputs
156
+ keep_prob = 1.0 - self.rate
157
+ if deterministic:
158
+ return inputs
159
+ else:
160
+ shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
161
+ rng = self.make_rng("droppath")
162
+ random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype)
163
+ binary_tensor = jnp.floor(random_tensor)
164
+ output = inputs / keep_prob * binary_tensor
165
+ return output
166
+
167
+
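+ # Equivalent computation to FlaxBeitDropPath above for a batch of 4 samples with
+ # rate=0.1 (illustrative only): whole samples are zeroed with probability `rate`
+ # during training and the survivors are rescaled by 1 / keep_prob, so the
+ # expected activation is unchanged; with deterministic=True the layer is a no-op.
+ _keep_prob, _x = 0.9, jnp.ones((4, 3, 8))
+ _keep = jnp.floor(_keep_prob + jax.random.uniform(jax.random.PRNGKey(0), (4, 1, 1)))  # 1 keeps, 0 drops
+ _out = _x / _keep_prob * _keep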
168
+ class FlaxBeitPatchEmbeddings(nn.Module):
169
+ config: BeitConfig
170
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
171
+
172
+ def setup(self):
173
+ self.num_channels = self.config.num_channels
174
+ image_size = self.config.image_size
175
+ patch_size = self.config.patch_size
176
+ num_patches = (image_size // patch_size) * (image_size // patch_size)
177
+ patch_shape = (image_size // patch_size, image_size // patch_size)
178
+ self.num_patches = num_patches
179
+ self.patch_shape = patch_shape
180
+ self.projection = nn.Conv(
181
+ self.config.hidden_size,
182
+ kernel_size=(patch_size, patch_size),
183
+ strides=(patch_size, patch_size),
184
+ padding="VALID",
185
+ dtype=self.dtype,
186
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
187
+ )
188
+
189
+ def __call__(self, pixel_values):
190
+ num_channels = pixel_values.shape[-1]
191
+ if num_channels != self.num_channels:
192
+ raise ValueError(
193
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
194
+ )
195
+ embeddings = self.projection(pixel_values)
196
+ batch_size, _, _, channels = embeddings.shape
197
+ return jnp.reshape(embeddings, (batch_size, -1, channels))
198
+
199
+
200
+ class FlaxBeitEmbeddings(nn.Module):
201
+ """Construct the CLS token, position and patch embeddings."""
202
+
203
+ config: BeitConfig
204
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
205
+
206
+ def setup(self):
207
+ self.cls_token = self.param("cls_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
208
+ if self.config.use_mask_token:
209
+ self.mask_token = self.param("mask_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
210
+ self.patch_embeddings = FlaxBeitPatchEmbeddings(self.config, dtype=self.dtype)
211
+ num_patches = self.patch_embeddings.num_patches
212
+ if self.config.use_absolute_position_embeddings:
213
+ self.position_embeddings = self.param(
214
+ "position_embeddings", nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size)
215
+ )
216
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
217
+
218
+ def __call__(self, pixel_values, bool_masked_pos=None, deterministic=True):
219
+ embeddings = self.patch_embeddings(pixel_values)
220
+ batch_size, seq_len, _ = embeddings.shape
221
+
222
+ cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size))
223
+ cls_tokens = cls_tokens.astype(embeddings.dtype)
224
+
225
+ if bool_masked_pos is not None:
226
+ mask_tokens = jnp.broadcast_to(self.mask_token, (batch_size, seq_len, self.config.hidden_size))
227
+ mask_tokens = mask_tokens.astype(embeddings.dtype)
228
+ # replace the masked visual tokens by mask_tokens
229
+ w = jnp.expand_dims(bool_masked_pos, axis=-1)
230
+ embeddings = embeddings * (1 - w) + mask_tokens * w
231
+
232
+ embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1)
233
+
234
+ if self.config.use_absolute_position_embeddings:
235
+ embeddings = embeddings + self.position_embeddings.astype(embeddings.dtype)
236
+
237
+ embeddings = self.dropout(embeddings, deterministic=deterministic)
238
+ return embeddings
239
+
240
+
241
+ class FlaxBeitRelativePositionBias(nn.Module):
242
+ config: BeitConfig
243
+ window_size: Tuple[int, int]
244
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
245
+
246
+ def setup(self):
247
+ num_relative_distance = (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) + 3
248
+ self.relative_position_bias_table = self.param(
249
+ "relative_position_bias_table",
250
+ nn.initializers.zeros,
251
+ (num_relative_distance, self.config.num_attention_heads),
252
+ ) # 2*Wh-1 * 2*Ww-1, nH
253
+ # cls to token & token to cls & cls to cls
254
+
255
+ self.relative_position_index = relative_position_index_init(self.window_size)
256
+
257
+ def __call__(self):
258
+ index = self.relative_position_index.reshape(-1)
259
+ shape = (self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)
260
+ relative_position_bias = self.relative_position_bias_table[index].reshape(shape) # Wh*Ww,Wh*Ww,nH
261
+ return jnp.transpose(relative_position_bias, (2, 0, 1))
262
+
263
+
264
+ class FlaxBeitSelfAttention(nn.Module):
265
+ config: BeitConfig
266
+ window_size: Tuple[int, int]
267
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
268
+
269
+ def setup(self):
270
+ if self.config.hidden_size % self.config.num_attention_heads != 0 and not hasattr(
271
+ self.config, "embedding_size"
272
+ ):
273
+ raise ValueError(
274
+ f"The hidden size {self.config.hidden_size,} is not a multiple of the number of attention "
275
+ f"heads {self.config.num_attention_heads}."
276
+ )
277
+
278
+ self.query = nn.Dense(
279
+ self.config.hidden_size,
280
+ dtype=self.dtype,
281
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
282
+ )
283
+ self.key = nn.Dense(
284
+ self.config.hidden_size,
285
+ dtype=self.dtype,
286
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
287
+ use_bias=False,
288
+ )
289
+ self.value = nn.Dense(
290
+ self.config.hidden_size,
291
+ dtype=self.dtype,
292
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
293
+ )
294
+
295
+ self.relative_position_bias = (
296
+ FlaxBeitRelativePositionBias(self.config, window_size=self.window_size, dtype=self.dtype)
297
+ if self.window_size
298
+ else None
299
+ )
300
+
301
+ def __call__(
302
+ self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
303
+ ):
304
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
305
+
306
+ query_states = self.query(hidden_states).reshape(
307
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
308
+ )
309
+ value_states = self.value(hidden_states).reshape(
310
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
311
+ )
312
+ key_states = self.key(hidden_states).reshape(
313
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
314
+ )
315
+
316
+ dropout_rng = None
317
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
318
+ dropout_rng = self.make_rng("dropout")
319
+
320
+ attention_bias = jnp.array(0.0, dtype=self.dtype)
321
+ # Add relative position bias if present.
322
+ if self.relative_position_bias is not None:
323
+ attention_bias = jnp.expand_dims(self.relative_position_bias(), 0)
324
+ attention_bias = attention_bias.astype(query_states.dtype)
325
+
326
+ # Add shared relative position bias if provided.
327
+ if relative_position_bias is not None:
328
+ attention_bias = attention_bias + relative_position_bias.astype(attention_bias.dtype)
329
+
330
+ attn_weights = dot_product_attention_weights(
331
+ query_states,
332
+ key_states,
333
+ bias=attention_bias,
334
+ dropout_rng=dropout_rng,
335
+ dropout_rate=self.config.attention_probs_dropout_prob,
336
+ broadcast_dropout=True,
337
+ deterministic=deterministic,
338
+ dtype=self.dtype,
339
+ precision=None,
340
+ )
341
+
342
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
343
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
344
+
345
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
346
+ return outputs
347
+
348
+
349
+ class FlaxBeitSelfOutput(nn.Module):
350
+ config: BeitConfig
351
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
352
+
353
+ def setup(self):
354
+ self.dense = nn.Dense(
355
+ self.config.hidden_size,
356
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
357
+ dtype=self.dtype,
358
+ )
359
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
360
+
361
+ def __call__(self, hidden_states, deterministic: bool = True):
362
+ hidden_states = self.dense(hidden_states)
363
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
364
+ return hidden_states
365
+
366
+
367
+ class FlaxBeitAttention(nn.Module):
368
+ config: BeitConfig
369
+ window_size: Tuple[int, int]
370
+ dtype: jnp.dtype = jnp.float32
371
+
372
+ def setup(self):
373
+ self.attention = FlaxBeitSelfAttention(self.config, self.window_size, dtype=self.dtype)
374
+ self.output = FlaxBeitSelfOutput(self.config, dtype=self.dtype)
375
+
376
+ def __call__(
377
+ self, hidden_states, relative_position_bias=None, deterministic=True, output_attentions: bool = False
378
+ ):
379
+ attn_outputs = self.attention(
380
+ hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
381
+ )
382
+ attn_output = attn_outputs[0]
383
+ attn_output = self.output(attn_output, deterministic=deterministic)
384
+
385
+ outputs = (attn_output,)
386
+
387
+ if output_attentions:
388
+ outputs += (attn_outputs[1],)
389
+
390
+ return outputs
391
+
392
+
393
+ class FlaxBeitIntermediate(nn.Module):
394
+ config: BeitConfig
395
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
396
+
397
+ def setup(self):
398
+ self.dense = nn.Dense(
399
+ self.config.intermediate_size,
400
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
401
+ dtype=self.dtype,
402
+ )
403
+ self.activation = ACT2FN[self.config.hidden_act]
404
+
405
+ def __call__(self, hidden_states):
406
+ hidden_states = self.dense(hidden_states)
407
+ hidden_states = self.activation(hidden_states)
408
+
409
+ return hidden_states
410
+
411
+
412
+ class FlaxBeitOutput(nn.Module):
413
+ config: BeitConfig
414
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
415
+
416
+ def setup(self):
417
+ self.dense = nn.Dense(
418
+ self.config.hidden_size,
419
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
420
+ dtype=self.dtype,
421
+ )
422
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
423
+
424
+ def __call__(self, hidden_states, deterministic: bool = True):
425
+ hidden_states = self.dense(hidden_states)
426
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
427
+
428
+ return hidden_states
429
+
430
+
431
+ class FlaxBeitLayer(nn.Module):
432
+ config: BeitConfig
433
+ window_size: Tuple[int, int]
434
+ drop_path_rate: float
435
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
436
+
437
+ def setup(self):
438
+ self.attention = FlaxBeitAttention(self.config, self.window_size, dtype=self.dtype)
439
+ self.intermediate = FlaxBeitIntermediate(self.config, dtype=self.dtype)
440
+ self.output = FlaxBeitOutput(self.config, dtype=self.dtype)
441
+ self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
442
+ self.drop_path = FlaxBeitDropPath(rate=self.drop_path_rate)
443
+ self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
444
+
445
+ self.init_values = self.config.layer_scale_init_value
446
+ if self.init_values > 0:
447
+ self.lambda_1 = self.param("lambda_1", ones_with_scale, (self.config.hidden_size), self.init_values)
448
+ self.lambda_2 = self.param("lambda_2", ones_with_scale, (self.config.hidden_size), self.init_values)
449
+ else:
450
+ self.lambda_1 = None
451
+ self.lambda_2 = None
452
+
453
+ def __call__(
454
+ self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
455
+ ):
456
+ self_attention_outputs = self.attention(
457
+ self.layernorm_before(hidden_states), # in BEiT, layernorm is applied before self-attention
458
+ relative_position_bias,
459
+ deterministic=deterministic,
460
+ output_attentions=output_attentions,
461
+ )
462
+ attention_output = self_attention_outputs[0]
463
+
464
+ # apply lambda_1 if present
465
+ if self.lambda_1 is not None:
466
+ attention_output = self.lambda_1.astype(attention_output.dtype) * attention_output
467
+
468
+ # first residual connection
469
+ hidden_states = self.drop_path(attention_output, deterministic=deterministic) + hidden_states
470
+
471
+ # in BEiT, layernorm is also applied after self-attention
472
+ layer_output = self.layernorm_after(hidden_states)
473
+
474
+ layer_output = self.intermediate(layer_output)
475
+ layer_output = self.output(layer_output, deterministic=deterministic)
476
+
477
+ # apply lambda_2 if present
478
+ if self.lambda_2 is not None:
479
+ layer_output = self.lambda_2.astype(layer_output.dtype) * layer_output
480
+
481
+ # second residual connection
482
+ layer_output = self.drop_path(layer_output, deterministic=deterministic) + hidden_states
483
+
484
+ outputs = (layer_output,)
485
+
486
+ if output_attentions:
487
+ outputs += (self_attention_outputs[1],)
488
+
489
+ return outputs
490
+
491
+
492
+ class FlaxBeitLayerCollection(nn.Module):
493
+ config: BeitConfig
494
+ window_size: Tuple[int, int]
495
+ drop_path_rates: List[float]
496
+ relative_position_bias: Callable[[], jnp.ndarray]
497
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
498
+
499
+ def setup(self):
500
+ self.layers = [
501
+ FlaxBeitLayer(
502
+ self.config,
503
+ window_size=self.window_size if self.config.use_relative_position_bias else None,
504
+ drop_path_rate=self.drop_path_rates[i],
505
+ name=str(i),
506
+ dtype=self.dtype,
507
+ )
508
+ for i in range(self.config.num_hidden_layers)
509
+ ]
510
+
511
+ def __call__(
512
+ self,
513
+ hidden_states,
514
+ deterministic: bool = True,
515
+ output_attentions: bool = False,
516
+ output_hidden_states: bool = False,
517
+ return_dict: bool = True,
518
+ ):
519
+ all_attentions = () if output_attentions else None
520
+ all_hidden_states = () if output_hidden_states else None
521
+
522
+ for i, layer in enumerate(self.layers):
523
+ if output_hidden_states:
524
+ all_hidden_states += (hidden_states,)
525
+ relative_position_bias = self.relative_position_bias() if self.relative_position_bias is not None else None
526
+ layer_outputs = layer(
527
+ hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
528
+ )
529
+
530
+ hidden_states = layer_outputs[0]
531
+
532
+ if output_attentions:
533
+ all_attentions += (layer_outputs[1],)
534
+
535
+ if output_hidden_states:
536
+ all_hidden_states += (hidden_states,)
537
+
538
+ outputs = (hidden_states,)
539
+ if not return_dict:
540
+ return tuple(v for v in outputs if v is not None)
541
+
542
+ return FlaxBaseModelOutput(
543
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
544
+ )
545
+
546
+
547
+ class FlaxBeitEncoder(nn.Module):
548
+ config: BeitConfig
549
+ window_size: Tuple[int, int]
550
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
551
+
552
+ def setup(self):
553
+ if self.config.use_shared_relative_position_bias:
554
+ self.relative_position_bias = FlaxBeitRelativePositionBias(
555
+ config=self.config, window_size=self.window_size, dtype=self.dtype
556
+ )
557
+
558
+ # stochastic depth decay rule
559
+ drop_path_rates = list(np.linspace(0, self.config.drop_path_rate, self.config.num_hidden_layers))
560
+ self.layer = FlaxBeitLayerCollection(
561
+ self.config,
562
+ window_size=self.window_size,
563
+ drop_path_rates=drop_path_rates,
564
+ relative_position_bias=self.relative_position_bias
565
+ if self.config.use_shared_relative_position_bias
566
+ else None,
567
+ dtype=self.dtype,
568
+ )
569
+
570
+ def __call__(
571
+ self,
572
+ hidden_states,
573
+ deterministic: bool = True,
574
+ output_attentions: bool = False,
575
+ output_hidden_states: bool = False,
576
+ return_dict: bool = True,
577
+ ):
578
+ return self.layer(
579
+ hidden_states,
580
+ deterministic=deterministic,
581
+ output_attentions=output_attentions,
582
+ output_hidden_states=output_hidden_states,
583
+ return_dict=return_dict,
584
+ )
585
+
586
+
587
+ class FlaxBeitPreTrainedModel(FlaxPreTrainedModel):
588
+ """
589
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
590
+ models.
591
+ """
592
+
593
+ config_class = BeitConfig
594
+ base_model_prefix = "beit"
595
+ main_input_name = "pixel_values"
596
+ module_class: nn.Module = None
597
+
598
+ def __init__(
599
+ self,
600
+ config: BeitConfig,
601
+ input_shape=None,
602
+ seed: int = 0,
603
+ dtype: jnp.dtype = jnp.float32,
604
+ _do_init: bool = True,
605
+ **kwargs,
606
+ ):
607
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
608
+ if input_shape is None:
609
+ input_shape = (1, config.image_size, config.image_size, config.num_channels)
610
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
611
+
612
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
613
+ # init input tensors
614
+ pixel_values = jnp.zeros(input_shape, dtype=self.dtype)
615
+
616
+ params_rng, dropout_rng = jax.random.split(rng)
617
+ dropout_rng, droppath_rng = jax.random.split(dropout_rng)
618
+ rngs = {"params": params_rng, "dropout": dropout_rng, "droppath": droppath_rng}
619
+
620
+ random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"]
621
+
622
+ if params is not None:
623
+ random_params = flatten_dict(unfreeze(random_params))
624
+ params = flatten_dict(unfreeze(params))
625
+ for missing_key in self._missing_keys:
626
+ params[missing_key] = random_params[missing_key]
627
+ self._missing_keys = set()
628
+ return freeze(unflatten_dict(params))
629
+ else:
630
+ return random_params
631
+
632
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
633
+ def __call__(
634
+ self,
635
+ pixel_values,
636
+ bool_masked_pos=None,
637
+ params: dict = None,
638
+ dropout_rng: jax.random.PRNGKey = None,
639
+ train: bool = False,
640
+ output_attentions: Optional[bool] = None,
641
+ output_hidden_states: Optional[bool] = None,
642
+ return_dict: Optional[bool] = None,
643
+ ):
644
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
645
+ output_hidden_states = (
646
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
647
+ )
648
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
649
+
650
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
651
+ # Handle any PRNG if needed
652
+ rngs = {}
653
+ if dropout_rng is not None:
654
+ dropout_rng, droppath_rng = jax.random.split(dropout_rng)
655
+ rngs["dropout"] = dropout_rng
656
+ rngs["droppath"] = droppath_rng
657
+
658
+ return self.module.apply(
659
+ {"params": params or self.params},
660
+ jnp.array(pixel_values, dtype=jnp.float32),
661
+ bool_masked_pos,
662
+ not train,
663
+ output_attentions,
664
+ output_hidden_states,
665
+ return_dict,
666
+ rngs=rngs,
667
+ )
668
+
669
+
670
+ class FlaxBeitPooler(nn.Module):
671
+ config: BeitConfig
672
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
673
+
674
+ def setup(self):
675
+ if self.config.use_mean_pooling:
676
+ self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
677
+
678
+ def __call__(self, hidden_states):
679
+ if self.config.use_mean_pooling:
680
+ # Mean pool the final hidden states of the patch tokens
681
+ patch_tokens = hidden_states[:, 1:, :]
682
+ pooled_output = self.layernorm(jnp.mean(patch_tokens, axis=1))
683
+ else:
684
+ # Pool by simply taking the final hidden state of the [CLS] token
685
+ pooled_output = hidden_states[:, 0]
686
+
687
+ return pooled_output
688
+
689
+
690
+ class FlaxBeitModule(nn.Module):
691
+ config: BeitConfig
692
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
693
+ add_pooling_layer: bool = True
694
+
695
+ def setup(self):
696
+ self.embeddings = FlaxBeitEmbeddings(self.config, dtype=self.dtype)
697
+ self.encoder = FlaxBeitEncoder(
698
+ self.config, window_size=self.embeddings.patch_embeddings.patch_shape, dtype=self.dtype
699
+ )
700
+ if not self.config.use_mean_pooling:
701
+ self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
702
+ self.pooler = FlaxBeitPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None
703
+
704
+ def __call__(
705
+ self,
706
+ pixel_values,
707
+ bool_masked_pos=None,
708
+ deterministic: bool = True,
709
+ output_attentions: bool = False,
710
+ output_hidden_states: bool = False,
711
+ return_dict: bool = True,
712
+ ):
713
+ hidden_states = self.embeddings(pixel_values, bool_masked_pos, deterministic=deterministic)
714
+
715
+ outputs = self.encoder(
716
+ hidden_states,
717
+ deterministic=deterministic,
718
+ output_attentions=output_attentions,
719
+ output_hidden_states=output_hidden_states,
720
+ return_dict=return_dict,
721
+ )
722
+ hidden_states = outputs[0]
723
+ if not self.config.use_mean_pooling:
724
+ hidden_states = self.layernorm(hidden_states)
725
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
726
+
727
+ if not return_dict:
728
+ # if pooled is None, don't return it
729
+ if pooled is None:
730
+ return (hidden_states,) + outputs[1:]
731
+ return (hidden_states, pooled) + outputs[1:]
732
+
733
+ return FlaxBeitModelOutputWithPooling(
734
+ last_hidden_state=hidden_states,
735
+ pooler_output=pooled,
736
+ hidden_states=outputs.hidden_states,
737
+ attentions=outputs.attentions,
738
+ )
739
+
740
+
741
+ @add_start_docstrings(
742
+ "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
743
+ BEIT_START_DOCSTRING,
744
+ )
745
+ class FlaxBeitModel(FlaxBeitPreTrainedModel):
746
+ module_class = FlaxBeitModule
747
+
748
+
749
+ FLAX_BEIT_MODEL_DOCSTRING = """
750
+ Returns:
751
+
752
+ Examples:
753
+
754
+ ```python
755
+ >>> from transformers import AutoImageProcessor, FlaxBeitModel
756
+ >>> from PIL import Image
757
+ >>> import requests
758
+
759
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
760
+ >>> image = Image.open(requests.get(url, stream=True).raw)
761
+
762
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
763
+ >>> model = FlaxBeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
764
+
765
+ >>> inputs = image_processor(images=image, return_tensors="np")
766
+ >>> outputs = model(**inputs)
767
+ >>> last_hidden_states = outputs.last_hidden_state
768
+ ```
769
+ """
770
+
771
+ overwrite_call_docstring(FlaxBeitModel, FLAX_BEIT_MODEL_DOCSTRING)
772
+ append_replace_return_docstrings(FlaxBeitModel, output_type=FlaxBeitModelOutputWithPooling, config_class=BeitConfig)
773
+
774
+
775
+ class FlaxBeitForMaskedImageModelingModule(nn.Module):
776
+ config: BeitConfig
777
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
778
+
779
+ def setup(self):
780
+ self.beit = FlaxBeitModule(self.config, add_pooling_layer=False, dtype=self.dtype)
781
+
782
+ # Classifier head
783
+ self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
784
+ self.lm_head = nn.Dense(
785
+ self.config.vocab_size,
786
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
787
+ dtype=self.dtype,
788
+ )
789
+
790
+ def __call__(
791
+ self,
792
+ pixel_values=None,
793
+ bool_masked_pos=None,
794
+ deterministic: bool = True,
795
+ output_attentions=None,
796
+ output_hidden_states=None,
797
+ return_dict=None,
798
+ ):
799
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
800
+
801
+ outputs = self.beit(
802
+ pixel_values,
803
+ bool_masked_pos,
804
+ deterministic=deterministic,
805
+ output_attentions=output_attentions,
806
+ output_hidden_states=output_hidden_states,
807
+ return_dict=return_dict,
808
+ )
809
+
810
+ sequence_output = outputs[0]
811
+ sequence_output = self.layernorm(sequence_output)
812
+ prediction_scores = self.lm_head(sequence_output[:, 1:])
813
+
814
+ if not return_dict:
815
+ output = (prediction_scores,) + outputs[2:]
816
+ return output
817
+
818
+ return FlaxMaskedLMOutput(
819
+ logits=prediction_scores,
820
+ hidden_states=outputs.hidden_states,
821
+ attentions=outputs.attentions,
822
+ )
823
+
824
+
825
+ @add_start_docstrings(
826
+ "Beit Model transformer with a 'language' modeling head on top (to predict visual tokens).",
827
+ BEIT_START_DOCSTRING,
828
+ )
829
+ class FlaxBeitForMaskedImageModeling(FlaxBeitPreTrainedModel):
830
+ module_class = FlaxBeitForMaskedImageModelingModule
831
+
832
+
833
+ FLAX_BEIT_MLM_DOCSTRING = """
834
+ bool_masked_pos (`numpy.ndarray` of shape `(batch_size, num_patches)`):
835
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
836
+
837
+ Returns:
838
+
839
+ Examples:
840
+
841
+ ```python
842
+ >>> from transformers import AutoImageProcessor, FlaxBeitForMaskedImageModeling
843
+ >>> from PIL import Image
844
+ >>> import requests
845
+
846
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
847
+ >>> image = Image.open(requests.get(url, stream=True).raw)
848
+
849
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
850
+ >>> model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
851
+
852
+ >>> inputs = image_processor(images=image, return_tensors="np")
853
+ >>> outputs = model(**inputs)
854
+ >>> logits = outputs.logits
855
+ ```
856
+ """
857
+
858
+ overwrite_call_docstring(FlaxBeitForMaskedImageModeling, FLAX_BEIT_MLM_DOCSTRING)
859
+ append_replace_return_docstrings(
860
+ FlaxBeitForMaskedImageModeling, output_type=FlaxMaskedLMOutput, config_class=BeitConfig
861
+ )
862
+
863
+
864
+ class FlaxBeitForImageClassificationModule(nn.Module):
865
+ config: BeitConfig
866
+ dtype: jnp.dtype = jnp.float32
867
+
868
+ def setup(self):
869
+ self.beit = FlaxBeitModule(config=self.config, dtype=self.dtype, add_pooling_layer=True)
870
+ self.classifier = nn.Dense(
871
+ self.config.num_labels,
872
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
873
+ dtype=self.dtype,
874
+ )
875
+
876
+ def __call__(
877
+ self,
878
+ pixel_values=None,
879
+ bool_masked_pos=None,
880
+ deterministic: bool = True,
881
+ output_attentions=None,
882
+ output_hidden_states=None,
883
+ return_dict=None,
884
+ ):
885
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
886
+
887
+ outputs = self.beit(
888
+ pixel_values,
889
+ deterministic=deterministic,
890
+ output_attentions=output_attentions,
891
+ output_hidden_states=output_hidden_states,
892
+ return_dict=return_dict,
893
+ )
894
+
895
+ pooled_output = outputs[1]
896
+ logits = self.classifier(pooled_output)
897
+
898
+ if not return_dict:
899
+ output = (logits,) + outputs[2:]
900
+ return output
901
+
902
+ return FlaxSequenceClassifierOutput(
903
+ logits=logits,
904
+ hidden_states=outputs.hidden_states,
905
+ attentions=outputs.attentions,
906
+ )
907
+
908
+
909
+ @add_start_docstrings(
910
+ """
911
+ Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
912
+ hidden states of the patch tokens) e.g. for ImageNet.
913
+ """,
914
+ BEIT_START_DOCSTRING,
915
+ )
916
+ class FlaxBeitForImageClassification(FlaxBeitPreTrainedModel):
917
+ module_class = FlaxBeitForImageClassificationModule
918
+
919
+
920
+ FLAX_BEIT_CLASSIF_DOCSTRING = """
921
+ Returns:
922
+
923
+ Example:
924
+
925
+ ```python
926
+ >>> from transformers import AutoImageProcessor, FlaxBeitForImageClassification
927
+ >>> from PIL import Image
928
+ >>> import requests
929
+
930
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
931
+ >>> image = Image.open(requests.get(url, stream=True).raw)
932
+
933
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
934
+ >>> model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
935
+
936
+ >>> inputs = image_processor(images=image, return_tensors="np")
937
+ >>> outputs = model(**inputs)
938
+ >>> logits = outputs.logits
939
+ >>> # model predicts one of the 1000 ImageNet classes
940
+ >>> predicted_class_idx = logits.argmax(-1).item()
941
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
942
+ ```
943
+ """
944
+
945
+ overwrite_call_docstring(FlaxBeitForImageClassification, FLAX_BEIT_CLASSIF_DOCSTRING)
946
+ append_replace_return_docstrings(
947
+ FlaxBeitForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=BeitConfig
948
+ )
venv/lib/python3.10/site-packages/transformers/models/bert/__init__.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tensorflow_text_available,
22
+ is_tf_available,
23
+ is_tokenizers_available,
24
+ is_torch_available,
25
+ )
26
+
27
+
28
+ _import_structure = {
29
+ "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
30
+ "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
31
+ }
32
+
33
+ try:
34
+ if not is_tokenizers_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
40
+
41
+ try:
42
+ if not is_torch_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ _import_structure["modeling_bert"] = [
48
+ "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
49
+ "BertForMaskedLM",
50
+ "BertForMultipleChoice",
51
+ "BertForNextSentencePrediction",
52
+ "BertForPreTraining",
53
+ "BertForQuestionAnswering",
54
+ "BertForSequenceClassification",
55
+ "BertForTokenClassification",
56
+ "BertLayer",
57
+ "BertLMHeadModel",
58
+ "BertModel",
59
+ "BertPreTrainedModel",
60
+ "load_tf_weights_in_bert",
61
+ ]
62
+
63
+ try:
64
+ if not is_tf_available():
65
+ raise OptionalDependencyNotAvailable()
66
+ except OptionalDependencyNotAvailable:
67
+ pass
68
+ else:
69
+ _import_structure["modeling_tf_bert"] = [
70
+ "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
71
+ "TFBertEmbeddings",
72
+ "TFBertForMaskedLM",
73
+ "TFBertForMultipleChoice",
74
+ "TFBertForNextSentencePrediction",
75
+ "TFBertForPreTraining",
76
+ "TFBertForQuestionAnswering",
77
+ "TFBertForSequenceClassification",
78
+ "TFBertForTokenClassification",
79
+ "TFBertLMHeadModel",
80
+ "TFBertMainLayer",
81
+ "TFBertModel",
82
+ "TFBertPreTrainedModel",
83
+ ]
84
+ try:
85
+ if not is_tensorflow_text_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
91
+
92
+ try:
93
+ if not is_flax_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ _import_structure["modeling_flax_bert"] = [
99
+ "FlaxBertForCausalLM",
100
+ "FlaxBertForMaskedLM",
101
+ "FlaxBertForMultipleChoice",
102
+ "FlaxBertForNextSentencePrediction",
103
+ "FlaxBertForPreTraining",
104
+ "FlaxBertForQuestionAnswering",
105
+ "FlaxBertForSequenceClassification",
106
+ "FlaxBertForTokenClassification",
107
+ "FlaxBertModel",
108
+ "FlaxBertPreTrainedModel",
109
+ ]
110
+
111
+ if TYPE_CHECKING:
112
+ from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
113
+ from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
114
+
115
+ try:
116
+ if not is_tokenizers_available():
117
+ raise OptionalDependencyNotAvailable()
118
+ except OptionalDependencyNotAvailable:
119
+ pass
120
+ else:
121
+ from .tokenization_bert_fast import BertTokenizerFast
122
+
123
+ try:
124
+ if not is_torch_available():
125
+ raise OptionalDependencyNotAvailable()
126
+ except OptionalDependencyNotAvailable:
127
+ pass
128
+ else:
129
+ from .modeling_bert import (
130
+ BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
131
+ BertForMaskedLM,
132
+ BertForMultipleChoice,
133
+ BertForNextSentencePrediction,
134
+ BertForPreTraining,
135
+ BertForQuestionAnswering,
136
+ BertForSequenceClassification,
137
+ BertForTokenClassification,
138
+ BertLayer,
139
+ BertLMHeadModel,
140
+ BertModel,
141
+ BertPreTrainedModel,
142
+ load_tf_weights_in_bert,
143
+ )
144
+
145
+ try:
146
+ if not is_tf_available():
147
+ raise OptionalDependencyNotAvailable()
148
+ except OptionalDependencyNotAvailable:
149
+ pass
150
+ else:
151
+ from .modeling_tf_bert import (
152
+ TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
153
+ TFBertEmbeddings,
154
+ TFBertForMaskedLM,
155
+ TFBertForMultipleChoice,
156
+ TFBertForNextSentencePrediction,
157
+ TFBertForPreTraining,
158
+ TFBertForQuestionAnswering,
159
+ TFBertForSequenceClassification,
160
+ TFBertForTokenClassification,
161
+ TFBertLMHeadModel,
162
+ TFBertMainLayer,
163
+ TFBertModel,
164
+ TFBertPreTrainedModel,
165
+ )
166
+
167
+ try:
168
+ if not is_tensorflow_text_available():
169
+ raise OptionalDependencyNotAvailable()
170
+ except OptionalDependencyNotAvailable:
171
+ pass
172
+ else:
173
+ from .tokenization_bert_tf import TFBertTokenizer
174
+
175
+ try:
176
+ if not is_flax_available():
177
+ raise OptionalDependencyNotAvailable()
178
+ except OptionalDependencyNotAvailable:
179
+ pass
180
+ else:
181
+ from .modeling_flax_bert import (
182
+ FlaxBertForCausalLM,
183
+ FlaxBertForMaskedLM,
184
+ FlaxBertForMultipleChoice,
185
+ FlaxBertForNextSentencePrediction,
186
+ FlaxBertForPreTraining,
187
+ FlaxBertForQuestionAnswering,
188
+ FlaxBertForSequenceClassification,
189
+ FlaxBertForTokenClassification,
190
+ FlaxBertModel,
191
+ FlaxBertPreTrainedModel,
192
+ )
193
+
194
+ else:
195
+ import sys
196
+
197
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
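For context, the `_import_structure` / `_LazyModule` pattern above keeps `import transformers` cheap: heavy backends (torch, TensorFlow, Flax) are only imported when one of the listed symbols is actually accessed. A minimal sketch of the same idea using PEP 562 module-level `__getattr__` (an illustration only, not the actual `_LazyModule` implementation):

```python
# lazy_pkg/__init__.py -- simplified sketch of lazy submodule loading
import importlib
from typing import Any

# Map public names to the submodule that defines them (mirrors _import_structure above).
_LAZY_ATTRS = {
    "BertConfig": ".configuration_bert",
    "BertTokenizer": ".tokenization_bert",
}


def __getattr__(name: str) -> Any:
    # Import the owning submodule only when the attribute is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```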
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.92 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/configuration_bert.cpython-310.pyc ADDED
Binary file (6.58 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf2_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (5.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_pytorch_checkpoint_to_original_tf.cpython-310.pyc ADDED
Binary file (3.73 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.86 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_bert.cpython-310.pyc ADDED
Binary file (54.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_flax_bert.cpython-310.pyc ADDED
Binary file (42.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_tf_bert.cpython-310.pyc ADDED
Binary file (61.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc ADDED
Binary file (17 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_fast.cpython-310.pyc ADDED
Binary file (6.76 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_tf.cpython-310.pyc ADDED
Binary file (9.28 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,245 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script can be used to convert a head-less TF2.x Bert model to PyTorch, as published on the official (now
17
+ deprecated) GitHub: https://github.com/tensorflow/models/tree/v2.3.0/official/nlp/bert
18
+
19
+ TF2.x uses different variable names from the original BERT (TF 1.4) implementation. The script re-maps the TF2.x Bert
20
+ weight names to the original names, so the model can be imported with Hugging Face Transformers.
21
+
22
+ You may adapt this script to include classification/MLM/NSP/etc. heads.
23
+
24
+ Note: This script only works with an older version of the TensorFlow models repository (<= v2.3.0).
25
+ Models trained with newer versions are not compatible with this script.
26
+ """
27
+ import argparse
28
+ import os
29
+ import re
30
+
31
+ import tensorflow as tf
32
+ import torch
33
+
34
+ from transformers import BertConfig, BertModel
35
+ from transformers.utils import logging
36
+
37
+
38
+ logging.set_verbosity_info()
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
43
+ tf_path = os.path.abspath(tf_checkpoint_path)
44
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
45
+ # Load weights from TF model
46
+ init_vars = tf.train.list_variables(tf_path)
47
+ names = []
48
+ arrays = []
49
+ layer_depth = []
50
+ for full_name, shape in init_vars:
51
+ # logger.info(f"Loading TF weight {name} with shape {shape}")
52
+ name = full_name.split("/")
53
+ if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
54
+ logger.info(f"Skipping non-model layer {full_name}")
55
+ continue
56
+ if "optimizer" in full_name:
57
+ logger.info(f"Skipping optimization layer {full_name}")
58
+ continue
59
+ if name[0] == "model":
60
+ # ignore initial 'model'
61
+ name = name[1:]
62
+ # figure out how many levels deep the name is
63
+ depth = 0
64
+ for _name in name:
65
+ if _name.startswith("layer_with_weights"):
66
+ depth += 1
67
+ else:
68
+ break
69
+ layer_depth.append(depth)
70
+ # read data
71
+ array = tf.train.load_variable(tf_path, full_name)
72
+ names.append("/".join(name))
73
+ arrays.append(array)
74
+ logger.info(f"Read a total of {len(arrays):,} layers")
75
+
76
+ # Sanity check
77
+ if len(set(layer_depth)) != 1:
78
+ raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
79
+ layer_depth = list(set(layer_depth))[0]
80
+ if layer_depth != 1:
81
+ raise ValueError(
82
+ "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
83
+ " heads."
84
+ )
85
+
86
+ # convert layers
87
+ logger.info("Converting weights...")
88
+ for full_name, array in zip(names, arrays):
89
+ name = full_name.split("/")
90
+ pointer = model
91
+ trace = []
92
+ for i, m_name in enumerate(name):
93
+ if m_name == ".ATTRIBUTES":
94
+ # variable names end with .ATTRIBUTES/VARIABLE_VALUE
95
+ break
96
+ if m_name.startswith("layer_with_weights"):
97
+ layer_num = int(m_name.split("-")[-1])
98
+ if layer_num <= 2:
99
+ # embedding layers
100
+ # layer_num 0: word_embeddings
101
+ # layer_num 1: position_embeddings
102
+ # layer_num 2: token_type_embeddings
103
+ continue
104
+ elif layer_num == 3:
105
+ # embedding LayerNorm
106
+ trace.extend(["embeddings", "LayerNorm"])
107
+ pointer = getattr(pointer, "embeddings")
108
+ pointer = getattr(pointer, "LayerNorm")
109
+ elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
110
+ # encoder layers
111
+ trace.extend(["encoder", "layer", str(layer_num - 4)])
112
+ pointer = getattr(pointer, "encoder")
113
+ pointer = getattr(pointer, "layer")
114
+ pointer = pointer[layer_num - 4]
115
+ elif layer_num == config.num_hidden_layers + 4:
116
+ # pooler layer
117
+ trace.extend(["pooler", "dense"])
118
+ pointer = getattr(pointer, "pooler")
119
+ pointer = getattr(pointer, "dense")
120
+ elif m_name == "embeddings":
121
+ trace.append("embeddings")
122
+ pointer = getattr(pointer, "embeddings")
123
+ if layer_num == 0:
124
+ trace.append("word_embeddings")
125
+ pointer = getattr(pointer, "word_embeddings")
126
+ elif layer_num == 1:
127
+ trace.append("position_embeddings")
128
+ pointer = getattr(pointer, "position_embeddings")
129
+ elif layer_num == 2:
130
+ trace.append("token_type_embeddings")
131
+ pointer = getattr(pointer, "token_type_embeddings")
132
+ else:
133
+ raise ValueError(f"Unknown embedding layer with name {full_name}")
134
+ trace.append("weight")
135
+ pointer = getattr(pointer, "weight")
136
+ elif m_name == "_attention_layer":
137
+ # self-attention layer
138
+ trace.extend(["attention", "self"])
139
+ pointer = getattr(pointer, "attention")
140
+ pointer = getattr(pointer, "self")
141
+ elif m_name == "_attention_layer_norm":
142
+ # output attention norm
143
+ trace.extend(["attention", "output", "LayerNorm"])
144
+ pointer = getattr(pointer, "attention")
145
+ pointer = getattr(pointer, "output")
146
+ pointer = getattr(pointer, "LayerNorm")
147
+ elif m_name == "_attention_output_dense":
148
+ # output attention dense
149
+ trace.extend(["attention", "output", "dense"])
150
+ pointer = getattr(pointer, "attention")
151
+ pointer = getattr(pointer, "output")
152
+ pointer = getattr(pointer, "dense")
153
+ elif m_name == "_output_dense":
154
+ # output dense
155
+ trace.extend(["output", "dense"])
156
+ pointer = getattr(pointer, "output")
157
+ pointer = getattr(pointer, "dense")
158
+ elif m_name == "_output_layer_norm":
159
+ # output layer norm
160
+ trace.extend(["output", "LayerNorm"])
161
+ pointer = getattr(pointer, "output")
162
+ pointer = getattr(pointer, "LayerNorm")
163
+ elif m_name == "_key_dense":
164
+ # attention key
165
+ trace.append("key")
166
+ pointer = getattr(pointer, "key")
167
+ elif m_name == "_query_dense":
168
+ # attention query
169
+ trace.append("query")
170
+ pointer = getattr(pointer, "query")
171
+ elif m_name == "_value_dense":
172
+ # attention value
173
+ trace.append("value")
174
+ pointer = getattr(pointer, "value")
175
+ elif m_name == "_intermediate_dense":
176
+ # intermediate dense
177
+ trace.extend(["intermediate", "dense"])
178
+ pointer = getattr(pointer, "intermediate")
179
+ pointer = getattr(pointer, "dense")
180
+ elif m_name == "_output_layer_norm":
181
+ # output layer norm
182
+ trace.append("output")
183
+ pointer = getattr(pointer, "output")
184
+ # weights & biases
185
+ elif m_name in ["bias", "beta"]:
186
+ trace.append("bias")
187
+ pointer = getattr(pointer, "bias")
188
+ elif m_name in ["kernel", "gamma"]:
189
+ trace.append("weight")
190
+ pointer = getattr(pointer, "weight")
191
+ else:
192
+ logger.warning(f"Ignored {m_name}")
193
+ # for certain layers reshape is necessary
194
+ trace = ".".join(trace)
195
+ if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
196
+ r"(\S+)\.attention\.output\.dense\.weight", trace
197
+ ):
198
+ array = array.reshape(pointer.data.shape)
199
+ if "kernel" in full_name:
200
+ array = array.transpose()
201
+ if pointer.shape == array.shape:
202
+ pointer.data = torch.from_numpy(array)
203
+ else:
204
+ raise ValueError(
205
+ f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
206
+ f" {array.shape}"
207
+ )
208
+ logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
209
+ return model
210
+
211
+
212
+ def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
213
+ # Instantiate model
214
+ logger.info(f"Loading model based on config from {config_path}...")
215
+ config = BertConfig.from_json_file(config_path)
216
+ model = BertModel(config)
217
+
218
+ # Load weights from checkpoint
219
+ logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
220
+ load_tf2_weights_in_bert(model, tf_checkpoint_path, config)
221
+
222
+ # Save pytorch-model
223
+ logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
224
+ torch.save(model.state_dict(), pytorch_dump_path)
225
+
226
+
227
+ if __name__ == "__main__":
228
+ parser = argparse.ArgumentParser()
229
+ parser.add_argument(
230
+ "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
231
+ )
232
+ parser.add_argument(
233
+ "--bert_config_file",
234
+ type=str,
235
+ required=True,
236
+ help="The config json file corresponding to the BERT model. This specifies the model architecture.",
237
+ )
238
+ parser.add_argument(
239
+ "--pytorch_dump_path",
240
+ type=str,
241
+ required=True,
242
+ help="Path to the output PyTorch model (must include filename).",
243
+ )
244
+ args = parser.parse_args()
245
+ convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
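A hedged usage sketch for the converter above; the paths are placeholders and must point to a real TF 2.x BERT checkpoint (TensorFlow models repo <= v2.3.0) and its matching config:

```python
# Hypothetical invocation of the conversion entry point defined above; paths are placeholders.
from transformers.models.bert.convert_bert_original_tf2_checkpoint_to_pytorch import (
    convert_tf2_checkpoint_to_pytorch,
)

convert_tf2_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/tf2_bert_checkpoint",  # prefix of the TF 2.x checkpoint files
    config_path="/path/to/bert_config.json",            # BertConfig JSON describing the architecture
    pytorch_dump_path="/path/to/pytorch_model.bin",     # output file for torch.save(model.state_dict())
)
```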
venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,63 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BERT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
30
+ # Initialise PyTorch model
31
+ config = BertConfig.from_json_file(bert_config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = BertForPreTraining(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_bert(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--bert_config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help=(
55
+ "The config json file corresponding to the pre-trained BERT model. \n"
56
+ "This specifies the model architecture."
57
+ ),
58
+ )
59
+ parser.add_argument(
60
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
61
+ )
62
+ args = parser.parse_args()
63
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,187 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script converts a lm-head checkpoint from the "Token Dropping" implementation into a PyTorch-compatible BERT
17
+ model. The official implementation of "Token Dropping" can be found in the TensorFlow Models repository:
18
+
19
+ https://github.com/tensorflow/models/tree/master/official/projects/token_dropping
20
+ """
21
+ import argparse
22
+
23
+ import tensorflow as tf
24
+ import torch
25
+
26
+ from transformers import BertConfig, BertForMaskedLM
27
+ from transformers.models.bert.modeling_bert import (
28
+ BertIntermediate,
29
+ BertLayer,
30
+ BertOutput,
31
+ BertPooler,
32
+ BertSelfAttention,
33
+ BertSelfOutput,
34
+ )
35
+ from transformers.utils import logging
36
+
37
+
38
+ logging.set_verbosity_info()
39
+
40
+
41
+ def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
42
+ def get_masked_lm_array(name: str):
43
+ full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
44
+ array = tf.train.load_variable(tf_checkpoint_path, full_name)
45
+
46
+ if "kernel" in name:
47
+ array = array.transpose()
48
+
49
+ return torch.from_numpy(array)
50
+
51
+ def get_encoder_array(name: str):
52
+ full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
53
+ array = tf.train.load_variable(tf_checkpoint_path, full_name)
54
+
55
+ if "kernel" in name:
56
+ array = array.transpose()
57
+
58
+ return torch.from_numpy(array)
59
+
60
+ def get_encoder_layer_array(layer_index: int, name: str):
61
+ full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
62
+ array = tf.train.load_variable(tf_checkpoint_path, full_name)
63
+
64
+ if "kernel" in name:
65
+ array = array.transpose()
66
+
67
+ return torch.from_numpy(array)
68
+
69
+ def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
70
+ full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
71
+ array = tf.train.load_variable(tf_checkpoint_path, full_name)
72
+ array = array.reshape(original_shape)
73
+
74
+ if "kernel" in name:
75
+ array = array.transpose()
76
+
77
+ return torch.from_numpy(array)
78
+
79
+ print(f"Loading model based on config from {config_path}...")
80
+ config = BertConfig.from_json_file(config_path)
81
+ model = BertForMaskedLM(config)
82
+
83
+ # Layers
84
+ for layer_index in range(0, config.num_hidden_layers):
85
+ layer: BertLayer = model.bert.encoder.layer[layer_index]
86
+
87
+ # Self-attention
88
+ self_attn: BertSelfAttention = layer.attention.self
89
+
90
+ self_attn.query.weight.data = get_encoder_attention_layer_array(
91
+ layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
92
+ )
93
+ self_attn.query.bias.data = get_encoder_attention_layer_array(
94
+ layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
95
+ )
96
+ self_attn.key.weight.data = get_encoder_attention_layer_array(
97
+ layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
98
+ )
99
+ self_attn.key.bias.data = get_encoder_attention_layer_array(
100
+ layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
101
+ )
102
+ self_attn.value.weight.data = get_encoder_attention_layer_array(
103
+ layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
104
+ )
105
+ self_attn.value.bias.data = get_encoder_attention_layer_array(
106
+ layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
107
+ )
108
+
109
+ # Self-attention Output
110
+ self_output: BertSelfOutput = layer.attention.output
111
+
112
+ self_output.dense.weight.data = get_encoder_attention_layer_array(
113
+ layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
114
+ )
115
+ self_output.dense.bias.data = get_encoder_attention_layer_array(
116
+ layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
117
+ )
118
+
119
+ self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
120
+ self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")
121
+
122
+ # Intermediate
123
+ intermediate: BertIntermediate = layer.intermediate
124
+
125
+ intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
126
+ intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")
127
+
128
+ # Output
129
+ bert_output: BertOutput = layer.output
130
+
131
+ bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
132
+ bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
133
+
134
+ bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
135
+ bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")
136
+
137
+ # Embeddings
138
+ model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
139
+ model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
140
+ model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
141
+ model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")
142
+
143
+ # LM Head
144
+ lm_head = model.cls.predictions.transform
145
+
146
+ lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
147
+ lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
148
+
149
+ lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
150
+ lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
151
+
152
+ model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")
153
+
154
+ # Pooling
155
+ model.bert.pooler = BertPooler(config=config)
156
+ model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
157
+ model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")
158
+
159
+ # Export final model
160
+ model.save_pretrained(pytorch_dump_path)
161
+
162
+ # Integration test - should load without any errors ;)
163
+ new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
164
+ print(new_model.eval())
165
+
166
+ print("Model conversion was done successfully!")
167
+
168
+
169
+ if __name__ == "__main__":
170
+ parser = argparse.ArgumentParser()
171
+ parser.add_argument(
172
+ "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
173
+ )
174
+ parser.add_argument(
175
+ "--bert_config_file",
176
+ type=str,
177
+ required=True,
178
+ help="The config json file corresponding to the BERT model. This specifies the model architecture.",
179
+ )
180
+ parser.add_argument(
181
+ "--pytorch_dump_path",
182
+ type=str,
183
+ required=True,
184
+ help="Path to the output PyTorch model.",
185
+ )
186
+ args = parser.parse_args()
187
+ convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
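Since this converter ends with `model.save_pretrained(pytorch_dump_path)`, the output directory can then be loaded like any other checkpoint. A minimal, hypothetical follow-up (paths are placeholders; the tokenizer must match the converted vocabulary):

```python
import torch
from transformers import BertForMaskedLM, BertTokenizer

# Load the converted Token Dropping checkpoint and run a masked-LM prediction.
model = BertForMaskedLM.from_pretrained("/path/to/converted_token_dropping_bert")
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")  # vocab must match the checkpoint

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Find the [MASK] position and decode the highest-scoring token.
mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))
```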
venv/lib/python3.10/site-packages/transformers/models/bert/modeling_bert.py ADDED
@@ -0,0 +1,1867 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch BERT model."""
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ BaseModelOutputWithPoolingAndCrossAttentions,
33
+ CausalLMOutputWithCrossAttentions,
34
+ MaskedLMOutput,
35
+ MultipleChoiceModelOutput,
36
+ NextSentencePredictorOutput,
37
+ QuestionAnsweringModelOutput,
38
+ SequenceClassifierOutput,
39
+ TokenClassifierOutput,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
43
+ from ...utils import (
44
+ ModelOutput,
45
+ add_code_sample_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_bert import BertConfig
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
57
+ _CONFIG_FOR_DOC = "BertConfig"
58
+
59
+ # TokenClassification docstring
60
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "dbmdz/bert-large-cased-finetuned-conll03-english"
61
+ _TOKEN_CLASS_EXPECTED_OUTPUT = (
62
+ "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] "
63
+ )
64
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.01
65
+
66
+ # QuestionAnswering docstring
67
+ _CHECKPOINT_FOR_QA = "deepset/bert-base-cased-squad2"
68
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
69
+ _QA_EXPECTED_LOSS = 7.41
70
+ _QA_TARGET_START_INDEX = 14
71
+ _QA_TARGET_END_INDEX = 15
72
+
73
+ # SequenceClassification docstring
74
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "textattack/bert-base-uncased-yelp-polarity"
75
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"
76
+ _SEQ_CLASS_EXPECTED_LOSS = 0.01
77
+
78
+
79
+ from ..deprecated._archive_maps import BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
80
+
81
+
82
+ def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
83
+ """Load tf checkpoints in a pytorch model."""
84
+ try:
85
+ import re
86
+
87
+ import numpy as np
88
+ import tensorflow as tf
89
+ except ImportError:
90
+ logger.error(
91
+ "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
92
+ "https://www.tensorflow.org/install/ for installation instructions."
93
+ )
94
+ raise
95
+ tf_path = os.path.abspath(tf_checkpoint_path)
96
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
97
+ # Load weights from TF model
98
+ init_vars = tf.train.list_variables(tf_path)
99
+ names = []
100
+ arrays = []
101
+ for name, shape in init_vars:
102
+ logger.info(f"Loading TF weight {name} with shape {shape}")
103
+ array = tf.train.load_variable(tf_path, name)
104
+ names.append(name)
105
+ arrays.append(array)
106
+
107
+ for name, array in zip(names, arrays):
108
+ name = name.split("/")
109
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
110
+ # which are not required for using pretrained model
111
+ if any(
112
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
113
+ for n in name
114
+ ):
115
+ logger.info(f"Skipping {'/'.join(name)}")
116
+ continue
117
+ pointer = model
118
+ for m_name in name:
119
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
120
+ scope_names = re.split(r"_(\d+)", m_name)
121
+ else:
122
+ scope_names = [m_name]
123
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
124
+ pointer = getattr(pointer, "weight")
125
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
126
+ pointer = getattr(pointer, "bias")
127
+ elif scope_names[0] == "output_weights":
128
+ pointer = getattr(pointer, "weight")
129
+ elif scope_names[0] == "squad":
130
+ pointer = getattr(pointer, "classifier")
131
+ else:
132
+ try:
133
+ pointer = getattr(pointer, scope_names[0])
134
+ except AttributeError:
135
+ logger.info(f"Skipping {'/'.join(name)}")
136
+ continue
137
+ if len(scope_names) >= 2:
138
+ num = int(scope_names[1])
139
+ pointer = pointer[num]
140
+ if m_name[-11:] == "_embeddings":
141
+ pointer = getattr(pointer, "weight")
142
+ elif m_name == "kernel":
143
+ array = np.transpose(array)
144
+ try:
145
+ if pointer.shape != array.shape:
146
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
147
+ except ValueError as e:
148
+ e.args += (pointer.shape, array.shape)
149
+ raise
150
+ logger.info(f"Initialize PyTorch weight {name}")
151
+ pointer.data = torch.from_numpy(array)
152
+ return model
153
+
154
+
155
+ class BertEmbeddings(nn.Module):
156
+ """Construct the embeddings from word, position and token_type embeddings."""
157
+
158
+ def __init__(self, config):
159
+ super().__init__()
160
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
161
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
162
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
163
+
164
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
165
+ # any TensorFlow checkpoint file
166
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
167
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
168
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
169
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
170
+ self.register_buffer(
171
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
172
+ )
173
+ self.register_buffer(
174
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
175
+ )
176
+
177
+ def forward(
178
+ self,
179
+ input_ids: Optional[torch.LongTensor] = None,
180
+ token_type_ids: Optional[torch.LongTensor] = None,
181
+ position_ids: Optional[torch.LongTensor] = None,
182
+ inputs_embeds: Optional[torch.FloatTensor] = None,
183
+ past_key_values_length: int = 0,
184
+ ) -> torch.Tensor:
185
+ if input_ids is not None:
186
+ input_shape = input_ids.size()
187
+ else:
188
+ input_shape = inputs_embeds.size()[:-1]
189
+
190
+ seq_length = input_shape[1]
191
+
192
+ if position_ids is None:
193
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
194
+
195
+ # Set token_type_ids to the registered buffer defined in the constructor (all zeros), which usually applies
197
+ # when token_type_ids are auto-generated. The registered buffer lets users trace the model without passing
198
+ # token_type_ids explicitly, solving issue #5664
198
+ if token_type_ids is None:
199
+ if hasattr(self, "token_type_ids"):
200
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
201
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
202
+ token_type_ids = buffered_token_type_ids_expanded
203
+ else:
204
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
205
+
206
+ if inputs_embeds is None:
207
+ inputs_embeds = self.word_embeddings(input_ids)
208
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
209
+
210
+ embeddings = inputs_embeds + token_type_embeddings
211
+ if self.position_embedding_type == "absolute":
212
+ position_embeddings = self.position_embeddings(position_ids)
213
+ embeddings += position_embeddings
214
+ embeddings = self.LayerNorm(embeddings)
215
+ embeddings = self.dropout(embeddings)
216
+ return embeddings
217
+
218
+
219
+ class BertSelfAttention(nn.Module):
220
+ def __init__(self, config, position_embedding_type=None):
221
+ super().__init__()
222
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
223
+ raise ValueError(
224
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
225
+ f"heads ({config.num_attention_heads})"
226
+ )
227
+
228
+ self.num_attention_heads = config.num_attention_heads
229
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
230
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
231
+
232
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
233
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
234
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
235
+
236
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
237
+ self.position_embedding_type = position_embedding_type or getattr(
238
+ config, "position_embedding_type", "absolute"
239
+ )
240
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
241
+ self.max_position_embeddings = config.max_position_embeddings
242
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
243
+
244
+ self.is_decoder = config.is_decoder
245
+
246
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
247
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
248
+ x = x.view(new_x_shape)
249
+ return x.permute(0, 2, 1, 3)
250
+
251
+ def forward(
252
+ self,
253
+ hidden_states: torch.Tensor,
254
+ attention_mask: Optional[torch.FloatTensor] = None,
255
+ head_mask: Optional[torch.FloatTensor] = None,
256
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
257
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
258
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
259
+ output_attentions: Optional[bool] = False,
260
+ ) -> Tuple[torch.Tensor]:
261
+ mixed_query_layer = self.query(hidden_states)
262
+
263
+ # If this is instantiated as a cross-attention module, the keys
264
+ # and values come from an encoder; the attention mask needs to be
265
+ # such that the encoder's padding tokens are not attended to.
266
+ is_cross_attention = encoder_hidden_states is not None
267
+
268
+ if is_cross_attention and past_key_value is not None:
269
+ # reuse k,v, cross_attentions
270
+ key_layer = past_key_value[0]
271
+ value_layer = past_key_value[1]
272
+ attention_mask = encoder_attention_mask
273
+ elif is_cross_attention:
274
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
275
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
276
+ attention_mask = encoder_attention_mask
277
+ elif past_key_value is not None:
278
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
279
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
280
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
281
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
282
+ else:
283
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
284
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
285
+
286
+ query_layer = self.transpose_for_scores(mixed_query_layer)
287
+
288
+ use_cache = past_key_value is not None
289
+ if self.is_decoder:
290
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
291
+ # Further calls to cross_attention layer can then reuse all cross-attention
292
+ # key/value_states (first "if" case)
293
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
294
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
295
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
296
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
297
+ past_key_value = (key_layer, value_layer)
298
+
299
+ # Take the dot product between "query" and "key" to get the raw attention scores.
300
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
301
+
302
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
303
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
304
+ if use_cache:
305
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
306
+ -1, 1
307
+ )
308
+ else:
309
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
310
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
311
+ distance = position_ids_l - position_ids_r
312
+
313
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
314
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
315
+
316
+ if self.position_embedding_type == "relative_key":
317
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
318
+ attention_scores = attention_scores + relative_position_scores
319
+ elif self.position_embedding_type == "relative_key_query":
320
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
321
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
322
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
323
+
324
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
325
+ if attention_mask is not None:
326
+ # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
327
+ attention_scores = attention_scores + attention_mask
328
+
329
+ # Normalize the attention scores to probabilities.
330
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
331
+
332
+ # This is actually dropping out entire tokens to attend to, which might
333
+ # seem a bit unusual, but is taken from the original Transformer paper.
334
+ attention_probs = self.dropout(attention_probs)
335
+
336
+ # Mask heads if we want to
337
+ if head_mask is not None:
338
+ attention_probs = attention_probs * head_mask
339
+
340
+ context_layer = torch.matmul(attention_probs, value_layer)
341
+
342
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
343
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
344
+ context_layer = context_layer.view(new_context_layer_shape)
345
+
346
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
347
+
348
+ if self.is_decoder:
349
+ outputs = outputs + (past_key_value,)
350
+ return outputs
351
+
352
+
353
+ class BertSelfOutput(nn.Module):
354
+ def __init__(self, config):
355
+ super().__init__()
356
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
357
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
358
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
359
+
360
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
361
+ hidden_states = self.dense(hidden_states)
362
+ hidden_states = self.dropout(hidden_states)
363
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
364
+ return hidden_states
365
+
366
+
367
+ class BertAttention(nn.Module):
368
+ def __init__(self, config, position_embedding_type=None):
369
+ super().__init__()
370
+ self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
371
+ self.output = BertSelfOutput(config)
372
+ self.pruned_heads = set()
373
+
374
+ def prune_heads(self, heads):
375
+ if len(heads) == 0:
376
+ return
377
+ heads, index = find_pruneable_heads_and_indices(
378
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
379
+ )
380
+
381
+ # Prune linear layers
382
+ self.self.query = prune_linear_layer(self.self.query, index)
383
+ self.self.key = prune_linear_layer(self.self.key, index)
384
+ self.self.value = prune_linear_layer(self.self.value, index)
385
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
386
+
387
+ # Update hyper params and store pruned heads
388
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
389
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
390
+ self.pruned_heads = self.pruned_heads.union(heads)
391
+
392
+ def forward(
393
+ self,
394
+ hidden_states: torch.Tensor,
395
+ attention_mask: Optional[torch.FloatTensor] = None,
396
+ head_mask: Optional[torch.FloatTensor] = None,
397
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
398
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
399
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
400
+ output_attentions: Optional[bool] = False,
401
+ ) -> Tuple[torch.Tensor]:
402
+ self_outputs = self.self(
403
+ hidden_states,
404
+ attention_mask,
405
+ head_mask,
406
+ encoder_hidden_states,
407
+ encoder_attention_mask,
408
+ past_key_value,
409
+ output_attentions,
410
+ )
411
+ attention_output = self.output(self_outputs[0], hidden_states)
412
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
413
+ return outputs
414
+
415
+
416
+ class BertIntermediate(nn.Module):
417
+ def __init__(self, config):
418
+ super().__init__()
419
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
420
+ if isinstance(config.hidden_act, str):
421
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
422
+ else:
423
+ self.intermediate_act_fn = config.hidden_act
424
+
425
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
426
+ hidden_states = self.dense(hidden_states)
427
+ hidden_states = self.intermediate_act_fn(hidden_states)
428
+ return hidden_states
429
+
430
+
431
+ class BertOutput(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
435
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
436
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
437
+
438
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+
445
+ class BertLayer(nn.Module):
446
+ def __init__(self, config):
447
+ super().__init__()
448
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
449
+ self.seq_len_dim = 1
450
+ self.attention = BertAttention(config)
451
+ self.is_decoder = config.is_decoder
452
+ self.add_cross_attention = config.add_cross_attention
453
+ if self.add_cross_attention:
454
+ if not self.is_decoder:
455
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
456
+ self.crossattention = BertAttention(config, position_embedding_type="absolute")
457
+ self.intermediate = BertIntermediate(config)
458
+ self.output = BertOutput(config)
459
+
460
+ def forward(
461
+ self,
462
+ hidden_states: torch.Tensor,
463
+ attention_mask: Optional[torch.FloatTensor] = None,
464
+ head_mask: Optional[torch.FloatTensor] = None,
465
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
466
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
467
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
468
+ output_attentions: Optional[bool] = False,
469
+ ) -> Tuple[torch.Tensor]:
470
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
471
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
472
+ self_attention_outputs = self.attention(
473
+ hidden_states,
474
+ attention_mask,
475
+ head_mask,
476
+ output_attentions=output_attentions,
477
+ past_key_value=self_attn_past_key_value,
478
+ )
479
+ attention_output = self_attention_outputs[0]
480
+
481
+ # if decoder, the last output is tuple of self-attn cache
482
+ if self.is_decoder:
483
+ outputs = self_attention_outputs[1:-1]
484
+ present_key_value = self_attention_outputs[-1]
485
+ else:
486
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
487
+
488
+ cross_attn_present_key_value = None
489
+ if self.is_decoder and encoder_hidden_states is not None:
490
+ if not hasattr(self, "crossattention"):
491
+ raise ValueError(
492
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
493
+ " by setting `config.add_cross_attention=True`"
494
+ )
495
+
496
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
497
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
498
+ cross_attention_outputs = self.crossattention(
499
+ attention_output,
500
+ attention_mask,
501
+ head_mask,
502
+ encoder_hidden_states,
503
+ encoder_attention_mask,
504
+ cross_attn_past_key_value,
505
+ output_attentions,
506
+ )
507
+ attention_output = cross_attention_outputs[0]
508
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
509
+
510
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
511
+ cross_attn_present_key_value = cross_attention_outputs[-1]
512
+ present_key_value = present_key_value + cross_attn_present_key_value
513
+
514
+ layer_output = apply_chunking_to_forward(
515
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
516
+ )
517
+ outputs = (layer_output,) + outputs
518
+
519
+ # if decoder, return the attn key/values as the last output
520
+ if self.is_decoder:
521
+ outputs = outputs + (present_key_value,)
522
+
523
+ return outputs
524
+
525
+ def feed_forward_chunk(self, attention_output):
526
+ intermediate_output = self.intermediate(attention_output)
527
+ layer_output = self.output(intermediate_output, attention_output)
528
+ return layer_output
529
+
530
+
531
+ class BertEncoder(nn.Module):
532
+ def __init__(self, config):
533
+ super().__init__()
534
+ self.config = config
535
+ self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
536
+ self.gradient_checkpointing = False
537
+
538
+ def forward(
539
+ self,
540
+ hidden_states: torch.Tensor,
541
+ attention_mask: Optional[torch.FloatTensor] = None,
542
+ head_mask: Optional[torch.FloatTensor] = None,
543
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
544
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
545
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
546
+ use_cache: Optional[bool] = None,
547
+ output_attentions: Optional[bool] = False,
548
+ output_hidden_states: Optional[bool] = False,
549
+ return_dict: Optional[bool] = True,
550
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
551
+ all_hidden_states = () if output_hidden_states else None
552
+ all_self_attentions = () if output_attentions else None
553
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
554
+
555
+ if self.gradient_checkpointing and self.training:
556
+ if use_cache:
557
+ logger.warning_once(
558
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
559
+ )
560
+ use_cache = False
561
+
562
+ next_decoder_cache = () if use_cache else None
563
+ for i, layer_module in enumerate(self.layer):
564
+ if output_hidden_states:
565
+ all_hidden_states = all_hidden_states + (hidden_states,)
566
+
567
+ layer_head_mask = head_mask[i] if head_mask is not None else None
568
+ past_key_value = past_key_values[i] if past_key_values is not None else None
569
+
570
+ if self.gradient_checkpointing and self.training:
571
+ layer_outputs = self._gradient_checkpointing_func(
572
+ layer_module.__call__,
573
+ hidden_states,
574
+ attention_mask,
575
+ layer_head_mask,
576
+ encoder_hidden_states,
577
+ encoder_attention_mask,
578
+ past_key_value,
579
+ output_attentions,
580
+ )
581
+ else:
582
+ layer_outputs = layer_module(
583
+ hidden_states,
584
+ attention_mask,
585
+ layer_head_mask,
586
+ encoder_hidden_states,
587
+ encoder_attention_mask,
588
+ past_key_value,
589
+ output_attentions,
590
+ )
591
+
592
+ hidden_states = layer_outputs[0]
593
+ if use_cache:
594
+ next_decoder_cache += (layer_outputs[-1],)
595
+ if output_attentions:
596
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
597
+ if self.config.add_cross_attention:
598
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
599
+
600
+ if output_hidden_states:
601
+ all_hidden_states = all_hidden_states + (hidden_states,)
602
+
603
+ if not return_dict:
604
+ return tuple(
605
+ v
606
+ for v in [
607
+ hidden_states,
608
+ next_decoder_cache,
609
+ all_hidden_states,
610
+ all_self_attentions,
611
+ all_cross_attentions,
612
+ ]
613
+ if v is not None
614
+ )
615
+ return BaseModelOutputWithPastAndCrossAttentions(
616
+ last_hidden_state=hidden_states,
617
+ past_key_values=next_decoder_cache,
618
+ hidden_states=all_hidden_states,
619
+ attentions=all_self_attentions,
620
+ cross_attentions=all_cross_attentions,
621
+ )
622
+
623
+
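The encoder above accumulates per-layer outputs into `hidden_states`, `attentions` and (for decoders) `cross_attentions`. A minimal sketch of what that collection looks like from the outside, using a tiny randomly initialized `BertModel` (the toy sizes are illustrative assumptions, not values from this file):

```python
import torch
from transformers import BertConfig, BertModel

# Tiny, randomly initialized model: 3 layers, 4 heads (illustrative sizes only).
config = BertConfig(hidden_size=32, num_hidden_layers=3, num_attention_heads=4, intermediate_size=64)
model = BertModel(config)

out = model(
    input_ids=torch.randint(0, config.vocab_size, (1, 5)),
    output_hidden_states=True,
    output_attentions=True,
)
print(len(out.hidden_states))   # 4: the embedding output plus one entry per layer
print(len(out.attentions))      # 3: one self-attention map per layer
print(out.attentions[0].shape)  # torch.Size([1, 4, 5, 5]) = (batch, heads, seq, seq)
```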
624
+ class BertPooler(nn.Module):
625
+ def __init__(self, config):
626
+ super().__init__()
627
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
628
+ self.activation = nn.Tanh()
629
+
630
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
631
+ # We "pool" the model by simply taking the hidden state corresponding
632
+ # to the first token.
633
+ first_token_tensor = hidden_states[:, 0]
634
+ pooled_output = self.dense(first_token_tensor)
635
+ pooled_output = self.activation(pooled_output)
636
+ return pooled_output
637
+
638
+
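As the comment in `BertPooler.forward` notes, pooling is just the first ([CLS]) token's hidden state passed through a dense layer and a tanh. A quick sketch verifying that equivalence against `BertModel`'s `pooler_output`, assuming the public `google-bert/bert-base-uncased` checkpoint:

```python
import torch
from transformers import AutoTokenizer, BertModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = BertModel.from_pretrained("google-bert/bert-base-uncased").eval()

inputs = tokenizer("Pooling keeps only the first token.", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)
    # Recompute the pooled output by hand: dense + tanh on the [CLS] hidden state.
    manual = torch.tanh(model.pooler.dense(out.last_hidden_state[:, 0]))

print(torch.allclose(out.pooler_output, manual, atol=1e-6))  # True
```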
639
+ class BertPredictionHeadTransform(nn.Module):
640
+ def __init__(self, config):
641
+ super().__init__()
642
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
643
+ if isinstance(config.hidden_act, str):
644
+ self.transform_act_fn = ACT2FN[config.hidden_act]
645
+ else:
646
+ self.transform_act_fn = config.hidden_act
647
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
648
+
649
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
650
+ hidden_states = self.dense(hidden_states)
651
+ hidden_states = self.transform_act_fn(hidden_states)
652
+ hidden_states = self.LayerNorm(hidden_states)
653
+ return hidden_states
654
+
655
+
656
+ class BertLMPredictionHead(nn.Module):
657
+ def __init__(self, config):
658
+ super().__init__()
659
+ self.transform = BertPredictionHeadTransform(config)
660
+
661
+ # The output weights are the same as the input embeddings, but there is
662
+ # an output-only bias for each token.
663
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
664
+
665
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
666
+
667
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
668
+ self.decoder.bias = self.bias
669
+
670
+ def forward(self, hidden_states):
671
+ hidden_states = self.transform(hidden_states)
672
+ hidden_states = self.decoder(hidden_states)
673
+ return hidden_states
674
+
675
+
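The `self.decoder.bias = self.bias` link above is what lets `resize_token_embeddings` keep the output bias in sync with the (tied) decoder weight. A short sketch of that behavior through the public API (`BertForMaskedLM` is defined further down in this file); the printed sizes assume the `google-bert/bert-base-uncased` vocabulary of 30522 tokens:

```python
from transformers import BertForMaskedLM

model = BertForMaskedLM.from_pretrained("google-bert/bert-base-uncased")

# The MLM decoder weight is tied to the input word embeddings (same storage).
same_storage = (
    model.get_input_embeddings().weight.data_ptr()
    == model.get_output_embeddings().weight.data_ptr()
)
print(same_storage)  # True

# Growing the vocabulary resizes the decoder weight *and* its bias together.
model.resize_token_embeddings(model.config.vocab_size + 10)
print(model.get_output_embeddings().weight.shape)  # torch.Size([30532, 768])
print(model.get_output_embeddings().bias.shape)    # torch.Size([30532])
```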
676
+ class BertOnlyMLMHead(nn.Module):
677
+ def __init__(self, config):
678
+ super().__init__()
679
+ self.predictions = BertLMPredictionHead(config)
680
+
681
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
682
+ prediction_scores = self.predictions(sequence_output)
683
+ return prediction_scores
684
+
685
+
686
+ class BertOnlyNSPHead(nn.Module):
687
+ def __init__(self, config):
688
+ super().__init__()
689
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
690
+
691
+ def forward(self, pooled_output):
692
+ seq_relationship_score = self.seq_relationship(pooled_output)
693
+ return seq_relationship_score
694
+
695
+
696
+ class BertPreTrainingHeads(nn.Module):
697
+ def __init__(self, config):
698
+ super().__init__()
699
+ self.predictions = BertLMPredictionHead(config)
700
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
701
+
702
+ def forward(self, sequence_output, pooled_output):
703
+ prediction_scores = self.predictions(sequence_output)
704
+ seq_relationship_score = self.seq_relationship(pooled_output)
705
+ return prediction_scores, seq_relationship_score
706
+
707
+
708
+ class BertPreTrainedModel(PreTrainedModel):
709
+ """
710
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
711
+ models.
712
+ """
713
+
714
+ config_class = BertConfig
715
+ load_tf_weights = load_tf_weights_in_bert
716
+ base_model_prefix = "bert"
717
+ supports_gradient_checkpointing = True
718
+
719
+ def _init_weights(self, module):
720
+ """Initialize the weights"""
721
+ if isinstance(module, nn.Linear):
722
+ # Slightly different from the TF version which uses truncated_normal for initialization
723
+ # cf https://github.com/pytorch/pytorch/pull/5617
724
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
725
+ if module.bias is not None:
726
+ module.bias.data.zero_()
727
+ elif isinstance(module, nn.Embedding):
728
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
729
+ if module.padding_idx is not None:
730
+ module.weight.data[module.padding_idx].zero_()
731
+ elif isinstance(module, nn.LayerNorm):
732
+ module.bias.data.zero_()
733
+ module.weight.data.fill_(1.0)
734
+
735
+
736
+ @dataclass
737
+ class BertForPreTrainingOutput(ModelOutput):
738
+ """
739
+ Output type of [`BertForPreTraining`].
740
+
741
+ Args:
742
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
743
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
744
+ (classification) loss.
745
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
746
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
747
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
748
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
749
+ before SoftMax).
750
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
751
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
752
+ shape `(batch_size, sequence_length, hidden_size)`.
753
+
754
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
755
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
756
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
757
+ sequence_length)`.
758
+
759
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
760
+ heads.
761
+ """
762
+
763
+ loss: Optional[torch.FloatTensor] = None
764
+ prediction_logits: torch.FloatTensor = None
765
+ seq_relationship_logits: torch.FloatTensor = None
766
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
767
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
768
+
769
+
770
+ BERT_START_DOCSTRING = r"""
771
+
772
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
773
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
774
+ etc.)
775
+
776
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
777
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
778
+ and behavior.
779
+
780
+ Parameters:
781
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
782
+ Initializing with a config file does not load the weights associated with the model, only the
783
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
784
+ """
785
+
786
+ BERT_INPUTS_DOCSTRING = r"""
787
+ Args:
788
+ input_ids (`torch.LongTensor` of shape `({0})`):
789
+ Indices of input sequence tokens in the vocabulary.
790
+
791
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
792
+ [`PreTrainedTokenizer.__call__`] for details.
793
+
794
+ [What are input IDs?](../glossary#input-ids)
795
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
796
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
797
+
798
+ - 1 for tokens that are **not masked**,
799
+ - 0 for tokens that are **masked**.
800
+
801
+ [What are attention masks?](../glossary#attention-mask)
802
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
803
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
804
+ 1]`:
805
+
806
+ - 0 corresponds to a *sentence A* token,
807
+ - 1 corresponds to a *sentence B* token.
808
+
809
+ [What are token type IDs?](../glossary#token-type-ids)
810
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
811
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
812
+ config.max_position_embeddings - 1]`.
813
+
814
+ [What are position IDs?](../glossary#position-ids)
815
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
816
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
817
+
818
+ - 1 indicates the head is **not masked**,
819
+ - 0 indicates the head is **masked**.
820
+
821
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
822
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
823
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
824
+ model's internal embedding lookup matrix.
825
+ output_attentions (`bool`, *optional*):
826
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
827
+ tensors for more detail.
828
+ output_hidden_states (`bool`, *optional*):
829
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
830
+ more detail.
831
+ return_dict (`bool`, *optional*):
832
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
833
+ """
834
+
835
+
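The `input_ids` / `attention_mask` / `token_type_ids` arguments documented above are exactly what the tokenizer produces for a (padded) sentence pair. A small sketch, assuming the public `google-bert/bert-base-uncased` tokenizer:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
enc = tokenizer(
    "How are you?", "I am fine.",  # sentence A, sentence B
    padding="max_length", max_length=16, return_tensors="pt",
)

print(enc["input_ids"].shape)    # torch.Size([1, 16])
print(enc["attention_mask"][0])  # 1 for real tokens, 0 for the padding positions
print(enc["token_type_ids"][0])  # 0 over sentence A (and padding), 1 over sentence B
```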
836
+ @add_start_docstrings(
837
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
838
+ BERT_START_DOCSTRING,
839
+ )
840
+ class BertModel(BertPreTrainedModel):
841
+ """
842
+
843
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
844
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
845
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
846
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
847
+
848
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
849
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
850
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
851
+ """
852
+
853
+ def __init__(self, config, add_pooling_layer=True):
854
+ super().__init__(config)
855
+ self.config = config
856
+
857
+ self.embeddings = BertEmbeddings(config)
858
+ self.encoder = BertEncoder(config)
859
+
860
+ self.pooler = BertPooler(config) if add_pooling_layer else None
861
+
862
+ # Initialize weights and apply final processing
863
+ self.post_init()
864
+
865
+ def get_input_embeddings(self):
866
+ return self.embeddings.word_embeddings
867
+
868
+ def set_input_embeddings(self, value):
869
+ self.embeddings.word_embeddings = value
870
+
871
+ def _prune_heads(self, heads_to_prune):
872
+ """
873
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
874
+ class PreTrainedModel
875
+ """
876
+ for layer, heads in heads_to_prune.items():
877
+ self.encoder.layer[layer].attention.prune_heads(heads)
878
+
879
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
880
+ @add_code_sample_docstrings(
881
+ checkpoint=_CHECKPOINT_FOR_DOC,
882
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
883
+ config_class=_CONFIG_FOR_DOC,
884
+ )
885
+ def forward(
886
+ self,
887
+ input_ids: Optional[torch.Tensor] = None,
888
+ attention_mask: Optional[torch.Tensor] = None,
889
+ token_type_ids: Optional[torch.Tensor] = None,
890
+ position_ids: Optional[torch.Tensor] = None,
891
+ head_mask: Optional[torch.Tensor] = None,
892
+ inputs_embeds: Optional[torch.Tensor] = None,
893
+ encoder_hidden_states: Optional[torch.Tensor] = None,
894
+ encoder_attention_mask: Optional[torch.Tensor] = None,
895
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
896
+ use_cache: Optional[bool] = None,
897
+ output_attentions: Optional[bool] = None,
898
+ output_hidden_states: Optional[bool] = None,
899
+ return_dict: Optional[bool] = None,
900
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
901
+ r"""
902
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
903
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
904
+ the model is configured as a decoder.
905
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
906
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
907
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
908
+
909
+ - 1 for tokens that are **not masked**,
910
+ - 0 for tokens that are **masked**.
911
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
912
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
913
+
914
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
915
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
916
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
917
+ use_cache (`bool`, *optional*):
918
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
919
+ `past_key_values`).
920
+ """
921
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
922
+ output_hidden_states = (
923
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
924
+ )
925
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
926
+
927
+ if self.config.is_decoder:
928
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
929
+ else:
930
+ use_cache = False
931
+
932
+ if input_ids is not None and inputs_embeds is not None:
933
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
934
+ elif input_ids is not None:
935
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
936
+ input_shape = input_ids.size()
937
+ elif inputs_embeds is not None:
938
+ input_shape = inputs_embeds.size()[:-1]
939
+ else:
940
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
941
+
942
+ batch_size, seq_length = input_shape
943
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
944
+
945
+ # past_key_values_length
946
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
947
+
948
+ if attention_mask is None:
949
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
950
+
951
+ if token_type_ids is None:
952
+ if hasattr(self.embeddings, "token_type_ids"):
953
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
954
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
955
+ token_type_ids = buffered_token_type_ids_expanded
956
+ else:
957
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
958
+
959
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
960
+ # ourselves in which case we just need to make it broadcastable to all heads.
961
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
962
+
963
+ # If a 2D or 3D attention mask is provided for the cross-attention
964
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
965
+ if self.config.is_decoder and encoder_hidden_states is not None:
966
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
967
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
968
+ if encoder_attention_mask is None:
969
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
970
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
971
+ else:
972
+ encoder_extended_attention_mask = None
973
+
974
+ # Prepare head mask if needed
975
+ # 1.0 in head_mask indicates that we keep the head
976
+ # attention_probs has shape bsz x n_heads x N x N
977
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
978
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
979
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
980
+
981
+ embedding_output = self.embeddings(
982
+ input_ids=input_ids,
983
+ position_ids=position_ids,
984
+ token_type_ids=token_type_ids,
985
+ inputs_embeds=inputs_embeds,
986
+ past_key_values_length=past_key_values_length,
987
+ )
988
+ encoder_outputs = self.encoder(
989
+ embedding_output,
990
+ attention_mask=extended_attention_mask,
991
+ head_mask=head_mask,
992
+ encoder_hidden_states=encoder_hidden_states,
993
+ encoder_attention_mask=encoder_extended_attention_mask,
994
+ past_key_values=past_key_values,
995
+ use_cache=use_cache,
996
+ output_attentions=output_attentions,
997
+ output_hidden_states=output_hidden_states,
998
+ return_dict=return_dict,
999
+ )
1000
+ sequence_output = encoder_outputs[0]
1001
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1002
+
1003
+ if not return_dict:
1004
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1005
+
1006
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1007
+ last_hidden_state=sequence_output,
1008
+ pooler_output=pooled_output,
1009
+ past_key_values=encoder_outputs.past_key_values,
1010
+ hidden_states=encoder_outputs.hidden_states,
1011
+ attentions=encoder_outputs.attentions,
1012
+ cross_attentions=encoder_outputs.cross_attentions,
1013
+ )
1014
+
1015
+
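A minimal sketch of the decoder path described in the class docstring above: a toy, randomly initialized config with `is_decoder=True` and `add_cross_attention=True`, fed an `encoder_hidden_states` tensor (all sizes here are illustrative assumptions):

```python
import torch
from transformers import BertConfig, BertModel

config = BertConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=128, is_decoder=True, add_cross_attention=True,
)
decoder = BertModel(config)

input_ids = torch.randint(0, config.vocab_size, (1, 5))
encoder_hidden_states = torch.randn(1, 7, config.hidden_size)  # stand-in encoder output

out = decoder(
    input_ids=input_ids,
    encoder_hidden_states=encoder_hidden_states,
    use_cache=True,
    output_attentions=True,
)
print(out.last_hidden_state.shape)    # torch.Size([1, 5, 64])
print(out.cross_attentions[0].shape)  # torch.Size([1, 4, 5, 7]): queries over decoder positions, keys over encoder positions
print(len(out.past_key_values[0]))    # 4 tensors per layer: self-attn k/v + cross-attn k/v
```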
1016
+ @add_start_docstrings(
1017
+ """
1018
+ Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
1019
+ sentence prediction (classification)` head.
1020
+ """,
1021
+ BERT_START_DOCSTRING,
1022
+ )
1023
+ class BertForPreTraining(BertPreTrainedModel):
1024
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1025
+
1026
+ def __init__(self, config):
1027
+ super().__init__(config)
1028
+
1029
+ self.bert = BertModel(config)
1030
+ self.cls = BertPreTrainingHeads(config)
1031
+
1032
+ # Initialize weights and apply final processing
1033
+ self.post_init()
1034
+
1035
+ def get_output_embeddings(self):
1036
+ return self.cls.predictions.decoder
1037
+
1038
+ def set_output_embeddings(self, new_embeddings):
1039
+ self.cls.predictions.decoder = new_embeddings
1040
+
1041
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1042
+ @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1043
+ def forward(
1044
+ self,
1045
+ input_ids: Optional[torch.Tensor] = None,
1046
+ attention_mask: Optional[torch.Tensor] = None,
1047
+ token_type_ids: Optional[torch.Tensor] = None,
1048
+ position_ids: Optional[torch.Tensor] = None,
1049
+ head_mask: Optional[torch.Tensor] = None,
1050
+ inputs_embeds: Optional[torch.Tensor] = None,
1051
+ labels: Optional[torch.Tensor] = None,
1052
+ next_sentence_label: Optional[torch.Tensor] = None,
1053
+ output_attentions: Optional[bool] = None,
1054
+ output_hidden_states: Optional[bool] = None,
1055
+ return_dict: Optional[bool] = None,
1056
+ ) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]:
1057
+ r"""
1058
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1059
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1060
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
1061
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1062
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1063
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
1064
+ pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
1065
+
1066
+ - 0 indicates sequence B is a continuation of sequence A,
1067
+ - 1 indicates sequence B is a random sequence.
1068
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
1069
+ Used to hide legacy arguments that have been deprecated.
1070
+
1071
+ Returns:
1072
+
1073
+ Example:
1074
+
1075
+ ```python
1076
+ >>> from transformers import AutoTokenizer, BertForPreTraining
1077
+ >>> import torch
1078
+
1079
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1080
+ >>> model = BertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
1081
+
1082
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1083
+ >>> outputs = model(**inputs)
1084
+
1085
+ >>> prediction_logits = outputs.prediction_logits
1086
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1087
+ ```
1088
+ """
1089
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1090
+
1091
+ outputs = self.bert(
1092
+ input_ids,
1093
+ attention_mask=attention_mask,
1094
+ token_type_ids=token_type_ids,
1095
+ position_ids=position_ids,
1096
+ head_mask=head_mask,
1097
+ inputs_embeds=inputs_embeds,
1098
+ output_attentions=output_attentions,
1099
+ output_hidden_states=output_hidden_states,
1100
+ return_dict=return_dict,
1101
+ )
1102
+
1103
+ sequence_output, pooled_output = outputs[:2]
1104
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1105
+
1106
+ total_loss = None
1107
+ if labels is not None and next_sentence_label is not None:
1108
+ loss_fct = CrossEntropyLoss()
1109
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1110
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1111
+ total_loss = masked_lm_loss + next_sentence_loss
1112
+
1113
+ if not return_dict:
1114
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1115
+ return ((total_loss,) + output) if total_loss is not None else output
1116
+
1117
+ return BertForPreTrainingOutput(
1118
+ loss=total_loss,
1119
+ prediction_logits=prediction_scores,
1120
+ seq_relationship_logits=seq_relationship_score,
1121
+ hidden_states=outputs.hidden_states,
1122
+ attentions=outputs.attentions,
1123
+ )
1124
+
1125
+
1126
+ @add_start_docstrings(
1127
+ """Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
1128
+ )
1129
+ class BertLMHeadModel(BertPreTrainedModel):
1130
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
1131
+
1132
+ def __init__(self, config):
1133
+ super().__init__(config)
1134
+
1135
+ if not config.is_decoder:
1136
+ logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
1137
+
1138
+ self.bert = BertModel(config, add_pooling_layer=False)
1139
+ self.cls = BertOnlyMLMHead(config)
1140
+
1141
+ # Initialize weights and apply final processing
1142
+ self.post_init()
1143
+
1144
+ def get_output_embeddings(self):
1145
+ return self.cls.predictions.decoder
1146
+
1147
+ def set_output_embeddings(self, new_embeddings):
1148
+ self.cls.predictions.decoder = new_embeddings
1149
+
1150
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1151
+ @add_code_sample_docstrings(
1152
+ checkpoint=_CHECKPOINT_FOR_DOC,
1153
+ output_type=CausalLMOutputWithCrossAttentions,
1154
+ config_class=_CONFIG_FOR_DOC,
1155
+ )
1156
+ def forward(
1157
+ self,
1158
+ input_ids: Optional[torch.Tensor] = None,
1159
+ attention_mask: Optional[torch.Tensor] = None,
1160
+ token_type_ids: Optional[torch.Tensor] = None,
1161
+ position_ids: Optional[torch.Tensor] = None,
1162
+ head_mask: Optional[torch.Tensor] = None,
1163
+ inputs_embeds: Optional[torch.Tensor] = None,
1164
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1165
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1166
+ labels: Optional[torch.Tensor] = None,
1167
+ past_key_values: Optional[List[torch.Tensor]] = None,
1168
+ use_cache: Optional[bool] = None,
1169
+ output_attentions: Optional[bool] = None,
1170
+ output_hidden_states: Optional[bool] = None,
1171
+ return_dict: Optional[bool] = None,
1172
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1173
+ r"""
1174
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1175
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1176
+ the model is configured as a decoder.
1177
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1178
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1179
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1180
+
1181
+ - 1 for tokens that are **not masked**,
1182
+ - 0 for tokens that are **masked**.
1183
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1184
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1185
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1186
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1187
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1188
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1189
+
1190
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1191
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1192
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1193
+ use_cache (`bool`, *optional*):
1194
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1195
+ `past_key_values`).
1196
+ """
1197
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1198
+ if labels is not None:
1199
+ use_cache = False
1200
+
1201
+ outputs = self.bert(
1202
+ input_ids,
1203
+ attention_mask=attention_mask,
1204
+ token_type_ids=token_type_ids,
1205
+ position_ids=position_ids,
1206
+ head_mask=head_mask,
1207
+ inputs_embeds=inputs_embeds,
1208
+ encoder_hidden_states=encoder_hidden_states,
1209
+ encoder_attention_mask=encoder_attention_mask,
1210
+ past_key_values=past_key_values,
1211
+ use_cache=use_cache,
1212
+ output_attentions=output_attentions,
1213
+ output_hidden_states=output_hidden_states,
1214
+ return_dict=return_dict,
1215
+ )
1216
+
1217
+ sequence_output = outputs[0]
1218
+ prediction_scores = self.cls(sequence_output)
1219
+
1220
+ lm_loss = None
1221
+ if labels is not None:
1222
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1223
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1224
+ labels = labels[:, 1:].contiguous()
1225
+ loss_fct = CrossEntropyLoss()
1226
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1227
+
1228
+ if not return_dict:
1229
+ output = (prediction_scores,) + outputs[2:]
1230
+ return ((lm_loss,) + output) if lm_loss is not None else output
1231
+
1232
+ return CausalLMOutputWithCrossAttentions(
1233
+ loss=lm_loss,
1234
+ logits=prediction_scores,
1235
+ past_key_values=outputs.past_key_values,
1236
+ hidden_states=outputs.hidden_states,
1237
+ attentions=outputs.attentions,
1238
+ cross_attentions=outputs.cross_attentions,
1239
+ )
1240
+
1241
+ def prepare_inputs_for_generation(
1242
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
1243
+ ):
1244
+ input_shape = input_ids.shape
1245
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1246
+ if attention_mask is None:
1247
+ attention_mask = input_ids.new_ones(input_shape)
1248
+
1249
+ # cut decoder_input_ids if past_key_values is used
1250
+ if past_key_values is not None:
1251
+ past_length = past_key_values[0][0].shape[2]
1252
+
1253
+ # Some generation methods already pass only the last input ID
1254
+ if input_ids.shape[1] > past_length:
1255
+ remove_prefix_length = past_length
1256
+ else:
1257
+ # Default to old behavior: keep only final ID
1258
+ remove_prefix_length = input_ids.shape[1] - 1
1259
+
1260
+ input_ids = input_ids[:, remove_prefix_length:]
1261
+
1262
+ return {
1263
+ "input_ids": input_ids,
1264
+ "attention_mask": attention_mask,
1265
+ "past_key_values": past_key_values,
1266
+ "use_cache": use_cache,
1267
+ }
1268
+
1269
+ def _reorder_cache(self, past_key_values, beam_idx):
1270
+ reordered_past = ()
1271
+ for layer_past in past_key_values:
1272
+ reordered_past += (
1273
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1274
+ )
1275
+ return reordered_past
1276
+
1277
+
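The loss branch in `BertLMHeadModel.forward` above shifts scores and labels so that token *t* is predicted from positions *< t*. A standalone sketch of that shift on toy tensors (the sizes are made up):

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
prediction_scores = torch.randn(1, 4, vocab_size)  # logits for 4 positions
labels = torch.tensor([[3, 5, 7, 9]])              # the input ids themselves

# Drop the last position's prediction and the first label: token t is scored
# by the logits produced at position t - 1.
shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()

loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(loss)  # scalar causal-LM loss over the 3 predicted tokens
```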
1278
+ @add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
1279
+ class BertForMaskedLM(BertPreTrainedModel):
1280
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1281
+
1282
+ def __init__(self, config):
1283
+ super().__init__(config)
1284
+
1285
+ if config.is_decoder:
1286
+ logger.warning(
1287
+ "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
1288
+ "bi-directional self-attention."
1289
+ )
1290
+
1291
+ self.bert = BertModel(config, add_pooling_layer=False)
1292
+ self.cls = BertOnlyMLMHead(config)
1293
+
1294
+ # Initialize weights and apply final processing
1295
+ self.post_init()
1296
+
1297
+ def get_output_embeddings(self):
1298
+ return self.cls.predictions.decoder
1299
+
1300
+ def set_output_embeddings(self, new_embeddings):
1301
+ self.cls.predictions.decoder = new_embeddings
1302
+
1303
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1304
+ @add_code_sample_docstrings(
1305
+ checkpoint=_CHECKPOINT_FOR_DOC,
1306
+ output_type=MaskedLMOutput,
1307
+ config_class=_CONFIG_FOR_DOC,
1308
+ expected_output="'paris'",
1309
+ expected_loss=0.88,
1310
+ )
1311
+ def forward(
1312
+ self,
1313
+ input_ids: Optional[torch.Tensor] = None,
1314
+ attention_mask: Optional[torch.Tensor] = None,
1315
+ token_type_ids: Optional[torch.Tensor] = None,
1316
+ position_ids: Optional[torch.Tensor] = None,
1317
+ head_mask: Optional[torch.Tensor] = None,
1318
+ inputs_embeds: Optional[torch.Tensor] = None,
1319
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1320
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1321
+ labels: Optional[torch.Tensor] = None,
1322
+ output_attentions: Optional[bool] = None,
1323
+ output_hidden_states: Optional[bool] = None,
1324
+ return_dict: Optional[bool] = None,
1325
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1326
+ r"""
1327
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1328
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1329
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1330
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1331
+ """
1332
+
1333
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1334
+
1335
+ outputs = self.bert(
1336
+ input_ids,
1337
+ attention_mask=attention_mask,
1338
+ token_type_ids=token_type_ids,
1339
+ position_ids=position_ids,
1340
+ head_mask=head_mask,
1341
+ inputs_embeds=inputs_embeds,
1342
+ encoder_hidden_states=encoder_hidden_states,
1343
+ encoder_attention_mask=encoder_attention_mask,
1344
+ output_attentions=output_attentions,
1345
+ output_hidden_states=output_hidden_states,
1346
+ return_dict=return_dict,
1347
+ )
1348
+
1349
+ sequence_output = outputs[0]
1350
+ prediction_scores = self.cls(sequence_output)
1351
+
1352
+ masked_lm_loss = None
1353
+ if labels is not None:
1354
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1355
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1356
+
1357
+ if not return_dict:
1358
+ output = (prediction_scores,) + outputs[2:]
1359
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1360
+
1361
+ return MaskedLMOutput(
1362
+ loss=masked_lm_loss,
1363
+ logits=prediction_scores,
1364
+ hidden_states=outputs.hidden_states,
1365
+ attentions=outputs.attentions,
1366
+ )
1367
+
1368
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1369
+ input_shape = input_ids.shape
1370
+ effective_batch_size = input_shape[0]
1371
+
1372
+ # add a dummy token
1373
+ if self.config.pad_token_id is None:
1374
+ raise ValueError("The PAD token should be defined for generation")
1375
+
1376
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1377
+ dummy_token = torch.full(
1378
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1379
+ )
1380
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1381
+
1382
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1383
+
1384
+
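A short sketch of the `-100` labelling convention from the `labels` docstring above, assuming the public `google-bert/bert-base-uncased` checkpoint (the `expected_output`/`expected_loss` quoted in the decorator correspond to this kind of fill-mask query):

```python
import torch
from transformers import AutoTokenizer, BertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = BertForMaskedLM.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")

labels = torch.full_like(inputs["input_ids"], -100)          # ignore every position...
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)
labels[mask_pos] = tokenizer.convert_tokens_to_ids("paris")  # ...except the [MASK] slot

outputs = model(**inputs, labels=labels)
print(outputs.loss)                                          # loss over the single masked token
print(tokenizer.decode(outputs.logits[mask_pos].argmax(-1)))  # "paris"
```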
1385
+ @add_start_docstrings(
1386
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
1387
+ BERT_START_DOCSTRING,
1388
+ )
1389
+ class BertForNextSentencePrediction(BertPreTrainedModel):
1390
+ def __init__(self, config):
1391
+ super().__init__(config)
1392
+
1393
+ self.bert = BertModel(config)
1394
+ self.cls = BertOnlyNSPHead(config)
1395
+
1396
+ # Initialize weights and apply final processing
1397
+ self.post_init()
1398
+
1399
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1400
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1401
+ def forward(
1402
+ self,
1403
+ input_ids: Optional[torch.Tensor] = None,
1404
+ attention_mask: Optional[torch.Tensor] = None,
1405
+ token_type_ids: Optional[torch.Tensor] = None,
1406
+ position_ids: Optional[torch.Tensor] = None,
1407
+ head_mask: Optional[torch.Tensor] = None,
1408
+ inputs_embeds: Optional[torch.Tensor] = None,
1409
+ labels: Optional[torch.Tensor] = None,
1410
+ output_attentions: Optional[bool] = None,
1411
+ output_hidden_states: Optional[bool] = None,
1412
+ return_dict: Optional[bool] = None,
1413
+ **kwargs,
1414
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
1415
+ r"""
1416
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1417
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1418
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1419
+
1420
+ - 0 indicates sequence B is a continuation of sequence A,
1421
+ - 1 indicates sequence B is a random sequence.
1422
+
1423
+ Returns:
1424
+
1425
+ Example:
1426
+
1427
+ ```python
1428
+ >>> from transformers import AutoTokenizer, BertForNextSentencePrediction
1429
+ >>> import torch
1430
+
1431
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1432
+ >>> model = BertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1433
+
1434
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1435
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1436
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1437
+
1438
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1439
+ >>> logits = outputs.logits
1440
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1441
+ ```
1442
+ """
1443
+
1444
+ if "next_sentence_label" in kwargs:
1445
+ warnings.warn(
1446
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1447
+ " `labels` instead.",
1448
+ FutureWarning,
1449
+ )
1450
+ labels = kwargs.pop("next_sentence_label")
1451
+
1452
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1453
+
1454
+ outputs = self.bert(
1455
+ input_ids,
1456
+ attention_mask=attention_mask,
1457
+ token_type_ids=token_type_ids,
1458
+ position_ids=position_ids,
1459
+ head_mask=head_mask,
1460
+ inputs_embeds=inputs_embeds,
1461
+ output_attentions=output_attentions,
1462
+ output_hidden_states=output_hidden_states,
1463
+ return_dict=return_dict,
1464
+ )
1465
+
1466
+ pooled_output = outputs[1]
1467
+
1468
+ seq_relationship_scores = self.cls(pooled_output)
1469
+
1470
+ next_sentence_loss = None
1471
+ if labels is not None:
1472
+ loss_fct = CrossEntropyLoss()
1473
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1474
+
1475
+ if not return_dict:
1476
+ output = (seq_relationship_scores,) + outputs[2:]
1477
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1478
+
1479
+ return NextSentencePredictorOutput(
1480
+ loss=next_sentence_loss,
1481
+ logits=seq_relationship_scores,
1482
+ hidden_states=outputs.hidden_states,
1483
+ attentions=outputs.attentions,
1484
+ )
1485
+
1486
+
1487
+ @add_start_docstrings(
1488
+ """
1489
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1490
+ output) e.g. for GLUE tasks.
1491
+ """,
1492
+ BERT_START_DOCSTRING,
1493
+ )
1494
+ class BertForSequenceClassification(BertPreTrainedModel):
1495
+ def __init__(self, config):
1496
+ super().__init__(config)
1497
+ self.num_labels = config.num_labels
1498
+ self.config = config
1499
+
1500
+ self.bert = BertModel(config)
1501
+ classifier_dropout = (
1502
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1503
+ )
1504
+ self.dropout = nn.Dropout(classifier_dropout)
1505
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1506
+
1507
+ # Initialize weights and apply final processing
1508
+ self.post_init()
1509
+
1510
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1511
+ @add_code_sample_docstrings(
1512
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1513
+ output_type=SequenceClassifierOutput,
1514
+ config_class=_CONFIG_FOR_DOC,
1515
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1516
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1517
+ )
1518
+ def forward(
1519
+ self,
1520
+ input_ids: Optional[torch.Tensor] = None,
1521
+ attention_mask: Optional[torch.Tensor] = None,
1522
+ token_type_ids: Optional[torch.Tensor] = None,
1523
+ position_ids: Optional[torch.Tensor] = None,
1524
+ head_mask: Optional[torch.Tensor] = None,
1525
+ inputs_embeds: Optional[torch.Tensor] = None,
1526
+ labels: Optional[torch.Tensor] = None,
1527
+ output_attentions: Optional[bool] = None,
1528
+ output_hidden_states: Optional[bool] = None,
1529
+ return_dict: Optional[bool] = None,
1530
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1531
+ r"""
1532
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1533
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1534
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1535
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1536
+ """
1537
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1538
+
1539
+ outputs = self.bert(
1540
+ input_ids,
1541
+ attention_mask=attention_mask,
1542
+ token_type_ids=token_type_ids,
1543
+ position_ids=position_ids,
1544
+ head_mask=head_mask,
1545
+ inputs_embeds=inputs_embeds,
1546
+ output_attentions=output_attentions,
1547
+ output_hidden_states=output_hidden_states,
1548
+ return_dict=return_dict,
1549
+ )
1550
+
1551
+ pooled_output = outputs[1]
1552
+
1553
+ pooled_output = self.dropout(pooled_output)
1554
+ logits = self.classifier(pooled_output)
1555
+
1556
+ loss = None
1557
+ if labels is not None:
1558
+ if self.config.problem_type is None:
1559
+ if self.num_labels == 1:
1560
+ self.config.problem_type = "regression"
1561
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1562
+ self.config.problem_type = "single_label_classification"
1563
+ else:
1564
+ self.config.problem_type = "multi_label_classification"
1565
+
1566
+ if self.config.problem_type == "regression":
1567
+ loss_fct = MSELoss()
1568
+ if self.num_labels == 1:
1569
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1570
+ else:
1571
+ loss = loss_fct(logits, labels)
1572
+ elif self.config.problem_type == "single_label_classification":
1573
+ loss_fct = CrossEntropyLoss()
1574
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1575
+ elif self.config.problem_type == "multi_label_classification":
1576
+ loss_fct = BCEWithLogitsLoss()
1577
+ loss = loss_fct(logits, labels)
1578
+ if not return_dict:
1579
+ output = (logits,) + outputs[2:]
1580
+ return ((loss,) + output) if loss is not None else output
1581
+
1582
+ return SequenceClassifierOutput(
1583
+ loss=loss,
1584
+ logits=logits,
1585
+ hidden_states=outputs.hidden_states,
1586
+ attentions=outputs.attentions,
1587
+ )
1588
+
1589
+
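The `problem_type` branches in the forward above pick between MSE, cross-entropy and BCE-with-logits. A standalone sketch of the three cases on toy tensors (shapes are illustrative):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

logits = torch.randn(2, 3)  # batch of 2, num_labels = 3

# single_label_classification: integer class ids -> cross-entropy
print(CrossEntropyLoss()(logits.view(-1, 3), torch.tensor([0, 2])))

# multi_label_classification: float multi-hot targets -> BCE with logits
print(BCEWithLogitsLoss()(logits, torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])))

# regression (num_labels == 1): squeeze the singleton dimension and use MSE
reg_logits = torch.randn(2, 1)
print(MSELoss()(reg_logits.squeeze(), torch.tensor([0.5, -1.2])))
```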
1590
+ @add_start_docstrings(
1591
+ """
1592
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1593
+ softmax) e.g. for RocStories/SWAG tasks.
1594
+ """,
1595
+ BERT_START_DOCSTRING,
1596
+ )
1597
+ class BertForMultipleChoice(BertPreTrainedModel):
1598
+ def __init__(self, config):
1599
+ super().__init__(config)
1600
+
1601
+ self.bert = BertModel(config)
1602
+ classifier_dropout = (
1603
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1604
+ )
1605
+ self.dropout = nn.Dropout(classifier_dropout)
1606
+ self.classifier = nn.Linear(config.hidden_size, 1)
1607
+
1608
+ # Initialize weights and apply final processing
1609
+ self.post_init()
1610
+
1611
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1612
+ @add_code_sample_docstrings(
1613
+ checkpoint=_CHECKPOINT_FOR_DOC,
1614
+ output_type=MultipleChoiceModelOutput,
1615
+ config_class=_CONFIG_FOR_DOC,
1616
+ )
1617
+ def forward(
1618
+ self,
1619
+ input_ids: Optional[torch.Tensor] = None,
1620
+ attention_mask: Optional[torch.Tensor] = None,
1621
+ token_type_ids: Optional[torch.Tensor] = None,
1622
+ position_ids: Optional[torch.Tensor] = None,
1623
+ head_mask: Optional[torch.Tensor] = None,
1624
+ inputs_embeds: Optional[torch.Tensor] = None,
1625
+ labels: Optional[torch.Tensor] = None,
1626
+ output_attentions: Optional[bool] = None,
1627
+ output_hidden_states: Optional[bool] = None,
1628
+ return_dict: Optional[bool] = None,
1629
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1630
+ r"""
1631
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1632
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1633
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1634
+ `input_ids` above)
1635
+ """
1636
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1637
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1638
+
1639
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1640
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1641
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1642
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1643
+ inputs_embeds = (
1644
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1645
+ if inputs_embeds is not None
1646
+ else None
1647
+ )
1648
+
1649
+ outputs = self.bert(
1650
+ input_ids,
1651
+ attention_mask=attention_mask,
1652
+ token_type_ids=token_type_ids,
1653
+ position_ids=position_ids,
1654
+ head_mask=head_mask,
1655
+ inputs_embeds=inputs_embeds,
1656
+ output_attentions=output_attentions,
1657
+ output_hidden_states=output_hidden_states,
1658
+ return_dict=return_dict,
1659
+ )
1660
+
1661
+ pooled_output = outputs[1]
1662
+
1663
+ pooled_output = self.dropout(pooled_output)
1664
+ logits = self.classifier(pooled_output)
1665
+ reshaped_logits = logits.view(-1, num_choices)
1666
+
1667
+ loss = None
1668
+ if labels is not None:
1669
+ loss_fct = CrossEntropyLoss()
1670
+ loss = loss_fct(reshaped_logits, labels)
1671
+
1672
+ if not return_dict:
1673
+ output = (reshaped_logits,) + outputs[2:]
1674
+ return ((loss,) + output) if loss is not None else output
1675
+
1676
+ return MultipleChoiceModelOutput(
1677
+ loss=loss,
1678
+ logits=reshaped_logits,
1679
+ hidden_states=outputs.hidden_states,
1680
+ attentions=outputs.attentions,
1681
+ )
1682
+
1683
+
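The forward above flattens the choice dimension into the batch, scores each (example, choice) pair with a single-unit linear head, then folds the scores back to `(batch_size, num_choices)`. A small standalone sketch of that reshaping (random stand-ins, not a real BERT forward pass):

```python
import torch

batch_size, num_choices, seq_len, hidden = 2, 4, 6, 8
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

flat_ids = input_ids.view(-1, input_ids.size(-1))      # (8, 6): one row per (example, choice)
pooled_output = torch.randn(flat_ids.size(0), hidden)  # stand-in for BERT's pooled output
logits = torch.nn.Linear(hidden, 1)(pooled_output)     # one score per row
reshaped_logits = logits.view(-1, num_choices)         # (2, 4): cross-entropy is taken over choices

print(reshaped_logits.shape)  # torch.Size([2, 4])
```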
1684
+ @add_start_docstrings(
1685
+ """
1686
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1687
+ Named-Entity-Recognition (NER) tasks.
1688
+ """,
1689
+ BERT_START_DOCSTRING,
1690
+ )
1691
+ class BertForTokenClassification(BertPreTrainedModel):
1692
+ def __init__(self, config):
1693
+ super().__init__(config)
1694
+ self.num_labels = config.num_labels
1695
+
1696
+ self.bert = BertModel(config, add_pooling_layer=False)
1697
+ classifier_dropout = (
1698
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1699
+ )
1700
+ self.dropout = nn.Dropout(classifier_dropout)
1701
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1702
+
1703
+ # Initialize weights and apply final processing
1704
+ self.post_init()
1705
+
1706
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1707
+ @add_code_sample_docstrings(
1708
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1709
+ output_type=TokenClassifierOutput,
1710
+ config_class=_CONFIG_FOR_DOC,
1711
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1712
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1713
+ )
1714
+ def forward(
1715
+ self,
1716
+ input_ids: Optional[torch.Tensor] = None,
1717
+ attention_mask: Optional[torch.Tensor] = None,
1718
+ token_type_ids: Optional[torch.Tensor] = None,
1719
+ position_ids: Optional[torch.Tensor] = None,
1720
+ head_mask: Optional[torch.Tensor] = None,
1721
+ inputs_embeds: Optional[torch.Tensor] = None,
1722
+ labels: Optional[torch.Tensor] = None,
1723
+ output_attentions: Optional[bool] = None,
1724
+ output_hidden_states: Optional[bool] = None,
1725
+ return_dict: Optional[bool] = None,
1726
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1727
+ r"""
1728
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1729
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1730
+ """
1731
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1732
+
1733
+ outputs = self.bert(
1734
+ input_ids,
1735
+ attention_mask=attention_mask,
1736
+ token_type_ids=token_type_ids,
1737
+ position_ids=position_ids,
1738
+ head_mask=head_mask,
1739
+ inputs_embeds=inputs_embeds,
1740
+ output_attentions=output_attentions,
1741
+ output_hidden_states=output_hidden_states,
1742
+ return_dict=return_dict,
1743
+ )
1744
+
1745
+ sequence_output = outputs[0]
1746
+
1747
+ sequence_output = self.dropout(sequence_output)
1748
+ logits = self.classifier(sequence_output)
1749
+
1750
+ loss = None
1751
+ if labels is not None:
1752
+ loss_fct = CrossEntropyLoss()
1753
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1754
+
1755
+ if not return_dict:
1756
+ output = (logits,) + outputs[2:]
1757
+ return ((loss,) + output) if loss is not None else output
1758
+
1759
+ return TokenClassifierOutput(
1760
+ loss=loss,
1761
+ logits=logits,
1762
+ hidden_states=outputs.hidden_states,
1763
+ attentions=outputs.attentions,
1764
+ )
1765
+
1766
+
1767
+ @add_start_docstrings(
1768
+ """
1769
+ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1770
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1771
+ """,
1772
+ BERT_START_DOCSTRING,
1773
+ )
1774
+ class BertForQuestionAnswering(BertPreTrainedModel):
1775
+ def __init__(self, config):
1776
+ super().__init__(config)
1777
+ self.num_labels = config.num_labels
1778
+
1779
+ self.bert = BertModel(config, add_pooling_layer=False)
1780
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1781
+
1782
+ # Initialize weights and apply final processing
1783
+ self.post_init()
1784
+
1785
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1786
+ @add_code_sample_docstrings(
1787
+ checkpoint=_CHECKPOINT_FOR_QA,
1788
+ output_type=QuestionAnsweringModelOutput,
1789
+ config_class=_CONFIG_FOR_DOC,
1790
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1791
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1792
+ expected_output=_QA_EXPECTED_OUTPUT,
1793
+ expected_loss=_QA_EXPECTED_LOSS,
1794
+ )
1795
+ def forward(
1796
+ self,
1797
+ input_ids: Optional[torch.Tensor] = None,
1798
+ attention_mask: Optional[torch.Tensor] = None,
1799
+ token_type_ids: Optional[torch.Tensor] = None,
1800
+ position_ids: Optional[torch.Tensor] = None,
1801
+ head_mask: Optional[torch.Tensor] = None,
1802
+ inputs_embeds: Optional[torch.Tensor] = None,
1803
+ start_positions: Optional[torch.Tensor] = None,
1804
+ end_positions: Optional[torch.Tensor] = None,
1805
+ output_attentions: Optional[bool] = None,
1806
+ output_hidden_states: Optional[bool] = None,
1807
+ return_dict: Optional[bool] = None,
1808
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1809
+ r"""
1810
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1811
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1812
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1813
+ are not taken into account for computing the loss.
1814
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1815
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1816
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1817
+ are not taken into account for computing the loss.
1818
+ """
1819
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1820
+
1821
+ outputs = self.bert(
1822
+ input_ids,
1823
+ attention_mask=attention_mask,
1824
+ token_type_ids=token_type_ids,
1825
+ position_ids=position_ids,
1826
+ head_mask=head_mask,
1827
+ inputs_embeds=inputs_embeds,
1828
+ output_attentions=output_attentions,
1829
+ output_hidden_states=output_hidden_states,
1830
+ return_dict=return_dict,
1831
+ )
1832
+
1833
+ sequence_output = outputs[0]
1834
+
1835
+ logits = self.qa_outputs(sequence_output)
1836
+ start_logits, end_logits = logits.split(1, dim=-1)
1837
+ start_logits = start_logits.squeeze(-1).contiguous()
1838
+ end_logits = end_logits.squeeze(-1).contiguous()
1839
+
1840
+ total_loss = None
1841
+ if start_positions is not None and end_positions is not None:
1842
+ # If we are on multi-GPU, splitting adds a dimension; squeeze it
1843
+ if len(start_positions.size()) > 1:
1844
+ start_positions = start_positions.squeeze(-1)
1845
+ if len(end_positions.size()) > 1:
1846
+ end_positions = end_positions.squeeze(-1)
1847
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1848
+ ignored_index = start_logits.size(1)
1849
+ start_positions = start_positions.clamp(0, ignored_index)
1850
+ end_positions = end_positions.clamp(0, ignored_index)
1851
+
1852
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1853
+ start_loss = loss_fct(start_logits, start_positions)
1854
+ end_loss = loss_fct(end_logits, end_positions)
1855
+ total_loss = (start_loss + end_loss) / 2
1856
+
1857
+ if not return_dict:
1858
+ output = (start_logits, end_logits) + outputs[2:]
1859
+ return ((total_loss,) + output) if total_loss is not None else output
1860
+
1861
+ return QuestionAnsweringModelOutput(
1862
+ loss=total_loss,
1863
+ start_logits=start_logits,
1864
+ end_logits=end_logits,
1865
+ hidden_states=outputs.hidden_states,
1866
+ attentions=outputs.attentions,
1867
+ )
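A standalone sketch of the span head used in the forward above: one `Linear(hidden, 2)` yields start and end logits, and gold positions are clamped so that out-of-range answers fall on an ignored index (toy sizes, random tensors):

```python
import torch
from torch.nn import CrossEntropyLoss

seq_len, hidden = 10, 16
sequence_output = torch.randn(1, seq_len, hidden)  # stand-in for BERT's last hidden state

qa_outputs = torch.nn.Linear(hidden, 2)
logits = qa_outputs(sequence_output)               # (1, 10, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)            # (1, 10)
end_logits = end_logits.squeeze(-1)

start_positions = torch.tensor([3])
end_positions = torch.tensor([7])
ignored_index = start_logits.size(1)               # positions past the sequence are ignored
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```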
venv/lib/python3.10/site-packages/transformers/models/bert/modeling_flax_bert.py ADDED
@@ -0,0 +1,1713 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Callable, Optional, Tuple
17
+
18
+ import flax
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen import combine_masks, make_causal_mask
25
+ from flax.linen import partitioning as nn_partitioning
26
+ from flax.linen.attention import dot_product_attention_weights
27
+ from flax.traverse_util import flatten_dict, unflatten_dict
28
+ from jax import lax
29
+
30
+ from ...modeling_flax_outputs import (
31
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
32
+ FlaxBaseModelOutputWithPooling,
33
+ FlaxBaseModelOutputWithPoolingAndCrossAttentions,
34
+ FlaxCausalLMOutputWithCrossAttentions,
35
+ FlaxMaskedLMOutput,
36
+ FlaxMultipleChoiceModelOutput,
37
+ FlaxNextSentencePredictorOutput,
38
+ FlaxQuestionAnsweringModelOutput,
39
+ FlaxSequenceClassifierOutput,
40
+ FlaxTokenClassifierOutput,
41
+ )
42
+ from ...modeling_flax_utils import (
43
+ ACT2FN,
44
+ FlaxPreTrainedModel,
45
+ append_call_sample_docstring,
46
+ append_replace_return_docstrings,
47
+ overwrite_call_docstring,
48
+ )
49
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
50
+ from .configuration_bert import BertConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
56
+ _CONFIG_FOR_DOC = "BertConfig"
57
+
58
+ remat = nn_partitioning.remat
59
+
60
+
61
+ @flax.struct.dataclass
62
+ class FlaxBertForPreTrainingOutput(ModelOutput):
63
+ """
64
+ Output type of [`BertForPreTraining`].
65
+
66
+ Args:
67
+ prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
68
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
69
+ seq_relationship_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
70
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
71
+ before SoftMax).
72
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
73
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
74
+ `(batch_size, sequence_length, hidden_size)`.
75
+
76
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
77
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
78
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
79
+ sequence_length)`.
80
+
81
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
82
+ heads.
83
+ """
84
+
85
+ prediction_logits: jnp.ndarray = None
86
+ seq_relationship_logits: jnp.ndarray = None
87
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
88
+ attentions: Optional[Tuple[jnp.ndarray]] = None
89
+
90
+
91
+ BERT_START_DOCSTRING = r"""
92
+
93
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
94
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
95
+
96
+ This model is also a
97
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
98
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
99
+ behavior.
100
+
101
+ Finally, this model supports inherent JAX features such as:
102
+
103
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
104
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
105
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
106
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
107
+
108
+ Parameters:
109
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
110
+ Initializing with a config file does not load the weights associated with the model, only the
111
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
112
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
113
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
114
+ `jax.numpy.bfloat16` (on TPUs).
115
+
116
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
117
+ specified all the computation will be performed with the given `dtype`.
118
+
119
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
120
+ parameters.**
121
+
122
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
123
+ [`~FlaxPreTrainedModel.to_bf16`].
136
+
137
+ """
138
+
139
+ BERT_INPUTS_DOCSTRING = r"""
140
+ Args:
141
+ input_ids (`numpy.ndarray` of shape `({0})`):
142
+ Indices of input sequence tokens in the vocabulary.
143
+
144
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
145
+ [`PreTrainedTokenizer.__call__`] for details.
146
+
147
+ [What are input IDs?](../glossary#input-ids)
148
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
149
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
150
+
151
+ - 1 for tokens that are **not masked**,
152
+ - 0 for tokens that are **masked**.
153
+
154
+ [What are attention masks?](../glossary#attention-mask)
155
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
156
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
157
+ 1]`:
158
+
159
+ - 0 corresponds to a *sentence A* token,
160
+ - 1 corresponds to a *sentence B* token.
161
+
162
+ [What are token type IDs?](../glossary#token-type-ids)
163
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
164
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
165
+ config.max_position_embeddings - 1]`.
166
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
167
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
168
+
169
+ - 1 indicates the head is **not masked**,
170
+ - 0 indicates the head is **masked**.
171
+
172
+ return_dict (`bool`, *optional*):
173
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
174
+
175
+ """
176
+
177
+
178
+ class FlaxBertEmbeddings(nn.Module):
179
+ """Construct the embeddings from word, position and token_type embeddings."""
180
+
181
+ config: BertConfig
182
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
183
+
184
+ def setup(self):
185
+ self.word_embeddings = nn.Embed(
186
+ self.config.vocab_size,
187
+ self.config.hidden_size,
188
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
189
+ dtype=self.dtype,
190
+ )
191
+ self.position_embeddings = nn.Embed(
192
+ self.config.max_position_embeddings,
193
+ self.config.hidden_size,
194
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
195
+ dtype=self.dtype,
196
+ )
197
+ self.token_type_embeddings = nn.Embed(
198
+ self.config.type_vocab_size,
199
+ self.config.hidden_size,
200
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
201
+ dtype=self.dtype,
202
+ )
203
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
204
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
205
+
206
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
207
+ # Embed
208
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
209
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
210
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
211
+
212
+ # Sum all embeddings
213
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
214
+
215
+ # Layer Norm
216
+ hidden_states = self.LayerNorm(hidden_states)
217
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
218
+ return hidden_states
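# Illustrative sketch (toy shapes, not from the diff): the embedding module above simply
# sums the word, token-type and position embeddings element-wise before LayerNorm and dropout.
import jax.numpy as jnp

word = jnp.ones((1, 4, 8))              # (batch, seq_len, hidden_size)
token_type = jnp.zeros((1, 4, 8))
position = jnp.full((1, 4, 8), 0.5)
hidden_states = word + token_type + position   # still (1, 4, 8)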
219
+
220
+
221
+ class FlaxBertSelfAttention(nn.Module):
222
+ config: BertConfig
223
+ causal: bool = False
224
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
225
+
226
+ def setup(self):
227
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
228
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
229
+ raise ValueError(
230
+ "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
231
+ " : {self.config.num_attention_heads}"
232
+ )
233
+
234
+ self.query = nn.Dense(
235
+ self.config.hidden_size,
236
+ dtype=self.dtype,
237
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
238
+ )
239
+ self.key = nn.Dense(
240
+ self.config.hidden_size,
241
+ dtype=self.dtype,
242
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
243
+ )
244
+ self.value = nn.Dense(
245
+ self.config.hidden_size,
246
+ dtype=self.dtype,
247
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
248
+ )
249
+
250
+ if self.causal:
251
+ self.causal_mask = make_causal_mask(
252
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
253
+ )
254
+
255
+ def _split_heads(self, hidden_states):
256
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
257
+
258
+ def _merge_heads(self, hidden_states):
259
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
260
+
261
+ @nn.compact
262
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
263
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
264
+ """
265
+ This function takes projected key, value states from a single input token and concatenates the states to cached
266
+ states from previous steps. This function is slightly adapted from the official Flax repository:
267
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
268
+ """
269
+ # detect if we're initializing by absence of existing cache data.
270
+ is_initialized = self.has_variable("cache", "cached_key")
271
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
272
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
273
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
274
+
275
+ if is_initialized:
276
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
277
+ # update key, value caches with our new 1d spatial slices
278
+ cur_index = cache_index.value
279
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
280
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
281
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
282
+ cached_key.value = key
283
+ cached_value.value = value
284
+ num_updated_cache_vectors = query.shape[1]
285
+ cache_index.value = cache_index.value + num_updated_cache_vectors
286
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
287
+ pad_mask = jnp.broadcast_to(
288
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
289
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
290
+ )
291
+ attention_mask = combine_masks(pad_mask, attention_mask)
292
+ return key, value, attention_mask
293
+
294
+ def __call__(
295
+ self,
296
+ hidden_states,
297
+ attention_mask,
298
+ layer_head_mask,
299
+ key_value_states: Optional[jnp.ndarray] = None,
300
+ init_cache: bool = False,
301
+ deterministic=True,
302
+ output_attentions: bool = False,
303
+ ):
304
+ # if key_value_states are provided this layer is used as a cross-attention layer
305
+ # for the decoder
306
+ is_cross_attention = key_value_states is not None
307
+ batch_size = hidden_states.shape[0]
308
+
309
+ # get query proj
310
+ query_states = self.query(hidden_states)
311
+ # get key, value proj
312
+ if is_cross_attention:
313
+ # cross_attentions
314
+ key_states = self.key(key_value_states)
315
+ value_states = self.value(key_value_states)
316
+ else:
317
+ # self_attention
318
+ key_states = self.key(hidden_states)
319
+ value_states = self.value(hidden_states)
320
+
321
+ query_states = self._split_heads(query_states)
322
+ key_states = self._split_heads(key_states)
323
+ value_states = self._split_heads(value_states)
324
+
325
+ # handle the cache and prepare the causal attention mask
326
+ if self.causal:
327
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
328
+ if self.has_variable("cache", "cached_key"):
329
+ mask_shift = self.variables["cache"]["cache_index"]
330
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
331
+ causal_mask = lax.dynamic_slice(
332
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
333
+ )
334
+ else:
335
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
336
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
337
+
338
+ # combine masks if needed
339
+ if attention_mask is not None and self.causal:
340
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
341
+ attention_mask = combine_masks(attention_mask, causal_mask)
342
+ elif self.causal:
343
+ attention_mask = causal_mask
344
+ elif attention_mask is not None:
345
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
346
+
347
+ # During fast autoregressive decoding, we feed one position at a time,
348
+ # and cache the keys and values step by step.
349
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
350
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
351
+ key_states, value_states, query_states, attention_mask
352
+ )
353
+
354
+ # Convert the boolean attention mask to an attention bias.
355
+ if attention_mask is not None:
356
+ # attention mask in the form of attention bias
357
+ attention_bias = lax.select(
358
+ attention_mask > 0,
359
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
360
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
361
+ )
362
+ else:
363
+ attention_bias = None
364
+
365
+ dropout_rng = None
366
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
367
+ dropout_rng = self.make_rng("dropout")
368
+
369
+ attn_weights = dot_product_attention_weights(
370
+ query_states,
371
+ key_states,
372
+ bias=attention_bias,
373
+ dropout_rng=dropout_rng,
374
+ dropout_rate=self.config.attention_probs_dropout_prob,
375
+ broadcast_dropout=True,
376
+ deterministic=deterministic,
377
+ dtype=self.dtype,
378
+ precision=None,
379
+ )
380
+
381
+ # Mask heads if we want to
382
+ if layer_head_mask is not None:
383
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
384
+
385
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
386
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
387
+
388
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
389
+ return outputs
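# Illustrative sketch (toy mask, not from the diff): the boolean attention mask above is
# converted into an additive bias, mapping 1 -> 0.0 and 0 -> a very large negative number,
# so padded key positions vanish after the softmax.
import jax.numpy as jnp
from jax import lax

mask = jnp.array([[1, 1, 0]])                    # last position is padding
attention_bias = lax.select(
    mask > 0,
    jnp.full(mask.shape, 0.0),
    jnp.full(mask.shape, jnp.finfo(jnp.float32).min),
)
# attention_bias ~= [[0., 0., -3.4e38]]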
390
+
391
+
392
+ class FlaxBertSelfOutput(nn.Module):
393
+ config: BertConfig
394
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
395
+
396
+ def setup(self):
397
+ self.dense = nn.Dense(
398
+ self.config.hidden_size,
399
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
400
+ dtype=self.dtype,
401
+ )
402
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
403
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
404
+
405
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
406
+ hidden_states = self.dense(hidden_states)
407
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
408
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
409
+ return hidden_states
410
+
411
+
412
+ class FlaxBertAttention(nn.Module):
413
+ config: BertConfig
414
+ causal: bool = False
415
+ dtype: jnp.dtype = jnp.float32
416
+
417
+ def setup(self):
418
+ self.self = FlaxBertSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
419
+ self.output = FlaxBertSelfOutput(self.config, dtype=self.dtype)
420
+
421
+ def __call__(
422
+ self,
423
+ hidden_states,
424
+ attention_mask,
425
+ layer_head_mask,
426
+ key_value_states=None,
427
+ init_cache=False,
428
+ deterministic=True,
429
+ output_attentions: bool = False,
430
+ ):
431
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
432
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
433
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
434
+ attn_outputs = self.self(
435
+ hidden_states,
436
+ attention_mask,
437
+ layer_head_mask=layer_head_mask,
438
+ key_value_states=key_value_states,
439
+ init_cache=init_cache,
440
+ deterministic=deterministic,
441
+ output_attentions=output_attentions,
442
+ )
443
+ attn_output = attn_outputs[0]
444
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
445
+
446
+ outputs = (hidden_states,)
447
+
448
+ if output_attentions:
449
+ outputs += (attn_outputs[1],)
450
+
451
+ return outputs
452
+
453
+
454
+ class FlaxBertIntermediate(nn.Module):
455
+ config: BertConfig
456
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
457
+
458
+ def setup(self):
459
+ self.dense = nn.Dense(
460
+ self.config.intermediate_size,
461
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
462
+ dtype=self.dtype,
463
+ )
464
+ self.activation = ACT2FN[self.config.hidden_act]
465
+
466
+ def __call__(self, hidden_states):
467
+ hidden_states = self.dense(hidden_states)
468
+ hidden_states = self.activation(hidden_states)
469
+ return hidden_states
470
+
471
+
472
+ class FlaxBertOutput(nn.Module):
473
+ config: BertConfig
474
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
475
+
476
+ def setup(self):
477
+ self.dense = nn.Dense(
478
+ self.config.hidden_size,
479
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
480
+ dtype=self.dtype,
481
+ )
482
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
483
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
484
+
485
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
486
+ hidden_states = self.dense(hidden_states)
487
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
488
+ hidden_states = self.LayerNorm(hidden_states + attention_output)
489
+ return hidden_states
490
+
491
+
492
+ class FlaxBertLayer(nn.Module):
493
+ config: BertConfig
494
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
495
+
496
+ def setup(self):
497
+ self.attention = FlaxBertAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
498
+ self.intermediate = FlaxBertIntermediate(self.config, dtype=self.dtype)
499
+ self.output = FlaxBertOutput(self.config, dtype=self.dtype)
500
+ if self.config.add_cross_attention:
501
+ self.crossattention = FlaxBertAttention(self.config, causal=False, dtype=self.dtype)
502
+
503
+ def __call__(
504
+ self,
505
+ hidden_states,
506
+ attention_mask,
507
+ layer_head_mask,
508
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
509
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
510
+ init_cache: bool = False,
511
+ deterministic: bool = True,
512
+ output_attentions: bool = False,
513
+ ):
514
+ # Self Attention
515
+ attention_outputs = self.attention(
516
+ hidden_states,
517
+ attention_mask,
518
+ layer_head_mask=layer_head_mask,
519
+ init_cache=init_cache,
520
+ deterministic=deterministic,
521
+ output_attentions=output_attentions,
522
+ )
523
+ attention_output = attention_outputs[0]
524
+
525
+ # Cross-Attention Block
526
+ if encoder_hidden_states is not None:
527
+ cross_attention_outputs = self.crossattention(
528
+ attention_output,
529
+ attention_mask=encoder_attention_mask,
530
+ layer_head_mask=layer_head_mask,
531
+ key_value_states=encoder_hidden_states,
532
+ deterministic=deterministic,
533
+ output_attentions=output_attentions,
534
+ )
535
+ attention_output = cross_attention_outputs[0]
536
+
537
+ hidden_states = self.intermediate(attention_output)
538
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
539
+
540
+ outputs = (hidden_states,)
541
+
542
+ if output_attentions:
543
+ outputs += (attention_outputs[1],)
544
+ if encoder_hidden_states is not None:
545
+ outputs += (cross_attention_outputs[1],)
546
+ return outputs
547
+
548
+
549
+ class FlaxBertLayerCollection(nn.Module):
550
+ config: BertConfig
551
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
552
+ gradient_checkpointing: bool = False
553
+
554
+ def setup(self):
555
+ if self.gradient_checkpointing:
556
+ FlaxBertCheckpointLayer = remat(FlaxBertLayer, static_argnums=(5, 6, 7))
557
+ self.layers = [
558
+ FlaxBertCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
559
+ for i in range(self.config.num_hidden_layers)
560
+ ]
561
+ else:
562
+ self.layers = [
563
+ FlaxBertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
564
+ ]
565
+
566
+ def __call__(
567
+ self,
568
+ hidden_states,
569
+ attention_mask,
570
+ head_mask,
571
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
572
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
573
+ init_cache: bool = False,
574
+ deterministic: bool = True,
575
+ output_attentions: bool = False,
576
+ output_hidden_states: bool = False,
577
+ return_dict: bool = True,
578
+ ):
579
+ all_attentions = () if output_attentions else None
580
+ all_hidden_states = () if output_hidden_states else None
581
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
582
+
583
+ # Check if head_mask has a correct number of layers specified if desired
584
+ if head_mask is not None:
585
+ if head_mask.shape[0] != len(self.layers):
586
+ raise ValueError(
587
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
588
+ f"{head_mask.shape[0]}."
589
+ )
590
+
591
+ for i, layer in enumerate(self.layers):
592
+ if output_hidden_states:
593
+ all_hidden_states += (hidden_states,)
594
+
595
+ layer_outputs = layer(
596
+ hidden_states,
597
+ attention_mask,
598
+ head_mask[i] if head_mask is not None else None,
599
+ encoder_hidden_states,
600
+ encoder_attention_mask,
601
+ init_cache,
602
+ deterministic,
603
+ output_attentions,
604
+ )
605
+
606
+ hidden_states = layer_outputs[0]
607
+
608
+ if output_attentions:
609
+ all_attentions += (layer_outputs[1],)
610
+
611
+ if encoder_hidden_states is not None:
612
+ all_cross_attentions += (layer_outputs[2],)
613
+
614
+ if output_hidden_states:
615
+ all_hidden_states += (hidden_states,)
616
+
617
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
618
+
619
+ if not return_dict:
620
+ return tuple(v for v in outputs if v is not None)
621
+
622
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
623
+ last_hidden_state=hidden_states,
624
+ hidden_states=all_hidden_states,
625
+ attentions=all_attentions,
626
+ cross_attentions=all_cross_attentions,
627
+ )
628
+
629
+
630
+ class FlaxBertEncoder(nn.Module):
631
+ config: BertConfig
632
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
633
+ gradient_checkpointing: bool = False
634
+
635
+ def setup(self):
636
+ self.layer = FlaxBertLayerCollection(
637
+ self.config,
638
+ dtype=self.dtype,
639
+ gradient_checkpointing=self.gradient_checkpointing,
640
+ )
641
+
642
+ def __call__(
643
+ self,
644
+ hidden_states,
645
+ attention_mask,
646
+ head_mask,
647
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
648
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
649
+ init_cache: bool = False,
650
+ deterministic: bool = True,
651
+ output_attentions: bool = False,
652
+ output_hidden_states: bool = False,
653
+ return_dict: bool = True,
654
+ ):
655
+ return self.layer(
656
+ hidden_states,
657
+ attention_mask,
658
+ head_mask=head_mask,
659
+ encoder_hidden_states=encoder_hidden_states,
660
+ encoder_attention_mask=encoder_attention_mask,
661
+ init_cache=init_cache,
662
+ deterministic=deterministic,
663
+ output_attentions=output_attentions,
664
+ output_hidden_states=output_hidden_states,
665
+ return_dict=return_dict,
666
+ )
667
+
668
+
669
+ class FlaxBertPooler(nn.Module):
670
+ config: BertConfig
671
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
672
+
673
+ def setup(self):
674
+ self.dense = nn.Dense(
675
+ self.config.hidden_size,
676
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
677
+ dtype=self.dtype,
678
+ )
679
+
680
+ def __call__(self, hidden_states):
681
+ cls_hidden_state = hidden_states[:, 0]
682
+ cls_hidden_state = self.dense(cls_hidden_state)
683
+ return nn.tanh(cls_hidden_state)
684
+
685
+
686
+ class FlaxBertPredictionHeadTransform(nn.Module):
687
+ config: BertConfig
688
+ dtype: jnp.dtype = jnp.float32
689
+
690
+ def setup(self):
691
+ self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
692
+ self.activation = ACT2FN[self.config.hidden_act]
693
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
694
+
695
+ def __call__(self, hidden_states):
696
+ hidden_states = self.dense(hidden_states)
697
+ hidden_states = self.activation(hidden_states)
698
+ return self.LayerNorm(hidden_states)
699
+
700
+
701
+ class FlaxBertLMPredictionHead(nn.Module):
702
+ config: BertConfig
703
+ dtype: jnp.dtype = jnp.float32
704
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
705
+
706
+ def setup(self):
707
+ self.transform = FlaxBertPredictionHeadTransform(self.config, dtype=self.dtype)
708
+ self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
709
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
710
+
711
+ def __call__(self, hidden_states, shared_embedding=None):
712
+ hidden_states = self.transform(hidden_states)
713
+
714
+ if shared_embedding is not None:
715
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
716
+ else:
717
+ hidden_states = self.decoder(hidden_states)
718
+
719
+ bias = jnp.asarray(self.bias, self.dtype)
720
+ hidden_states += bias
721
+ return hidden_states
722
+
723
+
724
+ class FlaxBertOnlyMLMHead(nn.Module):
725
+ config: BertConfig
726
+ dtype: jnp.dtype = jnp.float32
727
+
728
+ def setup(self):
729
+ self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
730
+
731
+ def __call__(self, hidden_states, shared_embedding=None):
732
+ hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding)
733
+ return hidden_states
734
+
735
+
736
+ class FlaxBertOnlyNSPHead(nn.Module):
737
+ dtype: jnp.dtype = jnp.float32
738
+
739
+ def setup(self):
740
+ self.seq_relationship = nn.Dense(2, dtype=self.dtype)
741
+
742
+ def __call__(self, pooled_output):
743
+ return self.seq_relationship(pooled_output)
744
+
745
+
746
+ class FlaxBertPreTrainingHeads(nn.Module):
747
+ config: BertConfig
748
+ dtype: jnp.dtype = jnp.float32
749
+
750
+ def setup(self):
751
+ self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
752
+ self.seq_relationship = nn.Dense(2, dtype=self.dtype)
753
+
754
+ def __call__(self, hidden_states, pooled_output, shared_embedding=None):
755
+ prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
756
+ seq_relationship_score = self.seq_relationship(pooled_output)
757
+ return prediction_scores, seq_relationship_score
758
+
759
+
760
+ class FlaxBertPreTrainedModel(FlaxPreTrainedModel):
761
+ """
762
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
763
+ models.
764
+ """
765
+
766
+ config_class = BertConfig
767
+ base_model_prefix = "bert"
768
+ module_class: nn.Module = None
769
+
770
+ def __init__(
771
+ self,
772
+ config: BertConfig,
773
+ input_shape: Tuple = (1, 1),
774
+ seed: int = 0,
775
+ dtype: jnp.dtype = jnp.float32,
776
+ _do_init: bool = True,
777
+ gradient_checkpointing: bool = False,
778
+ **kwargs,
779
+ ):
780
+ module = self.module_class(
781
+ config=config,
782
+ dtype=dtype,
783
+ gradient_checkpointing=gradient_checkpointing,
784
+ **kwargs,
785
+ )
786
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
787
+
788
+ def enable_gradient_checkpointing(self):
789
+ self._module = self.module_class(
790
+ config=self.config,
791
+ dtype=self.dtype,
792
+ gradient_checkpointing=True,
793
+ )
794
+
795
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
796
+ # init input tensors
797
+ input_ids = jnp.zeros(input_shape, dtype="i4")
798
+ token_type_ids = jnp.zeros_like(input_ids)
799
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
800
+ attention_mask = jnp.ones_like(input_ids)
801
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
802
+
803
+ params_rng, dropout_rng = jax.random.split(rng)
804
+ rngs = {"params": params_rng, "dropout": dropout_rng}
805
+
806
+ if self.config.add_cross_attention:
807
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
808
+ encoder_attention_mask = attention_mask
809
+ module_init_outputs = self.module.init(
810
+ rngs,
811
+ input_ids,
812
+ attention_mask,
813
+ token_type_ids,
814
+ position_ids,
815
+ head_mask,
816
+ encoder_hidden_states,
817
+ encoder_attention_mask,
818
+ return_dict=False,
819
+ )
820
+ else:
821
+ module_init_outputs = self.module.init(
822
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
823
+ )
824
+
825
+ random_params = module_init_outputs["params"]
826
+
827
+ if params is not None:
828
+ random_params = flatten_dict(unfreeze(random_params))
829
+ params = flatten_dict(unfreeze(params))
830
+ for missing_key in self._missing_keys:
831
+ params[missing_key] = random_params[missing_key]
832
+ self._missing_keys = set()
833
+ return freeze(unflatten_dict(params))
834
+ else:
835
+ return random_params
836
+
837
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
838
+ def init_cache(self, batch_size, max_length):
839
+ r"""
840
+ Args:
841
+ batch_size (`int`):
842
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
843
+ max_length (`int`):
844
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
845
+ cache.
846
+ """
847
+ # init input variables to retrieve cache
848
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
849
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
850
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
851
+
852
+ init_variables = self.module.init(
853
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
854
+ )
855
+ return unfreeze(init_variables["cache"])
856
+
857
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
858
+ def __call__(
859
+ self,
860
+ input_ids,
861
+ attention_mask=None,
862
+ token_type_ids=None,
863
+ position_ids=None,
864
+ head_mask=None,
865
+ encoder_hidden_states=None,
866
+ encoder_attention_mask=None,
867
+ params: dict = None,
868
+ dropout_rng: jax.random.PRNGKey = None,
869
+ train: bool = False,
870
+ output_attentions: Optional[bool] = None,
871
+ output_hidden_states: Optional[bool] = None,
872
+ return_dict: Optional[bool] = None,
873
+ past_key_values: dict = None,
874
+ ):
875
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
876
+ output_hidden_states = (
877
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
878
+ )
879
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
880
+
881
+ # init input tensors if not passed
882
+ if token_type_ids is None:
883
+ token_type_ids = jnp.zeros_like(input_ids)
884
+
885
+ if position_ids is None:
886
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
887
+
888
+ if attention_mask is None:
889
+ attention_mask = jnp.ones_like(input_ids)
890
+
891
+ if head_mask is None:
892
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
893
+
894
+ # Handle any PRNG if needed
895
+ rngs = {}
896
+ if dropout_rng is not None:
897
+ rngs["dropout"] = dropout_rng
898
+
899
+ inputs = {"params": params or self.params}
900
+
901
+ if self.config.add_cross_attention:
902
+ # if past_key_values are passed then the cache is already initialized; a private flag `init_cache` has to be
903
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
904
+ # changed by the FlaxBertAttention module
905
+ if past_key_values:
906
+ inputs["cache"] = past_key_values
907
+ mutable = ["cache"]
908
+ else:
909
+ mutable = False
910
+
911
+ outputs = self.module.apply(
912
+ inputs,
913
+ jnp.array(input_ids, dtype="i4"),
914
+ jnp.array(attention_mask, dtype="i4"),
915
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
916
+ position_ids=jnp.array(position_ids, dtype="i4"),
917
+ head_mask=jnp.array(head_mask, dtype="i4"),
918
+ encoder_hidden_states=encoder_hidden_states,
919
+ encoder_attention_mask=encoder_attention_mask,
920
+ deterministic=not train,
921
+ output_attentions=output_attentions,
922
+ output_hidden_states=output_hidden_states,
923
+ return_dict=return_dict,
924
+ rngs=rngs,
925
+ mutable=mutable,
926
+ )
927
+
928
+ # add updated cache to model output
929
+ if past_key_values is not None and return_dict:
930
+ outputs, past_key_values = outputs
931
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
932
+ return outputs
933
+ elif past_key_values is not None and not return_dict:
934
+ outputs, past_key_values = outputs
935
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
936
+
937
+ else:
938
+ outputs = self.module.apply(
939
+ inputs,
940
+ jnp.array(input_ids, dtype="i4"),
941
+ jnp.array(attention_mask, dtype="i4"),
942
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
943
+ position_ids=jnp.array(position_ids, dtype="i4"),
944
+ head_mask=jnp.array(head_mask, dtype="i4"),
945
+ deterministic=not train,
946
+ output_attentions=output_attentions,
947
+ output_hidden_states=output_hidden_states,
948
+ return_dict=return_dict,
949
+ rngs=rngs,
950
+ )
951
+
952
+ return outputs
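# Illustrative sketch of `init_cache` above (assumption: a small decoder-style config so
# causal attention and the key/value cache are enabled). The cache pre-allocates buffers of
# length `max_length` that are filled one step at a time during fast autoregressive decoding.
from transformers import BertConfig, FlaxBertModel

decoder_config = BertConfig(
    is_decoder=True, num_hidden_layers=2, hidden_size=64, num_attention_heads=2, intermediate_size=128
)
decoder = FlaxBertModel(decoder_config)
past_key_values = decoder.init_cache(batch_size=1, max_length=16)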
953
+
954
+
955
+ class FlaxBertModule(nn.Module):
956
+ config: BertConfig
957
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
958
+ add_pooling_layer: bool = True
959
+ gradient_checkpointing: bool = False
960
+
961
+ def setup(self):
962
+ self.embeddings = FlaxBertEmbeddings(self.config, dtype=self.dtype)
963
+ self.encoder = FlaxBertEncoder(
964
+ self.config,
965
+ dtype=self.dtype,
966
+ gradient_checkpointing=self.gradient_checkpointing,
967
+ )
968
+ self.pooler = FlaxBertPooler(self.config, dtype=self.dtype)
969
+
970
+ def __call__(
971
+ self,
972
+ input_ids,
973
+ attention_mask,
974
+ token_type_ids: Optional[jnp.ndarray] = None,
975
+ position_ids: Optional[jnp.ndarray] = None,
976
+ head_mask: Optional[jnp.ndarray] = None,
977
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
978
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
979
+ init_cache: bool = False,
980
+ deterministic: bool = True,
981
+ output_attentions: bool = False,
982
+ output_hidden_states: bool = False,
983
+ return_dict: bool = True,
984
+ ):
985
+ # make sure `token_type_ids` is correctly initialized when not passed
986
+ if token_type_ids is None:
987
+ token_type_ids = jnp.zeros_like(input_ids)
988
+
989
+ # make sure `position_ids` is correctly initialized when not passed
990
+ if position_ids is None:
991
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
992
+
993
+ hidden_states = self.embeddings(
994
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
995
+ )
996
+ outputs = self.encoder(
997
+ hidden_states,
998
+ attention_mask,
999
+ head_mask=head_mask,
1000
+ deterministic=deterministic,
1001
+ encoder_hidden_states=encoder_hidden_states,
1002
+ encoder_attention_mask=encoder_attention_mask,
1003
+ init_cache=init_cache,
1004
+ output_attentions=output_attentions,
1005
+ output_hidden_states=output_hidden_states,
1006
+ return_dict=return_dict,
1007
+ )
1008
+ hidden_states = outputs[0]
1009
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
1010
+
1011
+ if not return_dict:
1012
+ # if pooled is None, don't return it
1013
+ if pooled is None:
1014
+ return (hidden_states,) + outputs[1:]
1015
+ return (hidden_states, pooled) + outputs[1:]
1016
+
1017
+ return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
1018
+ last_hidden_state=hidden_states,
1019
+ pooler_output=pooled,
1020
+ hidden_states=outputs.hidden_states,
1021
+ attentions=outputs.attentions,
1022
+ cross_attentions=outputs.cross_attentions,
1023
+ )
1024
+
1025
+
1026
+ @add_start_docstrings(
1027
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
1028
+ BERT_START_DOCSTRING,
1029
+ )
1030
+ class FlaxBertModel(FlaxBertPreTrainedModel):
1031
+ module_class = FlaxBertModule
1032
+
1033
+
1034
+ append_call_sample_docstring(FlaxBertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
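# Illustrative usage sketch for FlaxBertModel (assumes network access to the Hub for the
# "google-bert/bert-base-uncased" checkpoint referenced by _CHECKPOINT_FOR_DOC above).
from transformers import AutoTokenizer, FlaxBertModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = FlaxBertModel.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state   # (batch_size, sequence_length, hidden_size)
pooler_output = outputs.pooler_output           # (batch_size, hidden_size)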
1035
+
1036
+
1037
+ class FlaxBertForPreTrainingModule(nn.Module):
1038
+ config: BertConfig
1039
+ dtype: jnp.dtype = jnp.float32
1040
+ gradient_checkpointing: bool = False
1041
+
1042
+ def setup(self):
1043
+ self.bert = FlaxBertModule(
1044
+ config=self.config,
1045
+ dtype=self.dtype,
1046
+ gradient_checkpointing=self.gradient_checkpointing,
1047
+ )
1048
+ self.cls = FlaxBertPreTrainingHeads(config=self.config, dtype=self.dtype)
1049
+
1050
+ def __call__(
1051
+ self,
1052
+ input_ids,
1053
+ attention_mask,
1054
+ token_type_ids,
1055
+ position_ids,
1056
+ head_mask,
1057
+ deterministic: bool = True,
1058
+ output_attentions: bool = False,
1059
+ output_hidden_states: bool = False,
1060
+ return_dict: bool = True,
1061
+ ):
1062
+ # Model
1063
+ outputs = self.bert(
1064
+ input_ids,
1065
+ attention_mask,
1066
+ token_type_ids,
1067
+ position_ids,
1068
+ head_mask,
1069
+ deterministic=deterministic,
1070
+ output_attentions=output_attentions,
1071
+ output_hidden_states=output_hidden_states,
1072
+ return_dict=return_dict,
1073
+ )
1074
+
1075
+ if self.config.tie_word_embeddings:
1076
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1077
+ else:
1078
+ shared_embedding = None
1079
+
1080
+ hidden_states = outputs[0]
1081
+ pooled_output = outputs[1]
1082
+
1083
+ prediction_scores, seq_relationship_score = self.cls(
1084
+ hidden_states, pooled_output, shared_embedding=shared_embedding
1085
+ )
1086
+
1087
+ if not return_dict:
1088
+ return (prediction_scores, seq_relationship_score) + outputs[2:]
1089
+
1090
+ return FlaxBertForPreTrainingOutput(
1091
+ prediction_logits=prediction_scores,
1092
+ seq_relationship_logits=seq_relationship_score,
1093
+ hidden_states=outputs.hidden_states,
1094
+ attentions=outputs.attentions,
1095
+ )
1096
+
1097
+
1098
+ @add_start_docstrings(
1099
+ """
1100
+ Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
1101
+ sentence prediction (classification)` head.
1102
+ """,
1103
+ BERT_START_DOCSTRING,
1104
+ )
1105
+ class FlaxBertForPreTraining(FlaxBertPreTrainedModel):
1106
+ module_class = FlaxBertForPreTrainingModule
1107
+
1108
+
1109
+ FLAX_BERT_FOR_PRETRAINING_DOCSTRING = """
1110
+ Returns:
1111
+
1112
+ Example:
1113
+
1114
+ ```python
1115
+ >>> from transformers import AutoTokenizer, FlaxBertForPreTraining
1116
+
1117
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1118
+ >>> model = FlaxBertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
1119
+
1120
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
1121
+ >>> outputs = model(**inputs)
1122
+
1123
+ >>> prediction_logits = outputs.prediction_logits
1124
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1125
+ ```
1126
+ """
1127
+
1128
+ overwrite_call_docstring(
1129
+ FlaxBertForPreTraining,
1130
+ BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_PRETRAINING_DOCSTRING,
1131
+ )
1132
+ append_replace_return_docstrings(
1133
+ FlaxBertForPreTraining, output_type=FlaxBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
1134
+ )
1135
+
1136
+
1137
+ class FlaxBertForMaskedLMModule(nn.Module):
1138
+ config: BertConfig
1139
+ dtype: jnp.dtype = jnp.float32
1140
+ gradient_checkpointing: bool = False
1141
+
1142
+ def setup(self):
1143
+ self.bert = FlaxBertModule(
1144
+ config=self.config,
1145
+ add_pooling_layer=False,
1146
+ dtype=self.dtype,
1147
+ gradient_checkpointing=self.gradient_checkpointing,
1148
+ )
1149
+ self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
1150
+
1151
+ def __call__(
1152
+ self,
1153
+ input_ids,
1154
+ attention_mask,
1155
+ token_type_ids,
1156
+ position_ids,
1157
+ head_mask,
1158
+ deterministic: bool = True,
1159
+ output_attentions: bool = False,
1160
+ output_hidden_states: bool = False,
1161
+ return_dict: bool = True,
1162
+ ):
1163
+ # Model
1164
+ outputs = self.bert(
1165
+ input_ids,
1166
+ attention_mask,
1167
+ token_type_ids,
1168
+ position_ids,
1169
+ head_mask,
1170
+ deterministic=deterministic,
1171
+ output_attentions=output_attentions,
1172
+ output_hidden_states=output_hidden_states,
1173
+ return_dict=return_dict,
1174
+ )
1175
+
1176
+ hidden_states = outputs[0]
1177
+ if self.config.tie_word_embeddings:
1178
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1179
+ else:
1180
+ shared_embedding = None
1181
+
1182
+ # Compute the prediction scores
1183
+ logits = self.cls(hidden_states, shared_embedding=shared_embedding)
1184
+
1185
+ if not return_dict:
1186
+ return (logits,) + outputs[1:]
1187
+
1188
+ return FlaxMaskedLMOutput(
1189
+ logits=logits,
1190
+ hidden_states=outputs.hidden_states,
1191
+ attentions=outputs.attentions,
1192
+ )
1193
+
1194
+
1195
+ @add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
1196
+ class FlaxBertForMaskedLM(FlaxBertPreTrainedModel):
1197
+ module_class = FlaxBertForMaskedLMModule
1198
+
1199
+
1200
+ append_call_sample_docstring(FlaxBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
1201
+
1202
+
1203
+ class FlaxBertForNextSentencePredictionModule(nn.Module):
1204
+ config: BertConfig
1205
+ dtype: jnp.dtype = jnp.float32
1206
+ gradient_checkpointing: bool = False
1207
+
1208
+ def setup(self):
1209
+ self.bert = FlaxBertModule(
1210
+ config=self.config,
1211
+ dtype=self.dtype,
1212
+ gradient_checkpointing=self.gradient_checkpointing,
1213
+ )
1214
+ self.cls = FlaxBertOnlyNSPHead(dtype=self.dtype)
1215
+
1216
+ def __call__(
1217
+ self,
1218
+ input_ids,
1219
+ attention_mask,
1220
+ token_type_ids,
1221
+ position_ids,
1222
+ head_mask,
1223
+ deterministic: bool = True,
1224
+ output_attentions: bool = False,
1225
+ output_hidden_states: bool = False,
1226
+ return_dict: bool = True,
1227
+ ):
1228
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1229
+
1230
+ # Model
1231
+ outputs = self.bert(
1232
+ input_ids,
1233
+ attention_mask,
1234
+ token_type_ids,
1235
+ position_ids,
1236
+ head_mask,
1237
+ deterministic=deterministic,
1238
+ output_attentions=output_attentions,
1239
+ output_hidden_states=output_hidden_states,
1240
+ return_dict=return_dict,
1241
+ )
1242
+
1243
+ pooled_output = outputs[1]
1244
+ seq_relationship_scores = self.cls(pooled_output)
1245
+
1246
+ if not return_dict:
1247
+ return (seq_relationship_scores,) + outputs[2:]
1248
+
1249
+ return FlaxNextSentencePredictorOutput(
1250
+ logits=seq_relationship_scores,
1251
+ hidden_states=outputs.hidden_states,
1252
+ attentions=outputs.attentions,
1253
+ )
1254
+
1255
+
1256
+ @add_start_docstrings(
1257
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
1258
+ BERT_START_DOCSTRING,
1259
+ )
1260
+ class FlaxBertForNextSentencePrediction(FlaxBertPreTrainedModel):
1261
+ module_class = FlaxBertForNextSentencePredictionModule
1262
+
1263
+
1264
+ FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING = """
1265
+ Returns:
1266
+
1267
+ Example:
1268
+
1269
+ ```python
1270
+ >>> from transformers import AutoTokenizer, FlaxBertForNextSentencePrediction
1271
+
1272
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1273
+ >>> model = FlaxBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1274
+
1275
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1276
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1277
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="jax")
1278
+
1279
+ >>> outputs = model(**encoding)
1280
+ >>> logits = outputs.logits
1281
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1282
+ ```
1283
+ """
1284
+
1285
+
1286
+ overwrite_call_docstring(
1287
+ FlaxBertForNextSentencePrediction,
1288
+ BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING,
1289
+ )
1290
+ append_replace_return_docstrings(
1291
+ FlaxBertForNextSentencePrediction, output_type=FlaxNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC
1292
+ )
1293
+
1294
+
1295
+ class FlaxBertForSequenceClassificationModule(nn.Module):
1296
+ config: BertConfig
1297
+ dtype: jnp.dtype = jnp.float32
1298
+ gradient_checkpointing: bool = False
1299
+
1300
+ def setup(self):
1301
+ self.bert = FlaxBertModule(
1302
+ config=self.config,
1303
+ dtype=self.dtype,
1304
+ gradient_checkpointing=self.gradient_checkpointing,
1305
+ )
1306
+ classifier_dropout = (
1307
+ self.config.classifier_dropout
1308
+ if self.config.classifier_dropout is not None
1309
+ else self.config.hidden_dropout_prob
1310
+ )
1311
+ self.dropout = nn.Dropout(rate=classifier_dropout)
1312
+ self.classifier = nn.Dense(
1313
+ self.config.num_labels,
1314
+ dtype=self.dtype,
1315
+ )
1316
+
1317
+ def __call__(
1318
+ self,
1319
+ input_ids,
1320
+ attention_mask,
1321
+ token_type_ids,
1322
+ position_ids,
1323
+ head_mask,
1324
+ deterministic: bool = True,
1325
+ output_attentions: bool = False,
1326
+ output_hidden_states: bool = False,
1327
+ return_dict: bool = True,
1328
+ ):
1329
+ # Model
1330
+ outputs = self.bert(
1331
+ input_ids,
1332
+ attention_mask,
1333
+ token_type_ids,
1334
+ position_ids,
1335
+ head_mask,
1336
+ deterministic=deterministic,
1337
+ output_attentions=output_attentions,
1338
+ output_hidden_states=output_hidden_states,
1339
+ return_dict=return_dict,
1340
+ )
1341
+
1342
+ pooled_output = outputs[1]
1343
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
1344
+ logits = self.classifier(pooled_output)
1345
+
1346
+ if not return_dict:
1347
+ return (logits,) + outputs[2:]
1348
+
1349
+ return FlaxSequenceClassifierOutput(
1350
+ logits=logits,
1351
+ hidden_states=outputs.hidden_states,
1352
+ attentions=outputs.attentions,
1353
+ )
1354
+
1355
+
1356
+ @add_start_docstrings(
1357
+ """
1358
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1359
+ output) e.g. for GLUE tasks.
1360
+ """,
1361
+ BERT_START_DOCSTRING,
1362
+ )
1363
+ class FlaxBertForSequenceClassification(FlaxBertPreTrainedModel):
1364
+ module_class = FlaxBertForSequenceClassificationModule
1365
+
1366
+
1367
+ append_call_sample_docstring(
1368
+ FlaxBertForSequenceClassification,
1369
+ _CHECKPOINT_FOR_DOC,
1370
+ FlaxSequenceClassifierOutput,
1371
+ _CONFIG_FOR_DOC,
1372
+ )
1373
+
1374
+
1375
+ class FlaxBertForMultipleChoiceModule(nn.Module):
1376
+ config: BertConfig
1377
+ dtype: jnp.dtype = jnp.float32
1378
+ gradient_checkpointing: bool = False
1379
+
1380
+ def setup(self):
1381
+ self.bert = FlaxBertModule(
1382
+ config=self.config,
1383
+ dtype=self.dtype,
1384
+ gradient_checkpointing=self.gradient_checkpointing,
1385
+ )
1386
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
1387
+ self.classifier = nn.Dense(1, dtype=self.dtype)
1388
+
1389
+ def __call__(
1390
+ self,
1391
+ input_ids,
1392
+ attention_mask,
1393
+ token_type_ids,
1394
+ position_ids,
1395
+ head_mask,
1396
+ deterministic: bool = True,
1397
+ output_attentions: bool = False,
1398
+ output_hidden_states: bool = False,
1399
+ return_dict: bool = True,
1400
+ ):
1401
+ num_choices = input_ids.shape[1]
1402
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
1403
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
1404
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
1405
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
1406
+
1407
+ # Model
1408
+ outputs = self.bert(
1409
+ input_ids,
1410
+ attention_mask,
1411
+ token_type_ids,
1412
+ position_ids,
1413
+ head_mask,
1414
+ deterministic=deterministic,
1415
+ output_attentions=output_attentions,
1416
+ output_hidden_states=output_hidden_states,
1417
+ return_dict=return_dict,
1418
+ )
1419
+
1420
+ pooled_output = outputs[1]
1421
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
1422
+ logits = self.classifier(pooled_output)
1423
+
1424
+ reshaped_logits = logits.reshape(-1, num_choices)
1425
+
1426
+ if not return_dict:
1427
+ return (reshaped_logits,) + outputs[2:]
1428
+
1429
+ return FlaxMultipleChoiceModelOutput(
1430
+ logits=reshaped_logits,
1431
+ hidden_states=outputs.hidden_states,
1432
+ attentions=outputs.attentions,
1433
+ )
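# Illustrative sketch (toy shapes, not from the diff): multiple-choice inputs of shape
# (batch, num_choices, seq_len) are flattened to (batch * num_choices, seq_len) for the
# encoder, and the single score per choice is reshaped back to (batch, num_choices).
import jax.numpy as jnp

batch, num_choices, seq_len = 2, 4, 7
input_ids = jnp.zeros((batch, num_choices, seq_len), dtype="i4")
flat_input_ids = input_ids.reshape(-1, input_ids.shape[-1])   # (8, 7)
logits = jnp.zeros((flat_input_ids.shape[0], 1))              # one score per flattened example
reshaped_logits = logits.reshape(-1, num_choices)             # (2, 4)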
1434
+
1435
+
1436
+ @add_start_docstrings(
1437
+ """
1438
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1439
+ softmax) e.g. for RocStories/SWAG tasks.
1440
+ """,
1441
+ BERT_START_DOCSTRING,
1442
+ )
1443
+ class FlaxBertForMultipleChoice(FlaxBertPreTrainedModel):
1444
+ module_class = FlaxBertForMultipleChoiceModule
1445
+
1446
+
1447
+ overwrite_call_docstring(
1448
+ FlaxBertForMultipleChoice, BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1449
+ )
1450
+ append_call_sample_docstring(
1451
+ FlaxBertForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC
1452
+ )
1453
+
1454
+
1455
+ class FlaxBertForTokenClassificationModule(nn.Module):
1456
+ config: BertConfig
1457
+ dtype: jnp.dtype = jnp.float32
1458
+ gradient_checkpointing: bool = False
1459
+
1460
+ def setup(self):
1461
+ self.bert = FlaxBertModule(
1462
+ config=self.config,
1463
+ dtype=self.dtype,
1464
+ add_pooling_layer=False,
1465
+ gradient_checkpointing=self.gradient_checkpointing,
1466
+ )
1467
+ classifier_dropout = (
1468
+ self.config.classifier_dropout
1469
+ if self.config.classifier_dropout is not None
1470
+ else self.config.hidden_dropout_prob
1471
+ )
1472
+ self.dropout = nn.Dropout(rate=classifier_dropout)
1473
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
1474
+
1475
+ def __call__(
1476
+ self,
1477
+ input_ids,
1478
+ attention_mask,
1479
+ token_type_ids,
1480
+ position_ids,
1481
+ head_mask,
1482
+ deterministic: bool = True,
1483
+ output_attentions: bool = False,
1484
+ output_hidden_states: bool = False,
1485
+ return_dict: bool = True,
1486
+ ):
1487
+ # Model
1488
+ outputs = self.bert(
1489
+ input_ids,
1490
+ attention_mask,
1491
+ token_type_ids,
1492
+ position_ids,
1493
+ head_mask,
1494
+ deterministic=deterministic,
1495
+ output_attentions=output_attentions,
1496
+ output_hidden_states=output_hidden_states,
1497
+ return_dict=return_dict,
1498
+ )
1499
+
1500
+ hidden_states = outputs[0]
1501
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1502
+ logits = self.classifier(hidden_states)
1503
+
1504
+ if not return_dict:
1505
+ return (logits,) + outputs[1:]
1506
+
1507
+ return FlaxTokenClassifierOutput(
1508
+ logits=logits,
1509
+ hidden_states=outputs.hidden_states,
1510
+ attentions=outputs.attentions,
1511
+ )
1512
+
1513
+
1514
+ @add_start_docstrings(
1515
+ """
1516
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1517
+ Named-Entity-Recognition (NER) tasks.
1518
+ """,
1519
+ BERT_START_DOCSTRING,
1520
+ )
1521
+ class FlaxBertForTokenClassification(FlaxBertPreTrainedModel):
1522
+ module_class = FlaxBertForTokenClassificationModule
1523
+
1524
+
1525
+ append_call_sample_docstring(
1526
+ FlaxBertForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC
1527
+ )
1528
+
1529
+
1530
+ class FlaxBertForQuestionAnsweringModule(nn.Module):
1531
+ config: BertConfig
1532
+ dtype: jnp.dtype = jnp.float32
1533
+ gradient_checkpointing: bool = False
1534
+
1535
+ def setup(self):
1536
+ self.bert = FlaxBertModule(
1537
+ config=self.config,
1538
+ dtype=self.dtype,
1539
+ add_pooling_layer=False,
1540
+ gradient_checkpointing=self.gradient_checkpointing,
1541
+ )
1542
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1543
+
1544
+ def __call__(
1545
+ self,
1546
+ input_ids,
1547
+ attention_mask,
1548
+ token_type_ids,
1549
+ position_ids,
1550
+ head_mask,
1551
+ deterministic: bool = True,
1552
+ output_attentions: bool = False,
1553
+ output_hidden_states: bool = False,
1554
+ return_dict: bool = True,
1555
+ ):
1556
+ # Model
1557
+ outputs = self.bert(
1558
+ input_ids,
1559
+ attention_mask,
1560
+ token_type_ids,
1561
+ position_ids,
1562
+ head_mask,
1563
+ deterministic=deterministic,
1564
+ output_attentions=output_attentions,
1565
+ output_hidden_states=output_hidden_states,
1566
+ return_dict=return_dict,
1567
+ )
1568
+
1569
+ hidden_states = outputs[0]
1570
+
1571
+ logits = self.qa_outputs(hidden_states)
1572
+ start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1)
1573
+ start_logits = start_logits.squeeze(-1)
1574
+ end_logits = end_logits.squeeze(-1)
1575
+
1576
+ if not return_dict:
1577
+ return (start_logits, end_logits) + outputs[1:]
1578
+
1579
+ return FlaxQuestionAnsweringModelOutput(
1580
+ start_logits=start_logits,
1581
+ end_logits=end_logits,
1582
+ hidden_states=outputs.hidden_states,
1583
+ attentions=outputs.attentions,
1584
+ )
1585
+
1586
+
1587
+ @add_start_docstrings(
1588
+ """
1589
+ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1590
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1591
+ """,
1592
+ BERT_START_DOCSTRING,
1593
+ )
1594
+ class FlaxBertForQuestionAnswering(FlaxBertPreTrainedModel):
1595
+ module_class = FlaxBertForQuestionAnsweringModule
1596
+
1597
+
1598
+ append_call_sample_docstring(
1599
+ FlaxBertForQuestionAnswering,
1600
+ _CHECKPOINT_FOR_DOC,
1601
+ FlaxQuestionAnsweringModelOutput,
1602
+ _CONFIG_FOR_DOC,
1603
+ )
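For reference, the start/end split in FlaxBertForQuestionAnsweringModule above relies on num_labels being 2: the last axis of the QA logits is split into per-position start and end scores. A minimal sketch with dummy logits (shapes are illustrative only):

import jax.numpy as jnp

batch, seq_len, num_labels = 2, 8, 2
logits = jnp.zeros((batch, seq_len, num_labels))

# Split the last axis into start and end scores, then drop the size-1 axis
start_logits, end_logits = jnp.split(logits, num_labels, axis=-1)
start_logits = start_logits.squeeze(-1)   # (2, 8)
end_logits = end_logits.squeeze(-1)       # (2, 8)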
1604
+
1605
+
1606
+ class FlaxBertForCausalLMModule(nn.Module):
1607
+ config: BertConfig
1608
+ dtype: jnp.dtype = jnp.float32
1609
+ gradient_checkpointing: bool = False
1610
+
1611
+ def setup(self):
1612
+ self.bert = FlaxBertModule(
1613
+ config=self.config,
1614
+ add_pooling_layer=False,
1615
+ dtype=self.dtype,
1616
+ gradient_checkpointing=self.gradient_checkpointing,
1617
+ )
1618
+ self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
1619
+
1620
+ def __call__(
1621
+ self,
1622
+ input_ids,
1623
+ attention_mask,
1624
+ position_ids,
1625
+ token_type_ids: Optional[jnp.ndarray] = None,
1626
+ head_mask: Optional[jnp.ndarray] = None,
1627
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1628
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1629
+ init_cache: bool = False,
1630
+ deterministic: bool = True,
1631
+ output_attentions: bool = False,
1632
+ output_hidden_states: bool = False,
1633
+ return_dict: bool = True,
1634
+ ):
1635
+ # Model
1636
+ outputs = self.bert(
1637
+ input_ids,
1638
+ attention_mask,
1639
+ token_type_ids,
1640
+ position_ids,
1641
+ head_mask,
1642
+ encoder_hidden_states=encoder_hidden_states,
1643
+ encoder_attention_mask=encoder_attention_mask,
1644
+ init_cache=init_cache,
1645
+ deterministic=deterministic,
1646
+ output_attentions=output_attentions,
1647
+ output_hidden_states=output_hidden_states,
1648
+ return_dict=return_dict,
1649
+ )
1650
+
1651
+ hidden_states = outputs[0]
1652
+ if self.config.tie_word_embeddings:
1653
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1654
+ else:
1655
+ shared_embedding = None
1656
+
1657
+ # Compute the prediction scores
1658
+ logits = self.cls(hidden_states, shared_embedding=shared_embedding)
1659
+
1660
+ if not return_dict:
1661
+ return (logits,) + outputs[1:]
1662
+
1663
+ return FlaxCausalLMOutputWithCrossAttentions(
1664
+ logits=logits,
1665
+ hidden_states=outputs.hidden_states,
1666
+ attentions=outputs.attentions,
1667
+ cross_attentions=outputs.cross_attentions,
1668
+ )
1669
+
1670
+
1671
+ @add_start_docstrings(
1672
+ """
1673
+ Bert Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g. for
1674
+ autoregressive tasks.
1675
+ """,
1676
+ BERT_START_DOCSTRING,
1677
+ )
1678
+ class FlaxBertForCausalLM(FlaxBertPreTrainedModel):
1679
+ module_class = FlaxBertForCausalLMModule
1680
+
1681
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
1682
+ # initializing the cache
1683
+ batch_size, seq_length = input_ids.shape
1684
+
1685
+ past_key_values = self.init_cache(batch_size, max_length)
1686
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1687
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1688
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
1689
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1690
+ if attention_mask is not None:
1691
+ position_ids = attention_mask.cumsum(axis=-1) - 1
1692
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
1693
+ else:
1694
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1695
+
1696
+ return {
1697
+ "past_key_values": past_key_values,
1698
+ "attention_mask": extended_attention_mask,
1699
+ "position_ids": position_ids,
1700
+ }
1701
+
1702
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1703
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1704
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
1705
+ return model_kwargs
1706
+
1707
+
1708
+ append_call_sample_docstring(
1709
+ FlaxBertForCausalLM,
1710
+ _CHECKPOINT_FOR_DOC,
1711
+ FlaxCausalLMOutputWithCrossAttentions,
1712
+ _CONFIG_FOR_DOC,
1713
+ )
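The prepare_inputs_for_generation logic above builds one static attention mask of length max_length and derives position_ids from the cumulative sum of the user-supplied mask. A self-contained sketch of that computation on dummy data (no model involved; array values are illustrative):

import jax.numpy as jnp
from jax import lax

batch_size, seq_length, max_length = 1, 3, 6
attention_mask = jnp.array([[1, 1, 1]], dtype="i4")

# Static mask covering the full generation length; the real mask is written into the front
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))

# Positions follow the running count of non-padding tokens
position_ids = attention_mask.cumsum(axis=-1) - 1
print(extended_attention_mask, position_ids)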
venv/lib/python3.10/site-packages/transformers/models/bert/modeling_tf_bert.py ADDED
@@ -0,0 +1,2114 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 BERT model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import math
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from typing import Dict, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+ import tensorflow as tf
28
+
29
+ from ...activations_tf import get_tf_activation
30
+ from ...modeling_tf_outputs import (
31
+ TFBaseModelOutputWithPastAndCrossAttentions,
32
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
33
+ TFCausalLMOutputWithCrossAttentions,
34
+ TFMaskedLMOutput,
35
+ TFMultipleChoiceModelOutput,
36
+ TFNextSentencePredictorOutput,
37
+ TFQuestionAnsweringModelOutput,
38
+ TFSequenceClassifierOutput,
39
+ TFTokenClassifierOutput,
40
+ )
41
+ from ...modeling_tf_utils import (
42
+ TFCausalLanguageModelingLoss,
43
+ TFMaskedLanguageModelingLoss,
44
+ TFModelInputType,
45
+ TFMultipleChoiceLoss,
46
+ TFNextSentencePredictionLoss,
47
+ TFPreTrainedModel,
48
+ TFQuestionAnsweringLoss,
49
+ TFSequenceClassificationLoss,
50
+ TFTokenClassificationLoss,
51
+ get_initializer,
52
+ keras,
53
+ keras_serializable,
54
+ unpack_inputs,
55
+ )
56
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
57
+ from ...utils import (
58
+ ModelOutput,
59
+ add_code_sample_docstrings,
60
+ add_start_docstrings,
61
+ add_start_docstrings_to_model_forward,
62
+ logging,
63
+ replace_return_docstrings,
64
+ )
65
+ from .configuration_bert import BertConfig
66
+
67
+
68
+ logger = logging.get_logger(__name__)
69
+
70
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
71
+ _CONFIG_FOR_DOC = "BertConfig"
72
+
73
+ # TokenClassification docstring
74
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "dbmdz/bert-large-cased-finetuned-conll03-english"
75
+ _TOKEN_CLASS_EXPECTED_OUTPUT = (
76
+ "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] "
77
+ )
78
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.01
79
+
80
+ # QuestionAnswering docstring
81
+ _CHECKPOINT_FOR_QA = "ydshieh/bert-base-cased-squad2"
82
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
83
+ _QA_EXPECTED_LOSS = 7.41
84
+ _QA_TARGET_START_INDEX = 14
85
+ _QA_TARGET_END_INDEX = 15
86
+
87
+ # SequenceClassification docstring
88
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ydshieh/bert-base-uncased-yelp-polarity"
89
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"
90
+ _SEQ_CLASS_EXPECTED_LOSS = 0.01
91
+
92
+
93
+ from ..deprecated._archive_maps import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
94
+
95
+
96
+ class TFBertPreTrainingLoss:
97
+ """
98
+ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining
99
+ NSP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss
100
+ computation.
101
+ """
102
+
103
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
104
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
105
+
106
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
107
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
108
+ # make sure only labels that are not equal to -100
109
+ # are taken into account for the loss computation
110
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
111
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
112
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
113
+
114
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
115
+ unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1])
116
+ ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype)
117
+ masked_ns_loss = unmasked_ns_loss * ns_loss_mask
118
+
119
+ reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask)
120
+
121
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,))
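A tiny numeric sketch of the -100 masking used by hf_compute_loss above: positions labelled -100 contribute nothing to the sum, and the division normalises by the number of kept positions. The values here are made up for illustration.

import tensorflow as tf

labels = tf.constant([5, -100, 7])
per_token_loss = tf.constant([0.4, 9.9, 0.6])   # pretend per-position losses

mask = tf.cast(labels != -100, per_token_loss.dtype)            # [1., 0., 1.]
reduced = tf.reduce_sum(per_token_loss * mask) / tf.reduce_sum(mask)
print(float(reduced))                                           # (0.4 + 0.6) / 2 = 0.5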
122
+
123
+
124
+ class TFBertEmbeddings(keras.layers.Layer):
125
+ """Construct the embeddings from word, position and token_type embeddings."""
126
+
127
+ def __init__(self, config: BertConfig, **kwargs):
128
+ super().__init__(**kwargs)
129
+
130
+ self.config = config
131
+ self.hidden_size = config.hidden_size
132
+ self.max_position_embeddings = config.max_position_embeddings
133
+ self.initializer_range = config.initializer_range
134
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
135
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
136
+
137
+ def build(self, input_shape=None):
138
+ with tf.name_scope("word_embeddings"):
139
+ self.weight = self.add_weight(
140
+ name="weight",
141
+ shape=[self.config.vocab_size, self.hidden_size],
142
+ initializer=get_initializer(self.initializer_range),
143
+ )
144
+
145
+ with tf.name_scope("token_type_embeddings"):
146
+ self.token_type_embeddings = self.add_weight(
147
+ name="embeddings",
148
+ shape=[self.config.type_vocab_size, self.hidden_size],
149
+ initializer=get_initializer(self.initializer_range),
150
+ )
151
+
152
+ with tf.name_scope("position_embeddings"):
153
+ self.position_embeddings = self.add_weight(
154
+ name="embeddings",
155
+ shape=[self.max_position_embeddings, self.hidden_size],
156
+ initializer=get_initializer(self.initializer_range),
157
+ )
158
+
159
+ if self.built:
160
+ return
161
+ self.built = True
162
+ if getattr(self, "LayerNorm", None) is not None:
163
+ with tf.name_scope(self.LayerNorm.name):
164
+ self.LayerNorm.build([None, None, self.config.hidden_size])
165
+
166
+ def call(
167
+ self,
168
+ input_ids: tf.Tensor = None,
169
+ position_ids: tf.Tensor = None,
170
+ token_type_ids: tf.Tensor = None,
171
+ inputs_embeds: tf.Tensor = None,
172
+ past_key_values_length=0,
173
+ training: bool = False,
174
+ ) -> tf.Tensor:
175
+ """
176
+ Applies embedding based on inputs tensor.
177
+
178
+ Returns:
179
+ final_embeddings (`tf.Tensor`): output embedding tensor.
180
+ """
181
+ if input_ids is None and inputs_embeds is None:
182
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
183
+
184
+ if input_ids is not None:
185
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
186
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
187
+
188
+ input_shape = shape_list(inputs_embeds)[:-1]
189
+
190
+ if token_type_ids is None:
191
+ token_type_ids = tf.fill(dims=input_shape, value=0)
192
+
193
+ if position_ids is None:
194
+ position_ids = tf.expand_dims(
195
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
196
+ )
197
+
198
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
199
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
200
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
201
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
202
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
203
+
204
+ return final_embeddings
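The call above amounts to three table lookups followed by a sum, LayerNorm and dropout. A shape-only sketch, with toy dimensions standing in for the config values:

import tensorflow as tf

vocab_size, type_vocab, max_pos, hidden = 30522, 2, 512, 16
word_emb = tf.random.normal((vocab_size, hidden))
type_emb = tf.random.normal((type_vocab, hidden))
pos_emb = tf.random.normal((max_pos, hidden))

input_ids = tf.constant([[101, 2023, 102]])
token_type_ids = tf.zeros_like(input_ids)
position_ids = tf.range(3)[None, :]

# Sum of the three embeddings, exactly what precedes LayerNorm/dropout in TFBertEmbeddings
embeddings = (
    tf.gather(word_emb, input_ids)
    + tf.gather(pos_emb, position_ids)
    + tf.gather(type_emb, token_type_ids)
)
print(embeddings.shape)  # (1, 3, 16)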
205
+
206
+
207
+ class TFBertSelfAttention(keras.layers.Layer):
208
+ def __init__(self, config: BertConfig, **kwargs):
209
+ super().__init__(**kwargs)
210
+
211
+ if config.hidden_size % config.num_attention_heads != 0:
212
+ raise ValueError(
213
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
214
+ f"of attention heads ({config.num_attention_heads})"
215
+ )
216
+
217
+ self.num_attention_heads = config.num_attention_heads
218
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
219
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
220
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
221
+
222
+ self.query = keras.layers.Dense(
223
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
224
+ )
225
+ self.key = keras.layers.Dense(
226
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
227
+ )
228
+ self.value = keras.layers.Dense(
229
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
230
+ )
231
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
232
+
233
+ self.is_decoder = config.is_decoder
234
+ self.config = config
235
+
236
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
237
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
238
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
239
+
240
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
241
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
242
+
243
+ def call(
244
+ self,
245
+ hidden_states: tf.Tensor,
246
+ attention_mask: tf.Tensor,
247
+ head_mask: tf.Tensor,
248
+ encoder_hidden_states: tf.Tensor,
249
+ encoder_attention_mask: tf.Tensor,
250
+ past_key_value: Tuple[tf.Tensor],
251
+ output_attentions: bool,
252
+ training: bool = False,
253
+ ) -> Tuple[tf.Tensor]:
254
+ batch_size = shape_list(hidden_states)[0]
255
+ mixed_query_layer = self.query(inputs=hidden_states)
256
+
257
+ # If this is instantiated as a cross-attention module, the keys
258
+ # and values come from an encoder; the attention mask needs to be
259
+ # such that the encoder's padding tokens are not attended to.
260
+ is_cross_attention = encoder_hidden_states is not None
261
+
262
+ if is_cross_attention and past_key_value is not None:
263
+ # reuse k,v, cross_attentions
264
+ key_layer = past_key_value[0]
265
+ value_layer = past_key_value[1]
266
+ attention_mask = encoder_attention_mask
267
+ elif is_cross_attention:
268
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
269
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
270
+ attention_mask = encoder_attention_mask
271
+ elif past_key_value is not None:
272
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
273
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
274
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
275
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
276
+ else:
277
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
278
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
279
+
280
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
281
+
282
+ if self.is_decoder:
283
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
284
+ # Further calls to cross_attention layer can then reuse all cross-attention
285
+ # key/value_states (first "if" case)
286
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
287
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
288
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
289
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
290
+ past_key_value = (key_layer, value_layer)
291
+
292
+ # Take the dot product between "query" and "key" to get the raw attention scores.
293
+ # (batch size, num_heads, seq_len_q, seq_len_k)
294
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
295
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
296
+ attention_scores = tf.divide(attention_scores, dk)
297
+
298
+ if attention_mask is not None:
299
+ # Apply the attention mask (precomputed for all layers in the TFBertModel call() function)
300
+ attention_scores = tf.add(attention_scores, attention_mask)
301
+
302
+ # Normalize the attention scores to probabilities.
303
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
304
+
305
+ # This is actually dropping out entire tokens to attend to, which might
306
+ # seem a bit unusual, but is taken from the original Transformer paper.
307
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
308
+
309
+ # Mask heads if we want to
310
+ if head_mask is not None:
311
+ attention_probs = tf.multiply(attention_probs, head_mask)
312
+
313
+ attention_output = tf.matmul(attention_probs, value_layer)
314
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
315
+
316
+ # (batch_size, seq_len_q, all_head_size)
317
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
318
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
319
+
320
+ if self.is_decoder:
321
+ outputs = outputs + (past_key_value,)
322
+ return outputs
323
+
324
+ def build(self, input_shape=None):
325
+ if self.built:
326
+ return
327
+ self.built = True
328
+ if getattr(self, "query", None) is not None:
329
+ with tf.name_scope(self.query.name):
330
+ self.query.build([None, None, self.config.hidden_size])
331
+ if getattr(self, "key", None) is not None:
332
+ with tf.name_scope(self.key.name):
333
+ self.key.build([None, None, self.config.hidden_size])
334
+ if getattr(self, "value", None) is not None:
335
+ with tf.name_scope(self.value.name):
336
+ self.value.build([None, None, self.config.hidden_size])
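To make the head reshaping above concrete: transpose_for_scores turns (batch, seq, all_head_size) into (batch, heads, seq, head_size), and the attention scores are the scaled dot product of queries and keys. A standalone sketch with toy sizes, not using the layer itself:

import math
import tensorflow as tf

batch, seq, heads, head_size = 2, 5, 4, 8
all_head_size = heads * head_size

q = tf.random.normal((batch, seq, all_head_size))
k = tf.random.normal((batch, seq, all_head_size))

def split_heads(x):
    # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
    x = tf.reshape(x, (batch, -1, heads, head_size))
    return tf.transpose(x, perm=[0, 2, 1, 3])

scores = tf.matmul(split_heads(q), split_heads(k), transpose_b=True) / math.sqrt(head_size)
print(scores.shape)  # (2, 4, 5, 5): one seq x seq score matrix per head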
337
+
338
+
339
+ class TFBertSelfOutput(keras.layers.Layer):
340
+ def __init__(self, config: BertConfig, **kwargs):
341
+ super().__init__(**kwargs)
342
+
343
+ self.dense = keras.layers.Dense(
344
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
345
+ )
346
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
347
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
348
+ self.config = config
349
+
350
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
351
+ hidden_states = self.dense(inputs=hidden_states)
352
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
353
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
354
+
355
+ return hidden_states
356
+
357
+ def build(self, input_shape=None):
358
+ if self.built:
359
+ return
360
+ self.built = True
361
+ if getattr(self, "dense", None) is not None:
362
+ with tf.name_scope(self.dense.name):
363
+ self.dense.build([None, None, self.config.hidden_size])
364
+ if getattr(self, "LayerNorm", None) is not None:
365
+ with tf.name_scope(self.LayerNorm.name):
366
+ self.LayerNorm.build([None, None, self.config.hidden_size])
367
+
368
+
369
+ class TFBertAttention(keras.layers.Layer):
370
+ def __init__(self, config: BertConfig, **kwargs):
371
+ super().__init__(**kwargs)
372
+
373
+ self.self_attention = TFBertSelfAttention(config, name="self")
374
+ self.dense_output = TFBertSelfOutput(config, name="output")
375
+
376
+ def prune_heads(self, heads):
377
+ raise NotImplementedError
378
+
379
+ def call(
380
+ self,
381
+ input_tensor: tf.Tensor,
382
+ attention_mask: tf.Tensor,
383
+ head_mask: tf.Tensor,
384
+ encoder_hidden_states: tf.Tensor,
385
+ encoder_attention_mask: tf.Tensor,
386
+ past_key_value: Tuple[tf.Tensor],
387
+ output_attentions: bool,
388
+ training: bool = False,
389
+ ) -> Tuple[tf.Tensor]:
390
+ self_outputs = self.self_attention(
391
+ hidden_states=input_tensor,
392
+ attention_mask=attention_mask,
393
+ head_mask=head_mask,
394
+ encoder_hidden_states=encoder_hidden_states,
395
+ encoder_attention_mask=encoder_attention_mask,
396
+ past_key_value=past_key_value,
397
+ output_attentions=output_attentions,
398
+ training=training,
399
+ )
400
+ attention_output = self.dense_output(
401
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
402
+ )
403
+ # add attentions (possibly with past_key_value) if we output them
404
+ outputs = (attention_output,) + self_outputs[1:]
405
+
406
+ return outputs
407
+
408
+ def build(self, input_shape=None):
409
+ if self.built:
410
+ return
411
+ self.built = True
412
+ if getattr(self, "self_attention", None) is not None:
413
+ with tf.name_scope(self.self_attention.name):
414
+ self.self_attention.build(None)
415
+ if getattr(self, "dense_output", None) is not None:
416
+ with tf.name_scope(self.dense_output.name):
417
+ self.dense_output.build(None)
418
+
419
+
420
+ class TFBertIntermediate(keras.layers.Layer):
421
+ def __init__(self, config: BertConfig, **kwargs):
422
+ super().__init__(**kwargs)
423
+
424
+ self.dense = keras.layers.Dense(
425
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
426
+ )
427
+
428
+ if isinstance(config.hidden_act, str):
429
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
430
+ else:
431
+ self.intermediate_act_fn = config.hidden_act
432
+ self.config = config
433
+
434
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
435
+ hidden_states = self.dense(inputs=hidden_states)
436
+ hidden_states = self.intermediate_act_fn(hidden_states)
437
+
438
+ return hidden_states
439
+
440
+ def build(self, input_shape=None):
441
+ if self.built:
442
+ return
443
+ self.built = True
444
+ if getattr(self, "dense", None) is not None:
445
+ with tf.name_scope(self.dense.name):
446
+ self.dense.build([None, None, self.config.hidden_size])
447
+
448
+
449
+ class TFBertOutput(keras.layers.Layer):
450
+ def __init__(self, config: BertConfig, **kwargs):
451
+ super().__init__(**kwargs)
452
+
453
+ self.dense = keras.layers.Dense(
454
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
455
+ )
456
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
457
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
458
+ self.config = config
459
+
460
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
461
+ hidden_states = self.dense(inputs=hidden_states)
462
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
463
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
464
+
465
+ return hidden_states
466
+
467
+ def build(self, input_shape=None):
468
+ if self.built:
469
+ return
470
+ self.built = True
471
+ if getattr(self, "dense", None) is not None:
472
+ with tf.name_scope(self.dense.name):
473
+ self.dense.build([None, None, self.config.intermediate_size])
474
+ if getattr(self, "LayerNorm", None) is not None:
475
+ with tf.name_scope(self.LayerNorm.name):
476
+ self.LayerNorm.build([None, None, self.config.hidden_size])
477
+
478
+
479
+ class TFBertLayer(keras.layers.Layer):
480
+ def __init__(self, config: BertConfig, **kwargs):
481
+ super().__init__(**kwargs)
482
+
483
+ self.attention = TFBertAttention(config, name="attention")
484
+ self.is_decoder = config.is_decoder
485
+ self.add_cross_attention = config.add_cross_attention
486
+ if self.add_cross_attention:
487
+ if not self.is_decoder:
488
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
489
+ self.crossattention = TFBertAttention(config, name="crossattention")
490
+ self.intermediate = TFBertIntermediate(config, name="intermediate")
491
+ self.bert_output = TFBertOutput(config, name="output")
492
+
493
+ def call(
494
+ self,
495
+ hidden_states: tf.Tensor,
496
+ attention_mask: tf.Tensor,
497
+ head_mask: tf.Tensor,
498
+ encoder_hidden_states: tf.Tensor | None,
499
+ encoder_attention_mask: tf.Tensor | None,
500
+ past_key_value: Tuple[tf.Tensor] | None,
501
+ output_attentions: bool,
502
+ training: bool = False,
503
+ ) -> Tuple[tf.Tensor]:
504
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
505
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
506
+ self_attention_outputs = self.attention(
507
+ input_tensor=hidden_states,
508
+ attention_mask=attention_mask,
509
+ head_mask=head_mask,
510
+ encoder_hidden_states=None,
511
+ encoder_attention_mask=None,
512
+ past_key_value=self_attn_past_key_value,
513
+ output_attentions=output_attentions,
514
+ training=training,
515
+ )
516
+ attention_output = self_attention_outputs[0]
517
+
518
+ # if decoder, the last output is tuple of self-attn cache
519
+ if self.is_decoder:
520
+ outputs = self_attention_outputs[1:-1]
521
+ present_key_value = self_attention_outputs[-1]
522
+ else:
523
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
524
+
525
+ cross_attn_present_key_value = None
526
+ if self.is_decoder and encoder_hidden_states is not None:
527
+ if not hasattr(self, "crossattention"):
528
+ raise ValueError(
529
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
530
+ " by setting `config.add_cross_attention=True`"
531
+ )
532
+
533
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
534
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
535
+ cross_attention_outputs = self.crossattention(
536
+ input_tensor=attention_output,
537
+ attention_mask=attention_mask,
538
+ head_mask=head_mask,
539
+ encoder_hidden_states=encoder_hidden_states,
540
+ encoder_attention_mask=encoder_attention_mask,
541
+ past_key_value=cross_attn_past_key_value,
542
+ output_attentions=output_attentions,
543
+ training=training,
544
+ )
545
+ attention_output = cross_attention_outputs[0]
546
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
547
+
548
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
549
+ cross_attn_present_key_value = cross_attention_outputs[-1]
550
+ present_key_value = present_key_value + cross_attn_present_key_value
551
+
552
+ intermediate_output = self.intermediate(hidden_states=attention_output)
553
+ layer_output = self.bert_output(
554
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
555
+ )
556
+ outputs = (layer_output,) + outputs # add attentions if we output them
557
+
558
+ # if decoder, return the attn key/values as the last output
559
+ if self.is_decoder:
560
+ outputs = outputs + (present_key_value,)
561
+
562
+ return outputs
563
+
564
+ def build(self, input_shape=None):
565
+ if self.built:
566
+ return
567
+ self.built = True
568
+ if getattr(self, "attention", None) is not None:
569
+ with tf.name_scope(self.attention.name):
570
+ self.attention.build(None)
571
+ if getattr(self, "intermediate", None) is not None:
572
+ with tf.name_scope(self.intermediate.name):
573
+ self.intermediate.build(None)
574
+ if getattr(self, "bert_output", None) is not None:
575
+ with tf.name_scope(self.bert_output.name):
576
+ self.bert_output.build(None)
577
+ if getattr(self, "crossattention", None) is not None:
578
+ with tf.name_scope(self.crossattention.name):
579
+ self.crossattention.build(None)
580
+
581
+
582
+ class TFBertEncoder(keras.layers.Layer):
583
+ def __init__(self, config: BertConfig, **kwargs):
584
+ super().__init__(**kwargs)
585
+ self.config = config
586
+ self.layer = [TFBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
587
+
588
+ def call(
589
+ self,
590
+ hidden_states: tf.Tensor,
591
+ attention_mask: tf.Tensor,
592
+ head_mask: tf.Tensor,
593
+ encoder_hidden_states: tf.Tensor | None,
594
+ encoder_attention_mask: tf.Tensor | None,
595
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
596
+ use_cache: Optional[bool],
597
+ output_attentions: bool,
598
+ output_hidden_states: bool,
599
+ return_dict: bool,
600
+ training: bool = False,
601
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
602
+ all_hidden_states = () if output_hidden_states else None
603
+ all_attentions = () if output_attentions else None
604
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
605
+
606
+ next_decoder_cache = () if use_cache else None
607
+ for i, layer_module in enumerate(self.layer):
608
+ if output_hidden_states:
609
+ all_hidden_states = all_hidden_states + (hidden_states,)
610
+
611
+ past_key_value = past_key_values[i] if past_key_values is not None else None
612
+
613
+ layer_outputs = layer_module(
614
+ hidden_states=hidden_states,
615
+ attention_mask=attention_mask,
616
+ head_mask=head_mask[i],
617
+ encoder_hidden_states=encoder_hidden_states,
618
+ encoder_attention_mask=encoder_attention_mask,
619
+ past_key_value=past_key_value,
620
+ output_attentions=output_attentions,
621
+ training=training,
622
+ )
623
+ hidden_states = layer_outputs[0]
624
+
625
+ if use_cache:
626
+ next_decoder_cache += (layer_outputs[-1],)
627
+
628
+ if output_attentions:
629
+ all_attentions = all_attentions + (layer_outputs[1],)
630
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
631
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
632
+
633
+ # Add last layer
634
+ if output_hidden_states:
635
+ all_hidden_states = all_hidden_states + (hidden_states,)
636
+
637
+ if not return_dict:
638
+ return tuple(
639
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
640
+ )
641
+
642
+ return TFBaseModelOutputWithPastAndCrossAttentions(
643
+ last_hidden_state=hidden_states,
644
+ past_key_values=next_decoder_cache,
645
+ hidden_states=all_hidden_states,
646
+ attentions=all_attentions,
647
+ cross_attentions=all_cross_attentions,
648
+ )
649
+
650
+ def build(self, input_shape=None):
651
+ if self.built:
652
+ return
653
+ self.built = True
654
+ if getattr(self, "layer", None) is not None:
655
+ for layer in self.layer:
656
+ with tf.name_scope(layer.name):
657
+ layer.build(None)
658
+
659
+
660
+ class TFBertPooler(keras.layers.Layer):
661
+ def __init__(self, config: BertConfig, **kwargs):
662
+ super().__init__(**kwargs)
663
+
664
+ self.dense = keras.layers.Dense(
665
+ units=config.hidden_size,
666
+ kernel_initializer=get_initializer(config.initializer_range),
667
+ activation="tanh",
668
+ name="dense",
669
+ )
670
+ self.config = config
671
+
672
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
673
+ # We "pool" the model by simply taking the hidden state corresponding
674
+ # to the first token.
675
+ first_token_tensor = hidden_states[:, 0]
676
+ pooled_output = self.dense(inputs=first_token_tensor)
677
+
678
+ return pooled_output
679
+
680
+ def build(self, input_shape=None):
681
+ if self.built:
682
+ return
683
+ self.built = True
684
+ if getattr(self, "dense", None) is not None:
685
+ with tf.name_scope(self.dense.name):
686
+ self.dense.build([None, None, self.config.hidden_size])
687
+
688
+
689
+ class TFBertPredictionHeadTransform(keras.layers.Layer):
690
+ def __init__(self, config: BertConfig, **kwargs):
691
+ super().__init__(**kwargs)
692
+
693
+ self.dense = keras.layers.Dense(
694
+ units=config.hidden_size,
695
+ kernel_initializer=get_initializer(config.initializer_range),
696
+ name="dense",
697
+ )
698
+
699
+ if isinstance(config.hidden_act, str):
700
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
701
+ else:
702
+ self.transform_act_fn = config.hidden_act
703
+
704
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
705
+ self.config = config
706
+
707
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
708
+ hidden_states = self.dense(inputs=hidden_states)
709
+ hidden_states = self.transform_act_fn(hidden_states)
710
+ hidden_states = self.LayerNorm(inputs=hidden_states)
711
+
712
+ return hidden_states
713
+
714
+ def build(self, input_shape=None):
715
+ if self.built:
716
+ return
717
+ self.built = True
718
+ if getattr(self, "dense", None) is not None:
719
+ with tf.name_scope(self.dense.name):
720
+ self.dense.build([None, None, self.config.hidden_size])
721
+ if getattr(self, "LayerNorm", None) is not None:
722
+ with tf.name_scope(self.LayerNorm.name):
723
+ self.LayerNorm.build([None, None, self.config.hidden_size])
724
+
725
+
726
+ class TFBertLMPredictionHead(keras.layers.Layer):
727
+ def __init__(self, config: BertConfig, input_embeddings: keras.layers.Layer, **kwargs):
728
+ super().__init__(**kwargs)
729
+
730
+ self.config = config
731
+ self.hidden_size = config.hidden_size
732
+
733
+ self.transform = TFBertPredictionHeadTransform(config, name="transform")
734
+
735
+ # The output weights are the same as the input embeddings, but there is
736
+ # an output-only bias for each token.
737
+ self.input_embeddings = input_embeddings
738
+
739
+ def build(self, input_shape=None):
740
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
741
+
742
+ if self.built:
743
+ return
744
+ self.built = True
745
+ if getattr(self, "transform", None) is not None:
746
+ with tf.name_scope(self.transform.name):
747
+ self.transform.build(None)
748
+
749
+ def get_output_embeddings(self) -> keras.layers.Layer:
750
+ return self.input_embeddings
751
+
752
+ def set_output_embeddings(self, value: tf.Variable):
753
+ self.input_embeddings.weight = value
754
+ self.input_embeddings.vocab_size = shape_list(value)[0]
755
+
756
+ def get_bias(self) -> Dict[str, tf.Variable]:
757
+ return {"bias": self.bias}
758
+
759
+ def set_bias(self, value: tf.Variable):
760
+ self.bias = value["bias"]
761
+ self.config.vocab_size = shape_list(value["bias"])[0]
762
+
763
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
764
+ hidden_states = self.transform(hidden_states=hidden_states)
765
+ seq_length = shape_list(hidden_states)[1]
766
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
767
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
768
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
769
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
770
+
771
+ return hidden_states
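The prediction head above reuses the input embedding matrix as the output projection and only adds a per-token bias; the matmul with transpose_b is the whole trick. A minimal sketch with made-up sizes:

import tensorflow as tf

vocab_size, hidden, batch, seq = 100, 16, 2, 3
embedding_matrix = tf.random.normal((vocab_size, hidden))   # shared with the input embeddings
bias = tf.zeros((vocab_size,))

hidden_states = tf.random.normal((batch, seq, hidden))

# Project back onto the vocabulary with the transposed embedding table, then add the bias
flat = tf.reshape(hidden_states, (-1, hidden))
logits = tf.matmul(flat, embedding_matrix, transpose_b=True)
logits = tf.reshape(logits, (batch, seq, vocab_size)) + bias
print(logits.shape)  # (2, 3, 100)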
772
+
773
+
774
+ class TFBertMLMHead(keras.layers.Layer):
775
+ def __init__(self, config: BertConfig, input_embeddings: keras.layers.Layer, **kwargs):
776
+ super().__init__(**kwargs)
777
+
778
+ self.predictions = TFBertLMPredictionHead(config, input_embeddings, name="predictions")
779
+
780
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
781
+ prediction_scores = self.predictions(hidden_states=sequence_output)
782
+
783
+ return prediction_scores
784
+
785
+ def build(self, input_shape=None):
786
+ if self.built:
787
+ return
788
+ self.built = True
789
+ if getattr(self, "predictions", None) is not None:
790
+ with tf.name_scope(self.predictions.name):
791
+ self.predictions.build(None)
792
+
793
+
794
+ class TFBertNSPHead(keras.layers.Layer):
795
+ def __init__(self, config: BertConfig, **kwargs):
796
+ super().__init__(**kwargs)
797
+
798
+ self.seq_relationship = keras.layers.Dense(
799
+ units=2,
800
+ kernel_initializer=get_initializer(config.initializer_range),
801
+ name="seq_relationship",
802
+ )
803
+ self.config = config
804
+
805
+ def call(self, pooled_output: tf.Tensor) -> tf.Tensor:
806
+ seq_relationship_score = self.seq_relationship(inputs=pooled_output)
807
+
808
+ return seq_relationship_score
809
+
810
+ def build(self, input_shape=None):
811
+ if self.built:
812
+ return
813
+ self.built = True
814
+ if getattr(self, "seq_relationship", None) is not None:
815
+ with tf.name_scope(self.seq_relationship.name):
816
+ self.seq_relationship.build([None, None, self.config.hidden_size])
817
+
818
+
819
+ @keras_serializable
820
+ class TFBertMainLayer(keras.layers.Layer):
821
+ config_class = BertConfig
822
+
823
+ def __init__(self, config: BertConfig, add_pooling_layer: bool = True, **kwargs):
824
+ super().__init__(**kwargs)
825
+
826
+ self.config = config
827
+ self.is_decoder = config.is_decoder
828
+
829
+ self.embeddings = TFBertEmbeddings(config, name="embeddings")
830
+ self.encoder = TFBertEncoder(config, name="encoder")
831
+ self.pooler = TFBertPooler(config, name="pooler") if add_pooling_layer else None
832
+
833
+ def get_input_embeddings(self) -> keras.layers.Layer:
834
+ return self.embeddings
835
+
836
+ def set_input_embeddings(self, value: tf.Variable):
837
+ self.embeddings.weight = value
838
+ self.embeddings.vocab_size = shape_list(value)[0]
839
+
840
+ def _prune_heads(self, heads_to_prune):
841
+ """
842
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
843
+ class PreTrainedModel
844
+ """
845
+ raise NotImplementedError
846
+
847
+ @unpack_inputs
848
+ def call(
849
+ self,
850
+ input_ids: TFModelInputType | None = None,
851
+ attention_mask: np.ndarray | tf.Tensor | None = None,
852
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
853
+ position_ids: np.ndarray | tf.Tensor | None = None,
854
+ head_mask: np.ndarray | tf.Tensor | None = None,
855
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
856
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
857
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
858
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
859
+ use_cache: Optional[bool] = None,
860
+ output_attentions: Optional[bool] = None,
861
+ output_hidden_states: Optional[bool] = None,
862
+ return_dict: Optional[bool] = None,
863
+ training: bool = False,
864
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
865
+ if not self.config.is_decoder:
866
+ use_cache = False
867
+
868
+ if input_ids is not None and inputs_embeds is not None:
869
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
870
+ elif input_ids is not None:
871
+ input_shape = shape_list(input_ids)
872
+ elif inputs_embeds is not None:
873
+ input_shape = shape_list(inputs_embeds)[:-1]
874
+ else:
875
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
876
+
877
+ batch_size, seq_length = input_shape
878
+
879
+ if past_key_values is None:
880
+ past_key_values_length = 0
881
+ past_key_values = [None] * len(self.encoder.layer)
882
+ else:
883
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
884
+
885
+ if attention_mask is None:
886
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
887
+
888
+ if token_type_ids is None:
889
+ token_type_ids = tf.fill(dims=input_shape, value=0)
890
+
891
+ embedding_output = self.embeddings(
892
+ input_ids=input_ids,
893
+ position_ids=position_ids,
894
+ token_type_ids=token_type_ids,
895
+ inputs_embeds=inputs_embeds,
896
+ past_key_values_length=past_key_values_length,
897
+ training=training,
898
+ )
899
+
900
+ # We create a 3D attention mask from a 2D tensor mask.
901
+ # Sizes are [batch_size, 1, 1, to_seq_length]
902
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
903
+ # this attention mask is simpler than the triangular masking of causal attention
904
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
905
+ attention_mask_shape = shape_list(attention_mask)
906
+
907
+ mask_seq_length = seq_length + past_key_values_length
908
+ # Copied from `modeling_tf_t5.py`
909
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
910
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
911
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
912
+ if self.is_decoder:
913
+ seq_ids = tf.range(mask_seq_length)
914
+ causal_mask = tf.less_equal(
915
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
916
+ seq_ids[None, :, None],
917
+ )
918
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
919
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
920
+ attention_mask_shape = shape_list(extended_attention_mask)
921
+ extended_attention_mask = tf.reshape(
922
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
923
+ )
924
+ if past_key_values[0] is not None:
925
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
926
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
927
+ else:
928
+ extended_attention_mask = tf.reshape(
929
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
930
+ )
931
+
932
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
933
+ # masked positions, this operation will create a tensor which is 0.0 for
934
+ # positions we want to attend and -10000.0 for masked positions.
935
+ # Since we are adding it to the raw scores before the softmax, this is
936
+ # effectively the same as removing these entirely.
937
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
938
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
939
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
940
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
941
+
942
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
943
+ if self.is_decoder and encoder_attention_mask is not None:
944
+ # If a 2D or 3D attention mask is provided for the cross-attention
945
+ # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
946
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
947
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
948
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
949
+ if num_dims_encoder_attention_mask == 3:
950
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
951
+ if num_dims_encoder_attention_mask == 2:
952
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
953
+
954
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
955
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
956
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
957
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
958
+
959
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
960
+ else:
961
+ encoder_extended_attention_mask = None
962
+
963
+ # Prepare head mask if needed
964
+ # 1.0 in head_mask indicate we keep the head
965
+ # attention_probs has shape bsz x n_heads x N x N
966
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
967
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
968
+ if head_mask is not None:
969
+ raise NotImplementedError
970
+ else:
971
+ head_mask = [None] * self.config.num_hidden_layers
972
+
973
+ encoder_outputs = self.encoder(
974
+ hidden_states=embedding_output,
975
+ attention_mask=extended_attention_mask,
976
+ head_mask=head_mask,
977
+ encoder_hidden_states=encoder_hidden_states,
978
+ encoder_attention_mask=encoder_extended_attention_mask,
979
+ past_key_values=past_key_values,
980
+ use_cache=use_cache,
981
+ output_attentions=output_attentions,
982
+ output_hidden_states=output_hidden_states,
983
+ return_dict=return_dict,
984
+ training=training,
985
+ )
986
+
987
+ sequence_output = encoder_outputs[0]
988
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
989
+
990
+ if not return_dict:
991
+ return (
992
+ sequence_output,
993
+ pooled_output,
994
+ ) + encoder_outputs[1:]
995
+
996
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
997
+ last_hidden_state=sequence_output,
998
+ pooler_output=pooled_output,
999
+ past_key_values=encoder_outputs.past_key_values,
1000
+ hidden_states=encoder_outputs.hidden_states,
1001
+ attentions=encoder_outputs.attentions,
1002
+ cross_attentions=encoder_outputs.cross_attentions,
1003
+ )
1004
+
1005
+ def build(self, input_shape=None):
1006
+ if self.built:
1007
+ return
1008
+ self.built = True
1009
+ if getattr(self, "embeddings", None) is not None:
1010
+ with tf.name_scope(self.embeddings.name):
1011
+ self.embeddings.build(None)
1012
+ if getattr(self, "encoder", None) is not None:
1013
+ with tf.name_scope(self.encoder.name):
1014
+ self.encoder.build(None)
1015
+ if getattr(self, "pooler", None) is not None:
1016
+ with tf.name_scope(self.pooler.name):
1017
+ self.pooler.build(None)
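The mask arithmetic in TFBertMainLayer.call above can be hard to follow inline. Below is a standalone sketch of the two cases it handles: a padding mask broadcast to 4D and made additive (-10000 on masked positions), and the lower-triangular causal mask combined with it when the model is a decoder. Shapes and values are illustrative only.

import tensorflow as tf

attention_mask = tf.constant([[1, 1, 0]], dtype=tf.float32)   # 1 = attend, 0 = padding
batch, seq = attention_mask.shape

# Encoder case: broadcast to (batch, 1, 1, seq) and make it additive
extended = tf.reshape(attention_mask, (batch, 1, 1, seq))
extended = (1.0 - extended) * -10000.0                        # 0 where visible, -10000 where masked

# Decoder case: combine with a lower-triangular causal mask before broadcasting
seq_ids = tf.range(seq)
causal = tf.cast(seq_ids[None, None, :] <= seq_ids[None, :, None], tf.float32)   # (1, seq, seq)
decoder_mask = causal * attention_mask[:, None, :]
decoder_mask = (1.0 - decoder_mask[:, None, :, :]) * -10000.0
print(extended.shape, decoder_mask.shape)   # (1, 1, 1, 3) (1, 1, 3, 3)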
1018
+
1019
+
1020
+ class TFBertPreTrainedModel(TFPreTrainedModel):
1021
+ """
1022
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1023
+ models.
1024
+ """
1025
+
1026
+ config_class = BertConfig
1027
+ base_model_prefix = "bert"
1028
+
1029
+
1030
+ @dataclass
1031
+ class TFBertForPreTrainingOutput(ModelOutput):
1032
+ """
1033
+ Output type of [`TFBertForPreTraining`].
1034
+
1035
+ Args:
1036
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
1037
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
1038
+ seq_relationship_logits (`tf.Tensor` of shape `(batch_size, 2)`):
1039
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
1040
+ before SoftMax).
1041
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1042
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1043
+ `(batch_size, sequence_length, hidden_size)`.
1044
+
1045
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1046
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1047
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1048
+ sequence_length)`.
1049
+
1050
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1051
+ heads.
1052
+ """
1053
+
1054
+ loss: tf.Tensor | None = None
1055
+ prediction_logits: tf.Tensor = None
1056
+ seq_relationship_logits: tf.Tensor = None
1057
+ hidden_states: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
1058
+ attentions: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
1059
+
1060
+
1061
+ BERT_START_DOCSTRING = r"""
1062
+
1063
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1064
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1065
+ etc.)
1066
+
1067
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1068
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1069
+ behavior.
1070
+
1071
+ <Tip>
1072
+
1073
+ TensorFlow models and layers in `transformers` accept two formats as input:
1074
+
1075
+ - having all inputs as keyword arguments (like PyTorch models), or
1076
+ - having all inputs as a list, tuple or dict in the first positional argument.
1077
+
1078
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1079
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1080
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1081
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1082
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1083
+ positional argument:
1084
+
1085
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1086
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1087
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1088
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1089
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1090
+
1091
+ Note that when creating models and layers with
1092
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1093
+ about any of this, as you can just pass inputs like you would to any other Python function!
1094
+
1095
+ </Tip>
1096
+
1097
+ Args:
1098
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
1099
+ Initializing with a config file does not load the weights associated with the model, only the
1100
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
1101
+ """
1102
+
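# Illustrative sketch of the three input formats described in BERT_START_DOCSTRING above,
# assuming the installed `transformers` and `tensorflow` packages plus network access to
# fetch the "google-bert/bert-base-uncased" checkpoint; the three calls are equivalent.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertModel.from_pretrained("google-bert/bert-base-uncased")
enc = tokenizer("Hello, my dog is cute", return_tensors="tf")

out_kwargs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])        # keyword arguments
out_list = model([enc["input_ids"], enc["attention_mask"]])                                  # list in the first positional argument
out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})   # dict in the first positional argument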
1103
+ BERT_INPUTS_DOCSTRING = r"""
1104
+ Args:
1105
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
1106
+ Indices of input sequence tokens in the vocabulary.
1107
+
1108
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1109
+ [`PreTrainedTokenizer.encode`] for details.
1110
+
1111
+ [What are input IDs?](../glossary#input-ids)
1112
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1113
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1114
+
1115
+ - 1 for tokens that are **not masked**,
1116
+ - 0 for tokens that are **masked**.
1117
+
1118
+ [What are attention masks?](../glossary#attention-mask)
1119
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1120
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1121
+ 1]`:
1122
+
1123
+ - 0 corresponds to a *sentence A* token,
1124
+ - 1 corresponds to a *sentence B* token.
1125
+
1126
+ [What are token type IDs?](../glossary#token-type-ids)
1127
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1128
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1129
+ config.max_position_embeddings - 1]`.
1130
+
1131
+ [What are position IDs?](../glossary#position-ids)
1132
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1133
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1134
+
1135
+ - 1 indicates the head is **not masked**,
1136
+ - 0 indicates the head is **masked**.
1137
+
1138
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1139
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1140
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1141
+ model's internal embedding lookup matrix.
1142
+ output_attentions (`bool`, *optional*):
1143
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1144
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1145
+ config will be used instead.
1146
+ output_hidden_states (`bool`, *optional*):
1147
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1148
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1149
+ used instead.
1150
+ return_dict (`bool`, *optional*):
1151
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1152
+ eager mode, in graph mode the value will always be set to True.
1153
+ training (`bool`, *optional*, defaults to `False`):
1154
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1155
+ behaviors between training and evaluation).
1156
+ """
1157
+
1158
+
1159
+ @add_start_docstrings(
1160
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
1161
+ BERT_START_DOCSTRING,
1162
+ )
1163
+ class TFBertModel(TFBertPreTrainedModel):
1164
+ def __init__(self, config: BertConfig, add_pooling_layer: bool = True, *inputs, **kwargs):
1165
+ super().__init__(config, *inputs, **kwargs)
1166
+
1167
+ self.bert = TFBertMainLayer(config, add_pooling_layer, name="bert")
1168
+
1169
+ @unpack_inputs
1170
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1171
+ @add_code_sample_docstrings(
1172
+ checkpoint=_CHECKPOINT_FOR_DOC,
1173
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
1174
+ config_class=_CONFIG_FOR_DOC,
1175
+ )
1176
+ def call(
1177
+ self,
1178
+ input_ids: TFModelInputType | None = None,
1179
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1180
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1181
+ position_ids: np.ndarray | tf.Tensor | None = None,
1182
+ head_mask: np.ndarray | tf.Tensor | None = None,
1183
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1184
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1185
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1186
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1187
+ use_cache: Optional[bool] = None,
1188
+ output_attentions: Optional[bool] = None,
1189
+ output_hidden_states: Optional[bool] = None,
1190
+ return_dict: Optional[bool] = None,
1191
+ training: Optional[bool] = False,
1192
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
1193
+ r"""
1194
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1195
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1196
+ the model is configured as a decoder.
1197
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1198
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1199
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1200
+
1201
+ - 1 for tokens that are **not masked**,
1202
+ - 0 for tokens that are **masked**.
1203
+
1204
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1205
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1206
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1207
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1208
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1209
+ use_cache (`bool`, *optional*, defaults to `True`):
1210
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1211
+ `past_key_values`). Set to `False` during training, `True` during generation.
1212
+ """
1213
+ outputs = self.bert(
1214
+ input_ids=input_ids,
1215
+ attention_mask=attention_mask,
1216
+ token_type_ids=token_type_ids,
1217
+ position_ids=position_ids,
1218
+ head_mask=head_mask,
1219
+ inputs_embeds=inputs_embeds,
1220
+ encoder_hidden_states=encoder_hidden_states,
1221
+ encoder_attention_mask=encoder_attention_mask,
1222
+ past_key_values=past_key_values,
1223
+ use_cache=use_cache,
1224
+ output_attentions=output_attentions,
1225
+ output_hidden_states=output_hidden_states,
1226
+ return_dict=return_dict,
1227
+ training=training,
1228
+ )
1229
+ return outputs
1230
+
1231
+ def build(self, input_shape=None):
1232
+ if self.built:
1233
+ return
1234
+ self.built = True
1235
+ if getattr(self, "bert", None) is not None:
1236
+ with tf.name_scope(self.bert.name):
1237
+ self.bert.build(None)
1238
+
1239
+
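# Illustrative usage sketch for the bare TFBertModel defined above, assuming network
# access to download the "google-bert/bert-base-uncased" checkpoint.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertModel.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer("BERT encodes this sentence.", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size)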
1240
+ @add_start_docstrings(
1241
+ """
1242
+ Bert Model with two heads on top as done during the pretraining:
1243
+ a `masked language modeling` head and a `next sentence prediction (classification)` head.
1244
+ """,
1245
+ BERT_START_DOCSTRING,
1246
+ )
1247
+ class TFBertForPreTraining(TFBertPreTrainedModel, TFBertPreTrainingLoss):
1248
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1249
+ _keys_to_ignore_on_load_unexpected = [
1250
+ r"position_ids",
1251
+ r"cls.predictions.decoder.weight",
1252
+ r"cls.predictions.decoder.bias",
1253
+ ]
1254
+
1255
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1256
+ super().__init__(config, *inputs, **kwargs)
1257
+
1258
+ self.bert = TFBertMainLayer(config, name="bert")
1259
+ self.nsp = TFBertNSPHead(config, name="nsp___cls")
1260
+ self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
1261
+
1262
+ def get_lm_head(self) -> keras.layers.Layer:
1263
+ return self.mlm.predictions
1264
+
1265
+ def get_prefix_bias_name(self) -> str:
1266
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1267
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1268
+
1269
+ @unpack_inputs
1270
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1271
+ @replace_return_docstrings(output_type=TFBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1272
+ def call(
1273
+ self,
1274
+ input_ids: TFModelInputType | None = None,
1275
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1276
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1277
+ position_ids: np.ndarray | tf.Tensor | None = None,
1278
+ head_mask: np.ndarray | tf.Tensor | None = None,
1279
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1280
+ output_attentions: Optional[bool] = None,
1281
+ output_hidden_states: Optional[bool] = None,
1282
+ return_dict: Optional[bool] = None,
1283
+ labels: np.ndarray | tf.Tensor | None = None,
1284
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1285
+ training: Optional[bool] = False,
1286
+ ) -> Union[TFBertForPreTrainingOutput, Tuple[tf.Tensor]]:
1287
+ r"""
1288
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1289
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1290
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1291
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1292
+ next_sentence_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1293
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1294
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1295
+
1296
+ - 0 indicates sequence B is a continuation of sequence A,
1297
+ - 1 indicates sequence B is a random sequence.
1298
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
1299
+ Used to hide legacy arguments that have been deprecated.
1300
+
1301
+ Return:
1302
+
1303
+ Examples:
1304
+
1305
+ ```python
1306
+ >>> import tensorflow as tf
1307
+ >>> from transformers import AutoTokenizer, TFBertForPreTraining
1308
+
1309
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1310
+ >>> model = TFBertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
1311
+ >>> input_ids = tokenizer("Hello, my dog is cute", add_special_tokens=True, return_tensors="tf")
1312
+ >>> # Batch size 1
1313
+
1314
+ >>> outputs = model(input_ids)
1315
+ >>> prediction_logits, seq_relationship_logits = outputs[:2]
1316
+ ```"""
1317
+ outputs = self.bert(
1318
+ input_ids=input_ids,
1319
+ attention_mask=attention_mask,
1320
+ token_type_ids=token_type_ids,
1321
+ position_ids=position_ids,
1322
+ head_mask=head_mask,
1323
+ inputs_embeds=inputs_embeds,
1324
+ output_attentions=output_attentions,
1325
+ output_hidden_states=output_hidden_states,
1326
+ return_dict=return_dict,
1327
+ training=training,
1328
+ )
1329
+ sequence_output, pooled_output = outputs[:2]
1330
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1331
+ seq_relationship_score = self.nsp(pooled_output=pooled_output)
1332
+ total_loss = None
1333
+
1334
+ if labels is not None and next_sentence_label is not None:
1335
+ d_labels = {"labels": labels}
1336
+ d_labels["next_sentence_label"] = next_sentence_label
1337
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))
1338
+
1339
+ if not return_dict:
1340
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1341
+ return ((total_loss,) + output) if total_loss is not None else output
1342
+
1343
+ return TFBertForPreTrainingOutput(
1344
+ loss=total_loss,
1345
+ prediction_logits=prediction_scores,
1346
+ seq_relationship_logits=seq_relationship_score,
1347
+ hidden_states=outputs.hidden_states,
1348
+ attentions=outputs.attentions,
1349
+ )
1350
+
1351
+ def build(self, input_shape=None):
1352
+ if self.built:
1353
+ return
1354
+ self.built = True
1355
+ if getattr(self, "bert", None) is not None:
1356
+ with tf.name_scope(self.bert.name):
1357
+ self.bert.build(None)
1358
+ if getattr(self, "nsp", None) is not None:
1359
+ with tf.name_scope(self.nsp.name):
1360
+ self.nsp.build(None)
1361
+ if getattr(self, "mlm", None) is not None:
1362
+ with tf.name_scope(self.mlm.name):
1363
+ self.mlm.build(None)
1364
+
1365
+
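# Illustrative sketch for TFBertForPreTraining above: both pretraining heads return
# logits, and a combined loss is produced when `labels` and `next_sentence_label` are
# passed together. Assumes network access to fetch the checkpoint.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertForPreTraining.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(**inputs)
print(outputs.prediction_logits.shape)        # (1, sequence_length, vocab_size) - MLM head
print(outputs.seq_relationship_logits.shape)  # (1, 2) - NSP head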
1366
+ @add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
1367
+ class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss):
1368
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1369
+ _keys_to_ignore_on_load_unexpected = [
1370
+ r"pooler",
1371
+ r"cls.seq_relationship",
1372
+ r"cls.predictions.decoder.weight",
1373
+ r"nsp___cls",
1374
+ ]
1375
+
1376
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1377
+ super().__init__(config, *inputs, **kwargs)
1378
+
1379
+ if config.is_decoder:
1380
+ logger.warning(
1381
+ "If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for "
1382
+ "bi-directional self-attention."
1383
+ )
1384
+
1385
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
1386
+ self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
1387
+
1388
+ def get_lm_head(self) -> keras.layers.Layer:
1389
+ return self.mlm.predictions
1390
+
1391
+ def get_prefix_bias_name(self) -> str:
1392
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1393
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1394
+
1395
+ @unpack_inputs
1396
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1397
+ @add_code_sample_docstrings(
1398
+ checkpoint=_CHECKPOINT_FOR_DOC,
1399
+ output_type=TFMaskedLMOutput,
1400
+ config_class=_CONFIG_FOR_DOC,
1401
+ expected_output="'paris'",
1402
+ expected_loss=0.88,
1403
+ )
1404
+ def call(
1405
+ self,
1406
+ input_ids: TFModelInputType | None = None,
1407
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1408
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1409
+ position_ids: np.ndarray | tf.Tensor | None = None,
1410
+ head_mask: np.ndarray | tf.Tensor | None = None,
1411
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1412
+ output_attentions: Optional[bool] = None,
1413
+ output_hidden_states: Optional[bool] = None,
1414
+ return_dict: Optional[bool] = None,
1415
+ labels: np.ndarray | tf.Tensor | None = None,
1416
+ training: Optional[bool] = False,
1417
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1418
+ r"""
1419
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1420
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1421
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1422
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1423
+ """
1424
+ outputs = self.bert(
1425
+ input_ids=input_ids,
1426
+ attention_mask=attention_mask,
1427
+ token_type_ids=token_type_ids,
1428
+ position_ids=position_ids,
1429
+ head_mask=head_mask,
1430
+ inputs_embeds=inputs_embeds,
1431
+ output_attentions=output_attentions,
1432
+ output_hidden_states=output_hidden_states,
1433
+ return_dict=return_dict,
1434
+ training=training,
1435
+ )
1436
+ sequence_output = outputs[0]
1437
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1438
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1439
+
1440
+ if not return_dict:
1441
+ output = (prediction_scores,) + outputs[2:]
1442
+ return ((loss,) + output) if loss is not None else output
1443
+
1444
+ return TFMaskedLMOutput(
1445
+ loss=loss,
1446
+ logits=prediction_scores,
1447
+ hidden_states=outputs.hidden_states,
1448
+ attentions=outputs.attentions,
1449
+ )
1450
+
1451
+ def build(self, input_shape=None):
1452
+ if self.built:
1453
+ return
1454
+ self.built = True
1455
+ if getattr(self, "bert", None) is not None:
1456
+ with tf.name_scope(self.bert.name):
1457
+ self.bert.build(None)
1458
+ if getattr(self, "mlm", None) is not None:
1459
+ with tf.name_scope(self.mlm.name):
1460
+ self.mlm.build(None)
1461
+
1462
+
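# Illustrative sketch for TFBertForMaskedLM above: predict the token behind the [MASK]
# position by taking the argmax over the MLM logits. Assumes network access.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertForMaskedLM.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
logits = model(**inputs).logits

# locate the first [MASK] position, then pick the highest-scoring vocabulary id there
mask_position = int(tf.argmax(tf.cast(inputs["input_ids"][0] == tokenizer.mask_token_id, tf.int32)))
predicted_id = int(tf.argmax(logits[0, mask_position]))
print(tokenizer.decode([predicted_id]))  # expected to be "paris" for this checkpoint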
1463
+ class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss):
1464
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1465
+ _keys_to_ignore_on_load_unexpected = [
1466
+ r"pooler",
1467
+ r"cls.seq_relationship",
1468
+ r"cls.predictions.decoder.weight",
1469
+ r"nsp___cls",
1470
+ ]
1471
+
1472
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1473
+ super().__init__(config, *inputs, **kwargs)
1474
+
1475
+ if not config.is_decoder:
1476
+ logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`")
1477
+
1478
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
1479
+ self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
1480
+
1481
+ def get_lm_head(self) -> keras.layers.Layer:
1482
+ return self.mlm.predictions
1483
+
1484
+ def get_prefix_bias_name(self) -> str:
1485
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1486
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1487
+
1488
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1489
+ input_shape = input_ids.shape
1490
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1491
+ if attention_mask is None:
1492
+ attention_mask = tf.ones(input_shape)
1493
+
1494
+ # cut decoder_input_ids if past is used
1495
+ if past_key_values is not None:
1496
+ input_ids = input_ids[:, -1:]
1497
+
1498
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1499
+
1500
+ @unpack_inputs
1501
+ @add_code_sample_docstrings(
1502
+ checkpoint=_CHECKPOINT_FOR_DOC,
1503
+ output_type=TFCausalLMOutputWithCrossAttentions,
1504
+ config_class=_CONFIG_FOR_DOC,
1505
+ )
1506
+ def call(
1507
+ self,
1508
+ input_ids: TFModelInputType | None = None,
1509
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1510
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1511
+ position_ids: np.ndarray | tf.Tensor | None = None,
1512
+ head_mask: np.ndarray | tf.Tensor | None = None,
1513
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1514
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1515
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1516
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1517
+ use_cache: Optional[bool] = None,
1518
+ output_attentions: Optional[bool] = None,
1519
+ output_hidden_states: Optional[bool] = None,
1520
+ return_dict: Optional[bool] = None,
1521
+ labels: np.ndarray | tf.Tensor | None = None,
1522
+ training: Optional[bool] = False,
1523
+ **kwargs,
1524
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1525
+ r"""
1526
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1527
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1528
+ the model is configured as a decoder.
1529
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1530
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1531
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1532
+
1533
+ - 1 for tokens that are **not masked**,
1534
+ - 0 for tokens that are **masked**.
1535
+
1536
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1537
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1538
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1539
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1540
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1541
+ use_cache (`bool`, *optional*, defaults to `True`):
1542
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1543
+ `past_key_values`). Set to `False` during training, `True` during generation.
1544
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1545
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1546
+ config.vocab_size - 1]`.
1547
+ """
1548
+ outputs = self.bert(
1549
+ input_ids=input_ids,
1550
+ attention_mask=attention_mask,
1551
+ token_type_ids=token_type_ids,
1552
+ position_ids=position_ids,
1553
+ head_mask=head_mask,
1554
+ inputs_embeds=inputs_embeds,
1555
+ encoder_hidden_states=encoder_hidden_states,
1556
+ encoder_attention_mask=encoder_attention_mask,
1557
+ past_key_values=past_key_values,
1558
+ use_cache=use_cache,
1559
+ output_attentions=output_attentions,
1560
+ output_hidden_states=output_hidden_states,
1561
+ return_dict=return_dict,
1562
+ training=training,
1563
+ )
1564
+ sequence_output = outputs[0]
1565
+ logits = self.mlm(sequence_output=sequence_output, training=training)
1566
+ loss = None
1567
+
1568
+ if labels is not None:
1569
+ # shift labels to the left and cut last logit token
1570
+ shifted_logits = logits[:, :-1]
1571
+ labels = labels[:, 1:]
1572
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
1573
+
1574
+ if not return_dict:
1575
+ output = (logits,) + outputs[2:]
1576
+ return ((loss,) + output) if loss is not None else output
1577
+
1578
+ return TFCausalLMOutputWithCrossAttentions(
1579
+ loss=loss,
1580
+ logits=logits,
1581
+ past_key_values=outputs.past_key_values,
1582
+ hidden_states=outputs.hidden_states,
1583
+ attentions=outputs.attentions,
1584
+ cross_attentions=outputs.cross_attentions,
1585
+ )
1586
+
1587
+ def build(self, input_shape=None):
1588
+ if self.built:
1589
+ return
1590
+ self.built = True
1591
+ if getattr(self, "bert", None) is not None:
1592
+ with tf.name_scope(self.bert.name):
1593
+ self.bert.build(None)
1594
+ if getattr(self, "mlm", None) is not None:
1595
+ with tf.name_scope(self.mlm.name):
1596
+ self.mlm.build(None)
1597
+
1598
+
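# Illustrative sketch for TFBertLMHeadModel above, used as a causal decoder: the labels
# are the input ids themselves and the model shifts them internally (see the `call` body).
# `is_decoder=True` silences the standalone-usage warning. Assumes network access.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertLMHeadModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertLMHeadModel.from_pretrained("google-bert/bert-base-uncased", is_decoder=True)

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(**inputs, labels=inputs["input_ids"])
print(float(tf.reduce_mean(outputs.loss)))  # causal LM loss
print(outputs.logits.shape)                 # (1, sequence_length, vocab_size)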
1599
+ @add_start_docstrings(
1600
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
1601
+ BERT_START_DOCSTRING,
1602
+ )
1603
+ class TFBertForNextSentencePrediction(TFBertPreTrainedModel, TFNextSentencePredictionLoss):
1604
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1605
+ _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"cls.predictions"]
1606
+
1607
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1608
+ super().__init__(config, *inputs, **kwargs)
1609
+
1610
+ self.bert = TFBertMainLayer(config, name="bert")
1611
+ self.nsp = TFBertNSPHead(config, name="nsp___cls")
1612
+
1613
+ @unpack_inputs
1614
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1615
+ @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1616
+ def call(
1617
+ self,
1618
+ input_ids: TFModelInputType | None = None,
1619
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1620
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1621
+ position_ids: np.ndarray | tf.Tensor | None = None,
1622
+ head_mask: np.ndarray | tf.Tensor | None = None,
1623
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1624
+ output_attentions: Optional[bool] = None,
1625
+ output_hidden_states: Optional[bool] = None,
1626
+ return_dict: Optional[bool] = None,
1627
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1628
+ training: Optional[bool] = False,
1629
+ ) -> Union[TFNextSentencePredictorOutput, Tuple[tf.Tensor]]:
1630
+ r"""
1631
+ Return:
1632
+
1633
+ Examples:
1634
+
1635
+ ```python
1636
+ >>> import tensorflow as tf
1637
+ >>> from transformers import AutoTokenizer, TFBertForNextSentencePrediction
1638
+
1639
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1640
+ >>> model = TFBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1641
+
1642
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1643
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1644
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="tf")
1645
+
1646
+ >>> logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"])[0]
1647
+ >>> assert logits[0][0] < logits[0][1] # the next sentence was random
1648
+ ```"""
1649
+ outputs = self.bert(
1650
+ input_ids=input_ids,
1651
+ attention_mask=attention_mask,
1652
+ token_type_ids=token_type_ids,
1653
+ position_ids=position_ids,
1654
+ head_mask=head_mask,
1655
+ inputs_embeds=inputs_embeds,
1656
+ output_attentions=output_attentions,
1657
+ output_hidden_states=output_hidden_states,
1658
+ return_dict=return_dict,
1659
+ training=training,
1660
+ )
1661
+ pooled_output = outputs[1]
1662
+ seq_relationship_scores = self.nsp(pooled_output=pooled_output)
1663
+ next_sentence_loss = (
1664
+ None
1665
+ if next_sentence_label is None
1666
+ else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores)
1667
+ )
1668
+
1669
+ if not return_dict:
1670
+ output = (seq_relationship_scores,) + outputs[2:]
1671
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1672
+
1673
+ return TFNextSentencePredictorOutput(
1674
+ loss=next_sentence_loss,
1675
+ logits=seq_relationship_scores,
1676
+ hidden_states=outputs.hidden_states,
1677
+ attentions=outputs.attentions,
1678
+ )
1679
+
1680
+ def build(self, input_shape=None):
1681
+ if self.built:
1682
+ return
1683
+ self.built = True
1684
+ if getattr(self, "bert", None) is not None:
1685
+ with tf.name_scope(self.bert.name):
1686
+ self.bert.build(None)
1687
+ if getattr(self, "nsp", None) is not None:
1688
+ with tf.name_scope(self.nsp.name):
1689
+ self.nsp.build(None)
1690
+
1691
+
1692
+ @add_start_docstrings(
1693
+ """
1694
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1695
+ output) e.g. for GLUE tasks.
1696
+ """,
1697
+ BERT_START_DOCSTRING,
1698
+ )
1699
+ class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassificationLoss):
1700
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1701
+ _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
1702
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1703
+
1704
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1705
+ super().__init__(config, *inputs, **kwargs)
1706
+
1707
+ self.num_labels = config.num_labels
1708
+
1709
+ self.bert = TFBertMainLayer(config, name="bert")
1710
+ classifier_dropout = (
1711
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1712
+ )
1713
+ self.dropout = keras.layers.Dropout(rate=classifier_dropout)
1714
+ self.classifier = keras.layers.Dense(
1715
+ units=config.num_labels,
1716
+ kernel_initializer=get_initializer(config.initializer_range),
1717
+ name="classifier",
1718
+ )
1719
+ self.config = config
1720
+
1721
+ @unpack_inputs
1722
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1723
+ @add_code_sample_docstrings(
1724
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1725
+ output_type=TFSequenceClassifierOutput,
1726
+ config_class=_CONFIG_FOR_DOC,
1727
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1728
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1729
+ )
1730
+ def call(
1731
+ self,
1732
+ input_ids: TFModelInputType | None = None,
1733
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1734
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1735
+ position_ids: np.ndarray | tf.Tensor | None = None,
1736
+ head_mask: np.ndarray | tf.Tensor | None = None,
1737
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1738
+ output_attentions: Optional[bool] = None,
1739
+ output_hidden_states: Optional[bool] = None,
1740
+ return_dict: Optional[bool] = None,
1741
+ labels: np.ndarray | tf.Tensor | None = None,
1742
+ training: Optional[bool] = False,
1743
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1744
+ r"""
1745
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1746
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1747
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1748
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1749
+ """
1750
+ outputs = self.bert(
1751
+ input_ids=input_ids,
1752
+ attention_mask=attention_mask,
1753
+ token_type_ids=token_type_ids,
1754
+ position_ids=position_ids,
1755
+ head_mask=head_mask,
1756
+ inputs_embeds=inputs_embeds,
1757
+ output_attentions=output_attentions,
1758
+ output_hidden_states=output_hidden_states,
1759
+ return_dict=return_dict,
1760
+ training=training,
1761
+ )
1762
+ pooled_output = outputs[1]
1763
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1764
+ logits = self.classifier(inputs=pooled_output)
1765
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1766
+
1767
+ if not return_dict:
1768
+ output = (logits,) + outputs[2:]
1769
+ return ((loss,) + output) if loss is not None else output
1770
+
1771
+ return TFSequenceClassifierOutput(
1772
+ loss=loss,
1773
+ logits=logits,
1774
+ hidden_states=outputs.hidden_states,
1775
+ attentions=outputs.attentions,
1776
+ )
1777
+
1778
+ def build(self, input_shape=None):
1779
+ if self.built:
1780
+ return
1781
+ self.built = True
1782
+ if getattr(self, "bert", None) is not None:
1783
+ with tf.name_scope(self.bert.name):
1784
+ self.bert.build(None)
1785
+ if getattr(self, "classifier", None) is not None:
1786
+ with tf.name_scope(self.classifier.name):
1787
+ self.classifier.build([None, None, self.config.hidden_size])
1788
+
1789
+
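# Illustrative sketch for TFBertForSequenceClassification above: a classification head on
# the pooled output; passing `labels` also returns the loss. `num_labels=2` is an
# illustrative choice and the head is randomly initialized until fine-tuned.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased", num_labels=2)

inputs = tokenizer("A delightfully sharp little film.", return_tensors="tf")
outputs = model(**inputs, labels=tf.constant([1]))
print(outputs.logits.shape)                 # (1, 2)
print(float(tf.reduce_mean(outputs.loss)))  # cross-entropy against the provided label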
1790
+ @add_start_docstrings(
1791
+ """
1792
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1793
+ softmax) e.g. for RocStories/SWAG tasks.
1794
+ """,
1795
+ BERT_START_DOCSTRING,
1796
+ )
1797
+ class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss):
1798
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1799
+ _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
1800
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1801
+
1802
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1803
+ super().__init__(config, *inputs, **kwargs)
1804
+
1805
+ self.bert = TFBertMainLayer(config, name="bert")
1806
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1807
+ self.classifier = keras.layers.Dense(
1808
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1809
+ )
1810
+ self.config = config
1811
+
1812
+ @unpack_inputs
1813
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1814
+ @add_code_sample_docstrings(
1815
+ checkpoint=_CHECKPOINT_FOR_DOC,
1816
+ output_type=TFMultipleChoiceModelOutput,
1817
+ config_class=_CONFIG_FOR_DOC,
1818
+ )
1819
+ def call(
1820
+ self,
1821
+ input_ids: TFModelInputType | None = None,
1822
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1823
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1824
+ position_ids: np.ndarray | tf.Tensor | None = None,
1825
+ head_mask: np.ndarray | tf.Tensor | None = None,
1826
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1827
+ output_attentions: Optional[bool] = None,
1828
+ output_hidden_states: Optional[bool] = None,
1829
+ return_dict: Optional[bool] = None,
1830
+ labels: np.ndarray | tf.Tensor | None = None,
1831
+ training: Optional[bool] = False,
1832
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1833
+ r"""
1834
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1835
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1836
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1837
+ """
1838
+ if input_ids is not None:
1839
+ num_choices = shape_list(input_ids)[1]
1840
+ seq_length = shape_list(input_ids)[2]
1841
+ else:
1842
+ num_choices = shape_list(inputs_embeds)[1]
1843
+ seq_length = shape_list(inputs_embeds)[2]
1844
+
1845
+ flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
1846
+ flat_attention_mask = (
1847
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
1848
+ )
1849
+ flat_token_type_ids = (
1850
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
1851
+ )
1852
+ flat_position_ids = (
1853
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
1854
+ )
1855
+ flat_inputs_embeds = (
1856
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
1857
+ if inputs_embeds is not None
1858
+ else None
1859
+ )
1860
+ outputs = self.bert(
1861
+ input_ids=flat_input_ids,
1862
+ attention_mask=flat_attention_mask,
1863
+ token_type_ids=flat_token_type_ids,
1864
+ position_ids=flat_position_ids,
1865
+ head_mask=head_mask,
1866
+ inputs_embeds=flat_inputs_embeds,
1867
+ output_attentions=output_attentions,
1868
+ output_hidden_states=output_hidden_states,
1869
+ return_dict=return_dict,
1870
+ training=training,
1871
+ )
1872
+ pooled_output = outputs[1]
1873
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1874
+ logits = self.classifier(inputs=pooled_output)
1875
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
1876
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
1877
+
1878
+ if not return_dict:
1879
+ output = (reshaped_logits,) + outputs[2:]
1880
+ return ((loss,) + output) if loss is not None else output
1881
+
1882
+ return TFMultipleChoiceModelOutput(
1883
+ loss=loss,
1884
+ logits=reshaped_logits,
1885
+ hidden_states=outputs.hidden_states,
1886
+ attentions=outputs.attentions,
1887
+ )
1888
+
1889
+ def build(self, input_shape=None):
1890
+ if self.built:
1891
+ return
1892
+ self.built = True
1893
+ if getattr(self, "bert", None) is not None:
1894
+ with tf.name_scope(self.bert.name):
1895
+ self.bert.build(None)
1896
+ if getattr(self, "classifier", None) is not None:
1897
+ with tf.name_scope(self.classifier.name):
1898
+ self.classifier.build([None, None, self.config.hidden_size])
1899
+
1900
+
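# Illustrative sketch for TFBertForMultipleChoice above: inputs carry an extra
# `num_choices` dimension, which the layer flattens before running BERT and then reshapes
# back into per-choice logits. Assumes network access.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertForMultipleChoice.from_pretrained("google-bert/bert-base-uncased")

prompt = "The cat sat on the"
choices = ["mat.", "theory of relativity."]
encoding = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}  # (1, num_choices, seq_len)

logits = model(inputs).logits              # (1, num_choices)
print(int(tf.argmax(logits, axis=-1)[0]))  # index of the preferred choice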
1901
+ @add_start_docstrings(
1902
+ """
1903
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1904
+ Named-Entity-Recognition (NER) tasks.
1905
+ """,
1906
+ BERT_START_DOCSTRING,
1907
+ )
1908
+ class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationLoss):
1909
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1910
+ _keys_to_ignore_on_load_unexpected = [
1911
+ r"pooler",
1912
+ r"mlm___cls",
1913
+ r"nsp___cls",
1914
+ r"cls.predictions",
1915
+ r"cls.seq_relationship",
1916
+ ]
1917
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1918
+
1919
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1920
+ super().__init__(config, *inputs, **kwargs)
1921
+
1922
+ self.num_labels = config.num_labels
1923
+
1924
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
1925
+ classifier_dropout = (
1926
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1927
+ )
1928
+ self.dropout = keras.layers.Dropout(rate=classifier_dropout)
1929
+ self.classifier = keras.layers.Dense(
1930
+ units=config.num_labels,
1931
+ kernel_initializer=get_initializer(config.initializer_range),
1932
+ name="classifier",
1933
+ )
1934
+ self.config = config
1935
+
1936
+ @unpack_inputs
1937
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1938
+ @add_code_sample_docstrings(
1939
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1940
+ output_type=TFTokenClassifierOutput,
1941
+ config_class=_CONFIG_FOR_DOC,
1942
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1943
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1944
+ )
1945
+ def call(
1946
+ self,
1947
+ input_ids: TFModelInputType | None = None,
1948
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1949
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1950
+ position_ids: np.ndarray | tf.Tensor | None = None,
1951
+ head_mask: np.ndarray | tf.Tensor | None = None,
1952
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1953
+ output_attentions: Optional[bool] = None,
1954
+ output_hidden_states: Optional[bool] = None,
1955
+ return_dict: Optional[bool] = None,
1956
+ labels: np.ndarray | tf.Tensor | None = None,
1957
+ training: Optional[bool] = False,
1958
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1959
+ r"""
1960
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1961
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1962
+ """
1963
+ outputs = self.bert(
1964
+ input_ids=input_ids,
1965
+ attention_mask=attention_mask,
1966
+ token_type_ids=token_type_ids,
1967
+ position_ids=position_ids,
1968
+ head_mask=head_mask,
1969
+ inputs_embeds=inputs_embeds,
1970
+ output_attentions=output_attentions,
1971
+ output_hidden_states=output_hidden_states,
1972
+ return_dict=return_dict,
1973
+ training=training,
1974
+ )
1975
+ sequence_output = outputs[0]
1976
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
1977
+ logits = self.classifier(inputs=sequence_output)
1978
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1979
+
1980
+ if not return_dict:
1981
+ output = (logits,) + outputs[2:]
1982
+ return ((loss,) + output) if loss is not None else output
1983
+
1984
+ return TFTokenClassifierOutput(
1985
+ loss=loss,
1986
+ logits=logits,
1987
+ hidden_states=outputs.hidden_states,
1988
+ attentions=outputs.attentions,
1989
+ )
1990
+
1991
+ def build(self, input_shape=None):
1992
+ if self.built:
1993
+ return
1994
+ self.built = True
1995
+ if getattr(self, "bert", None) is not None:
1996
+ with tf.name_scope(self.bert.name):
1997
+ self.bert.build(None)
1998
+ if getattr(self, "classifier", None) is not None:
1999
+ with tf.name_scope(self.classifier.name):
2000
+ self.classifier.build([None, None, self.config.hidden_size])
2001
+
2002
+
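# Illustrative sketch for TFBertForTokenClassification above: one logit vector per token,
# argmax gives a label id per position. `num_labels=5` is an illustrative choice and the
# classification head is untrained here. Assumes network access.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFBertForTokenClassification.from_pretrained("google-bert/bert-base-uncased", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits            # (1, sequence_length, num_labels)
print(tf.argmax(logits, axis=-1).numpy())  # predicted label id for every token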
2003
+ @add_start_docstrings(
2004
+ """
2005
+ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
2006
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
2007
+ """,
2008
+ BERT_START_DOCSTRING,
2009
+ )
2010
+ class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss):
2011
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
2012
+ _keys_to_ignore_on_load_unexpected = [
2013
+ r"pooler",
2014
+ r"mlm___cls",
2015
+ r"nsp___cls",
2016
+ r"cls.predictions",
2017
+ r"cls.seq_relationship",
2018
+ ]
2019
+
2020
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
2021
+ super().__init__(config, *inputs, **kwargs)
2022
+
2023
+ self.num_labels = config.num_labels
2024
+
2025
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
2026
+ self.qa_outputs = keras.layers.Dense(
2027
+ units=config.num_labels,
2028
+ kernel_initializer=get_initializer(config.initializer_range),
2029
+ name="qa_outputs",
2030
+ )
2031
+ self.config = config
2032
+
2033
+ @unpack_inputs
2034
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
2035
+ @add_code_sample_docstrings(
2036
+ checkpoint=_CHECKPOINT_FOR_QA,
2037
+ output_type=TFQuestionAnsweringModelOutput,
2038
+ config_class=_CONFIG_FOR_DOC,
2039
+ qa_target_start_index=_QA_TARGET_START_INDEX,
2040
+ qa_target_end_index=_QA_TARGET_END_INDEX,
2041
+ expected_output=_QA_EXPECTED_OUTPUT,
2042
+ expected_loss=_QA_EXPECTED_LOSS,
2043
+ )
2044
+ def call(
2045
+ self,
2046
+ input_ids: TFModelInputType | None = None,
2047
+ attention_mask: np.ndarray | tf.Tensor | None = None,
2048
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
2049
+ position_ids: np.ndarray | tf.Tensor | None = None,
2050
+ head_mask: np.ndarray | tf.Tensor | None = None,
2051
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
2052
+ output_attentions: Optional[bool] = None,
2053
+ output_hidden_states: Optional[bool] = None,
2054
+ return_dict: Optional[bool] = None,
2055
+ start_positions: np.ndarray | tf.Tensor | None = None,
2056
+ end_positions: np.ndarray | tf.Tensor | None = None,
2057
+ training: Optional[bool] = False,
2058
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
2059
+ r"""
2060
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
2061
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
2062
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
2063
+ are not taken into account for computing the loss.
2064
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
2065
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
2066
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
2067
+ are not taken into account for computing the loss.
2068
+ """
2069
+ outputs = self.bert(
2070
+ input_ids=input_ids,
2071
+ attention_mask=attention_mask,
2072
+ token_type_ids=token_type_ids,
2073
+ position_ids=position_ids,
2074
+ head_mask=head_mask,
2075
+ inputs_embeds=inputs_embeds,
2076
+ output_attentions=output_attentions,
2077
+ output_hidden_states=output_hidden_states,
2078
+ return_dict=return_dict,
2079
+ training=training,
2080
+ )
2081
+ sequence_output = outputs[0]
2082
+ logits = self.qa_outputs(inputs=sequence_output)
2083
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
2084
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
2085
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
2086
+ loss = None
2087
+
2088
+ if start_positions is not None and end_positions is not None:
2089
+ labels = {"start_position": start_positions}
2090
+ labels["end_position"] = end_positions
2091
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
2092
+
2093
+ if not return_dict:
2094
+ output = (start_logits, end_logits) + outputs[2:]
2095
+ return ((loss,) + output) if loss is not None else output
2096
+
2097
+ return TFQuestionAnsweringModelOutput(
2098
+ loss=loss,
2099
+ start_logits=start_logits,
2100
+ end_logits=end_logits,
2101
+ hidden_states=outputs.hidden_states,
2102
+ attentions=outputs.attentions,
2103
+ )
2104
+
2105
+ def build(self, input_shape=None):
2106
+ if self.built:
2107
+ return
2108
+ self.built = True
2109
+ if getattr(self, "bert", None) is not None:
2110
+ with tf.name_scope(self.bert.name):
2111
+ self.bert.build(None)
2112
+ if getattr(self, "qa_outputs", None) is not None:
2113
+ with tf.name_scope(self.qa_outputs.name):
2114
+ self.qa_outputs.build([None, None, self.config.hidden_size])
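# Illustrative sketch for TFBertForQuestionAnswering above: take the most likely start and
# end positions from the span logits and decode the tokens in between. A SQuAD-tuned
# checkpoint such as "bert-large-uncased-whole-word-masking-finetuned-squad" gives
# meaningful spans; the base checkpoint would use an untrained span head.
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForQuestionAnswering

checkpoint = "bert-large-uncased-whole-word-masking-finetuned-squad"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFBertForQuestionAnswering.from_pretrained(checkpoint)

question = "Where is the Eiffel Tower?"
context = "The Eiffel Tower is a wrought-iron lattice tower located in Paris, France."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))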
venv/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert.py ADDED
@@ -0,0 +1,500 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Bert."""
16
+
17
+
18
+ import collections
19
+ import os
20
+ import unicodedata
21
+ from typing import List, Optional, Tuple
22
+
23
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
30
+
31
+
32
+ def load_vocab(vocab_file):
33
+ """Loads a vocabulary file into a dictionary."""
34
+ vocab = collections.OrderedDict()
35
+ with open(vocab_file, "r", encoding="utf-8") as reader:
36
+ tokens = reader.readlines()
37
+ for index, token in enumerate(tokens):
38
+ token = token.rstrip("\n")
39
+ vocab[token] = index
40
+ return vocab
41
+
42
+
43
+ def whitespace_tokenize(text):
44
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
45
+ text = text.strip()
46
+ if not text:
47
+ return []
48
+ tokens = text.split()
49
+ return tokens
50
+
51
+
52
+ class BertTokenizer(PreTrainedTokenizer):
53
+ r"""
54
+ Construct a BERT tokenizer. Based on WordPiece.
55
+
56
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
57
+ this superclass for more information regarding those methods.
58
+
59
+ Args:
60
+ vocab_file (`str`):
61
+ File containing the vocabulary.
62
+ do_lower_case (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to lowercase the input when tokenizing.
64
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to do basic tokenization before WordPiece.
66
+ never_split (`Iterable`, *optional*):
67
+ Collection of tokens which will never be split during tokenization. Only has an effect when
68
+ `do_basic_tokenize=True`
69
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
73
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
74
+ sequence classification or for a text and a question for question answering. It is also used as the last
75
+ token of a sequence built with special tokens.
76
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
77
+ The token used for padding, for example when batching sequences of different lengths.
78
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
79
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
80
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
81
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
82
+ The token used for masking values. This is the token used when training this model with masked language
83
+ modeling. This is the token which the model will try to predict.
84
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
85
+ Whether or not to tokenize Chinese characters.
86
+
87
+ This should likely be deactivated for Japanese (see this
88
+ [issue](https://github.com/huggingface/transformers/issues/328)).
89
+ strip_accents (`bool`, *optional*):
90
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
91
+ value for `lowercase` (as in the original BERT).
92
+ """
93
+
94
+ vocab_files_names = VOCAB_FILES_NAMES
95
+
96
+ def __init__(
97
+ self,
98
+ vocab_file,
99
+ do_lower_case=True,
100
+ do_basic_tokenize=True,
101
+ never_split=None,
102
+ unk_token="[UNK]",
103
+ sep_token="[SEP]",
104
+ pad_token="[PAD]",
105
+ cls_token="[CLS]",
106
+ mask_token="[MASK]",
107
+ tokenize_chinese_chars=True,
108
+ strip_accents=None,
109
+ **kwargs,
110
+ ):
111
+ if not os.path.isfile(vocab_file):
112
+ raise ValueError(
113
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
114
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
115
+ )
116
+ self.vocab = load_vocab(vocab_file)
117
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
118
+ self.do_basic_tokenize = do_basic_tokenize
119
+ if do_basic_tokenize:
120
+ self.basic_tokenizer = BasicTokenizer(
121
+ do_lower_case=do_lower_case,
122
+ never_split=never_split,
123
+ tokenize_chinese_chars=tokenize_chinese_chars,
124
+ strip_accents=strip_accents,
125
+ )
126
+
127
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
128
+
129
+ super().__init__(
130
+ do_lower_case=do_lower_case,
131
+ do_basic_tokenize=do_basic_tokenize,
132
+ never_split=never_split,
133
+ unk_token=unk_token,
134
+ sep_token=sep_token,
135
+ pad_token=pad_token,
136
+ cls_token=cls_token,
137
+ mask_token=mask_token,
138
+ tokenize_chinese_chars=tokenize_chinese_chars,
139
+ strip_accents=strip_accents,
140
+ **kwargs,
141
+ )
142
+
143
+ @property
144
+ def do_lower_case(self):
145
+ return self.basic_tokenizer.do_lower_case
146
+
147
+ @property
148
+ def vocab_size(self):
149
+ return len(self.vocab)
150
+
151
+ def get_vocab(self):
152
+ return dict(self.vocab, **self.added_tokens_encoder)
153
+
154
+ def _tokenize(self, text, split_special_tokens=False):
155
+ split_tokens = []
156
+ if self.do_basic_tokenize:
157
+ for token in self.basic_tokenizer.tokenize(
158
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
159
+ ):
160
+ # If the token is part of the never_split set
161
+ if token in self.basic_tokenizer.never_split:
162
+ split_tokens.append(token)
163
+ else:
164
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
165
+ else:
166
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
167
+ return split_tokens
168
+
169
+ def _convert_token_to_id(self, token):
170
+ """Converts a token (str) in an id using the vocab."""
171
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
172
+
173
+ def _convert_id_to_token(self, index):
174
+ """Converts an index (integer) in a token (str) using the vocab."""
175
+ return self.ids_to_tokens.get(index, self.unk_token)
176
+
177
+ def convert_tokens_to_string(self, tokens):
178
+ """Converts a sequence of tokens (string) in a single string."""
179
+ out_string = " ".join(tokens).replace(" ##", "").strip()
180
+ return out_string
181
+
+    def build_inputs_with_special_tokens(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+        and adding special tokens. A BERT sequence has the following format:
+
+        - single sequence: `[CLS] X [SEP]`
+        - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs to which the special tokens will be added.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+        """
+        if token_ids_1 is None:
+            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+        cls = [self.cls_token_id]
+        sep = [self.sep_token_id]
+        return cls + token_ids_0 + sep + token_ids_1 + sep
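A quick sketch of the two layouts this method produces, written out with made-up token ids (101 and 102 are the usual `[CLS]`/`[SEP]` ids in the stock BERT vocabularies, but treat them as placeholders here):

```python
cls_id, sep_id = 101, 102          # typical [CLS]/[SEP] ids; placeholders for illustration
ids_a = [2023, 2003, 1037, 3231]   # first sequence (made-up ids)
ids_b = [2009, 2573]               # second sequence (made-up ids)

# single sequence: [CLS] A [SEP]
single = [cls_id] + ids_a + [sep_id]

# pair of sequences: [CLS] A [SEP] B [SEP]
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

print(single)  # [101, 2023, 2003, 1037, 3231, 102]
print(pair)    # [101, 2023, 2003, 1037, 3231, 102, 2009, 2573, 102]
```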
+
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+            )
+
+        if token_ids_1 is not None:
+            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+        return [1] + ([0] * len(token_ids_0)) + [1]
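Spelled out with the same placeholder ids as above, the mask lines up one-for-one with the `[CLS] A [SEP] B [SEP]` layout (1 marks a special-token slot, 0 a sequence token):

```python
token_ids_0 = [2023, 2003, 1037, 3231]  # made-up ids for sequence A
token_ids_1 = [2009, 2573]              # made-up ids for sequence B

# mirrors the method above
mask_single = [1] + [0] * len(token_ids_0) + [1]
mask_pair = [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]

print(mask_single)  # [1, 0, 0, 0, 0, 1]
print(mask_pair)    # [1, 0, 0, 0, 0, 1, 0, 0, 1]
```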
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+        pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        sep = [self.sep_token_id]
+        cls = [self.cls_token_id]
+        if token_ids_1 is None:
+            return len(cls + token_ids_0 + sep) * [0]
+        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
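Continuing the placeholder example, segment 0 covers `[CLS] A [SEP]` and segment 1 covers `B [SEP]`:

```python
token_ids_0 = [2023, 2003, 1037, 3231]  # made-up ids for sequence A
token_ids_1 = [2009, 2573]              # made-up ids for sequence B

# [CLS] + A + [SEP] get type id 0; B + [SEP] get type id 1
type_ids = [0] * (1 + len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 1)

print(type_ids)  # [0, 0, 0, 0, 0, 0, 1, 1, 1]
```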
+
+    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        index = 0
+        if os.path.isdir(save_directory):
+            vocab_file = os.path.join(
+                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+            )
+        else:
+            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+        with open(vocab_file, "w", encoding="utf-8") as writer:
+            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+                if index != token_index:
+                    logger.warning(
+                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+                        " Please check that the vocabulary is not corrupted!"
+                    )
+                    index = token_index
+                writer.write(token + "\n")
+                index += 1
+        return (vocab_file,)
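The on-disk format is simply one token per line, in id order, so it round-trips through the module-level `load_vocab` helper. A minimal sketch, assuming `transformers` is installed and using an invented file name and vocabulary:

```python
# Illustrative only: write a vocab in the one-token-per-line format that
# save_vocabulary produces, then read it back with load_vocab.
from transformers.models.bert.tokenization_bert import load_vocab

toy_vocab = {"[PAD]": 0, "[UNK]": 1, "[CLS]": 2, "[SEP]": 3, "hello": 4, "world": 5}

with open("toy-vocab.txt", "w", encoding="utf-8") as writer:
    for token, _ in sorted(toy_vocab.items(), key=lambda kv: kv[1]):
        writer.write(token + "\n")

reloaded = load_vocab("toy-vocab.txt")
print(list(reloaded.items()))
# [('[PAD]', 0), ('[UNK]', 1), ('[CLS]', 2), ('[SEP]', 3), ('hello', 4), ('world', 5)]
```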
+
+
+class BasicTokenizer(object):
+    """
+    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+    Args:
+        do_lower_case (`bool`, *optional*, defaults to `True`):
+            Whether or not to lowercase the input when tokenizing.
+        never_split (`Iterable`, *optional*):
+            Collection of tokens which will never be split during tokenization. Only has an effect when
+            `do_basic_tokenize=True`
+        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+            Whether or not to tokenize Chinese characters.
+
+            This should likely be deactivated for Japanese (see this
+            [issue](https://github.com/huggingface/transformers/issues/328)).
+        strip_accents (`bool`, *optional*):
+            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+            value for `lowercase` (as in the original BERT).
+        do_split_on_punc (`bool`, *optional*, defaults to `True`):
+            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+            the full context of the words, such as contractions.
+    """
+
+    def __init__(
+        self,
+        do_lower_case=True,
+        never_split=None,
+        tokenize_chinese_chars=True,
+        strip_accents=None,
+        do_split_on_punc=True,
+    ):
+        if never_split is None:
+            never_split = []
+        self.do_lower_case = do_lower_case
+        self.never_split = set(never_split)
+        self.tokenize_chinese_chars = tokenize_chinese_chars
+        self.strip_accents = strip_accents
+        self.do_split_on_punc = do_split_on_punc
+
+    def tokenize(self, text, never_split=None):
+        """
+        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+        Args:
+            never_split (`List[str]`, *optional*)
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
+        """
+        # union() returns a new set by concatenating the two sets.
+        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+        text = self._clean_text(text)
+
+        # This was added on November 1st, 2018 for the multilingual and Chinese
+        # models. This is also applied to the English models now, but it doesn't
+        # matter since the English models were not trained on any Chinese data
+        # and generally don't have any Chinese data in them (there are Chinese
+        # characters in the vocabulary because Wikipedia does have some Chinese
+        # words in the English Wikipedia.).
+        if self.tokenize_chinese_chars:
+            text = self._tokenize_chinese_chars(text)
+        # prevents treating the same character with different unicode codepoints as different characters
+        unicode_normalized_text = unicodedata.normalize("NFC", text)
+        orig_tokens = whitespace_tokenize(unicode_normalized_text)
+        split_tokens = []
+        for token in orig_tokens:
+            if token not in never_split:
+                if self.do_lower_case:
+                    token = token.lower()
+                    if self.strip_accents is not False:
+                        token = self._run_strip_accents(token)
+                elif self.strip_accents:
+                    token = self._run_strip_accents(token)
+            split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+        output_tokens = whitespace_tokenize(" ".join(split_tokens))
+        return output_tokens
+
+    def _run_strip_accents(self, text):
+        """Strips accents from a piece of text."""
+        text = unicodedata.normalize("NFD", text)
+        output = []
+        for char in text:
+            cat = unicodedata.category(char)
+            if cat == "Mn":
+                continue
+            output.append(char)
+        return "".join(output)
+
+    def _run_split_on_punc(self, text, never_split=None):
+        """Splits punctuation on a piece of text."""
+        if not self.do_split_on_punc or (never_split is not None and text in never_split):
+            return [text]
+        chars = list(text)
+        i = 0
+        start_new_word = True
+        output = []
+        while i < len(chars):
+            char = chars[i]
+            if _is_punctuation(char):
+                output.append([char])
+                start_new_word = True
+            else:
+                if start_new_word:
+                    output.append([])
+                start_new_word = False
+                output[-1].append(char)
+            i += 1
+
+        return ["".join(x) for x in output]
+
+    def _tokenize_chinese_chars(self, text):
+        """Adds whitespace around any CJK character."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            if self._is_chinese_char(cp):
+                output.append(" ")
+                output.append(char)
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
+
+    def _is_chinese_char(self, cp):
+        """Checks whether CP is the codepoint of a CJK character."""
+        # This defines a "chinese character" as anything in the CJK Unicode block:
+        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+        #
+        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+        # despite its name. The modern Korean Hangul alphabet is a different block,
+        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+        # space-separated words, so they are not treated specially and are handled
+        # like all of the other languages.
+        if (
+            (cp >= 0x4E00 and cp <= 0x9FFF)
+            or (cp >= 0x3400 and cp <= 0x4DBF)  #
+            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
+            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
+            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
+            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
+            or (cp >= 0xF900 and cp <= 0xFAFF)
+            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
+        ):  #
+            return True
+
+        return False
+
+    def _clean_text(self, text):
+        """Performs invalid character removal and whitespace cleanup on text."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            if cp == 0 or cp == 0xFFFD or _is_control(char):
+                continue
+            if _is_whitespace(char):
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
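A minimal sketch of `BasicTokenizer` used in isolation (assuming `transformers` is installed; the sample strings are invented). With the defaults, lower-casing also triggers accent stripping, and `never_split` protects whole tokens from both lower-casing and punctuation splitting:

```python
from transformers.models.bert.tokenization_bert import BasicTokenizer

tokenizer = BasicTokenizer(do_lower_case=True)  # strip_accents=None -> follows do_lower_case

print(tokenizer.tokenize("Héllo, WORLD!"))
# ['hello', ',', 'world', '!']  -- lowercased, accent stripped, punctuation split off

print(tokenizer.tokenize("Talk to [CLS] please.", never_split=["[CLS]"]))
# ['talk', 'to', '[CLS]', 'please', '.']  -- '[CLS]' is kept intact
```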
+
+
+class WordpieceTokenizer(object):
+    """Runs WordPiece tokenization."""
+
+    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+        self.vocab = vocab
+        self.unk_token = unk_token
+        self.max_input_chars_per_word = max_input_chars_per_word
+
+    def tokenize(self, text):
+        """
+        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+        tokenization using the given vocabulary.
+
+        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+        Args:
+            text: A single token or whitespace separated tokens. This should have
+                already been passed through *BasicTokenizer*.
+
+        Returns:
+            A list of wordpiece tokens.
+        """
+
+        output_tokens = []
+        for token in whitespace_tokenize(text):
+            chars = list(token)
+            if len(chars) > self.max_input_chars_per_word:
+                output_tokens.append(self.unk_token)
+                continue
+
+            is_bad = False
+            start = 0
+            sub_tokens = []
+            while start < len(chars):
+                end = len(chars)
+                cur_substr = None
+                while start < end:
+                    substr = "".join(chars[start:end])
+                    if start > 0:
+                        substr = "##" + substr
+                    if substr in self.vocab:
+                        cur_substr = substr
+                        break
+                    end -= 1
+                if cur_substr is None:
+                    is_bad = True
+                    break
+                sub_tokens.append(cur_substr)
+                start = end
+
+            if is_bad:
+                output_tokens.append(self.unk_token)
+            else:
+                output_tokens.extend(sub_tokens)
+        return output_tokens
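To make the greedy longest-match-first behaviour concrete, here is a runnable toy example (the vocabulary is made up for illustration and is not a real BERT vocab):

```python
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

# Invented toy vocabulary: a handful of word pieces plus the unknown token.
vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3, "runn": 4, "##ing": 5}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wordpiece.tokenize("running"))    # ['runn', '##ing']  -- longest prefix 'runn' wins
print(wordpiece.tokenize("xyz"))        # ['[UNK]']          -- no prefix of 'xyz' is in the vocab
```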