applied-ai-018 committed
Commit 2b1e64f · verified · 1 Parent(s): d8dbab0

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  4. lm-evaluation-harness/tests/testdata/arc_easy-v0-res.json +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_1-v0-res.json +1 -0
  6. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-res.json +1 -0
  7. lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-loglikelihood +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-res.json +1 -0
  10. lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/headqa_en-v0-res.json +1 -0
  12. lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood +1 -0
  13. lm-evaluation-harness/tests/testdata/lambada_openai-v0-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-res.json +1 -0
  15. lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-loglikelihood +1 -0
  16. lm-evaluation-harness/tests/testdata/math_geometry-v0-res.json +1 -0
  17. lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling +1 -0
  18. lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-res.json +1 -0
  19. lm-evaluation-harness/tests/testdata/toxigen-v0-loglikelihood +1 -0
  20. lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-greedy_until +1 -0
  21. lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-greedy_until +1 -0
  22. venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 +3 -0
  23. venv/lib/python3.10/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png +3 -0
  24. venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 +3 -0
  25. venv/lib/python3.10/site-packages/transformers/models/albert/__init__.py +179 -0
  26. venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/convert_albert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/albert/configuration_albert.py +167 -0
  32. venv/lib/python3.10/site-packages/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py +63 -0
  33. venv/lib/python3.10/site-packages/transformers/models/albert/modeling_albert.py +1382 -0
  34. venv/lib/python3.10/site-packages/transformers/models/albert/modeling_flax_albert.py +1121 -0
  35. venv/lib/python3.10/site-packages/transformers/models/albert/modeling_tf_albert.py +1564 -0
  36. venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert.py +346 -0
  37. venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert_fast.py +210 -0
  38. venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py +46 -0
  48. venv/lib/python3.10/site-packages/transformers/models/donut/__init__.py +74 -0
  49. venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -81,3 +81,4 @@ venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs
  venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fc1ef6b76144b91e148ec1a10db9697ba17309b66980ea33415cd27642757b7
+ size 50332828
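The three added lines above are not the optimizer state itself but a standard Git LFS pointer: spec version, SHA-256 object id, and payload size in bytes. A minimal sketch of parsing such a pointer file, assuming a local copy at a hypothetical path:

```python
# Sketch: parse a Git LFS pointer file (version / oid / size) into a dict.
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields


pointer = parse_lfs_pointer("exp_avg.pt")  # hypothetical local copy of the pointer file
print(pointer["oid"])        # sha256:<64 hex characters>
print(int(pointer["size"]))  # payload size in bytes
```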
ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3203d80eb2e77b27f4938946251908f61c9407d4f1b258fdd49375646eabdba3
+ size 50332843
lm-evaluation-harness/tests/testdata/arc_easy-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arc_easy": {"acc": 0.2474747474747475, "acc_norm": 0.24074074074074073, "acc_norm_stderr": 0.008772796145221907, "acc_stderr": 0.008855114414834707}}, "versions": {"arc_easy": 0}}
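Each `*-res.json` fixture in the lm-evaluation-harness test data that follows is a single-line JSON document with a `results` and a `versions` block, as above. A small sketch of reading one of them, assuming a local copy of the fixture:

```python
# Sketch: read an lm-evaluation-harness result fixture and print its metrics.
import json

with open("arc_easy-v0-res.json") as fh:  # assumes a local copy of the fixture above
    data = json.load(fh)

for task, metrics in data["results"].items():
    version = data["versions"][task]
    print(f"{task} (v{version}): acc={metrics['acc']:.4f} +/- {metrics['acc_stderr']:.4f}")
```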
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_with_adjective_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adjective_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_irregular_plural_subject_verb_agreement_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_irregular_plural_subject_verb_agreement_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d2d0711611b5b218c6fa8c7278494749252b7868c396451919b761303556bd66
lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_passive_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_passive_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_questions_subject_gap_long_distance": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_questions_subject_gap_long_distance": 0}}
lm-evaluation-harness/tests/testdata/headqa_en-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"headqa_en": {"acc": 0.23559445660102116, "acc_norm": 0.2447118891320204, "acc_norm_stderr": 0.008211629406841468, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_en": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ bf05e04ed8cf61cf3aad294ed3f5a16137775ffdd20f1b129022ddffc1251768
lm-evaluation-harness/tests/testdata/lambada_openai-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai": 0}}
lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai_cloze": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_cloze": 0}}
lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
lm-evaluation-harness/tests/testdata/math_geometry-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_geometry": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_geometry": 0}}
lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ d5b7967c0ece8b816f3921a8bd0fad23365349e935b491595e2ad1135af42da6
lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"qa4mre_2013": {"acc": 0.18309859154929578, "acc_norm": 0.22183098591549297, "acc_norm_stderr": 0.02469760575535269, "acc_stderr": 0.022989742475464973}}, "versions": {"qa4mre_2013": 0}}
lm-evaluation-harness/tests/testdata/toxigen-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7fedd930bafa92b9cca615a93ba92a4413244d2b77cf3f421a186815d721e0fa
lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 7f197bc281d6dbf9425900ef0dee7175021c43e355050f149f43b161c52bf0b0
lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 89274499d84176b1ffe4eaec06f2c89ca807342384dc946c2e348d00116aaade
venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf8f81ceb5eaaee1621aa60b892d83e596a6173f6f6517359b679ff3daa1b0f8
+ size 44623
venv/lib/python3.10/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png ADDED

Git LFS Details

  • SHA256: 1508bda48445c23ab882f801f1c0dd0472f97ae414245c3ab1094005fda4455a
  • Pointer size: 130 Bytes
  • Size of remote file: 21.9 kB
venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d85a4a6d150efcee79c4cd53c88a5a31fd3f6f6efde3e7bd439cd8f4883024ae
+ size 67913016
venv/lib/python3.10/site-packages/transformers/models/albert/__init__.py ADDED
@@ -0,0 +1,179 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_sentencepiece_available,
+     is_tf_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
+ }
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_albert"] = [
+         "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "AlbertForMaskedLM",
+         "AlbertForMultipleChoice",
+         "AlbertForPreTraining",
+         "AlbertForQuestionAnswering",
+         "AlbertForSequenceClassification",
+         "AlbertForTokenClassification",
+         "AlbertModel",
+         "AlbertPreTrainedModel",
+         "load_tf_weights_in_albert",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_albert"] = [
+         "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TFAlbertForMaskedLM",
+         "TFAlbertForMultipleChoice",
+         "TFAlbertForPreTraining",
+         "TFAlbertForQuestionAnswering",
+         "TFAlbertForSequenceClassification",
+         "TFAlbertForTokenClassification",
+         "TFAlbertMainLayer",
+         "TFAlbertModel",
+         "TFAlbertPreTrainedModel",
+     ]
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flax_albert"] = [
+         "FlaxAlbertForMaskedLM",
+         "FlaxAlbertForMultipleChoice",
+         "FlaxAlbertForPreTraining",
+         "FlaxAlbertForQuestionAnswering",
+         "FlaxAlbertForSequenceClassification",
+         "FlaxAlbertForTokenClassification",
+         "FlaxAlbertModel",
+         "FlaxAlbertPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
+
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_albert import AlbertTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_albert_fast import AlbertTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_albert import (
+             ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             AlbertForMaskedLM,
+             AlbertForMultipleChoice,
+             AlbertForPreTraining,
+             AlbertForQuestionAnswering,
+             AlbertForSequenceClassification,
+             AlbertForTokenClassification,
+             AlbertModel,
+             AlbertPreTrainedModel,
+             load_tf_weights_in_albert,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_albert import (
+             TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TFAlbertForMaskedLM,
+             TFAlbertForMultipleChoice,
+             TFAlbertForPreTraining,
+             TFAlbertForQuestionAnswering,
+             TFAlbertForSequenceClassification,
+             TFAlbertForTokenClassification,
+             TFAlbertMainLayer,
+             TFAlbertModel,
+             TFAlbertPreTrainedModel,
+         )
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flax_albert import (
+             FlaxAlbertForMaskedLM,
+             FlaxAlbertForMultipleChoice,
+             FlaxAlbertForPreTraining,
+             FlaxAlbertForQuestionAnswering,
+             FlaxAlbertForSequenceClassification,
+             FlaxAlbertForTokenClassification,
+             FlaxAlbertModel,
+             FlaxAlbertPreTrainedModel,
+         )
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
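At import time the block above only records an `_import_structure`; the final `else:` branch swaps the package module for a `_LazyModule`, so the heavy framework-specific files are loaded on first attribute access (the `TYPE_CHECKING` branch exists purely for static type checkers). A small sketch of the effect from the user side, assuming `transformers` and `torch` are installed:

```python
# Sketch: the lazy-module shell defers the costly modeling import until first use.
import sys

from transformers.models import albert  # only the _LazyModule placeholder is built here

print("transformers.models.albert.modeling_albert" in sys.modules)  # expected: False
model_cls = albert.AlbertForMaskedLM  # attribute access triggers the real import
print(model_cls.__module__)           # transformers.models.albert.modeling_albert
```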
venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/convert_albert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc ADDED
Binary file (41.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc ADDED
Binary file (28.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc ADDED
Binary file (47.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc ADDED
Binary file (7.76 kB).
 
venv/lib/python3.10/site-packages/transformers/models/albert/configuration_albert.py ADDED
@@ -0,0 +1,167 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ ALBERT model configuration"""
+ from collections import OrderedDict
+ from typing import Mapping
+
+ from ...configuration_utils import PretrainedConfig
+ from ...onnx import OnnxConfig
+ from ..deprecated._archive_maps import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class AlbertConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used
+     to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating
+     a configuration with the defaults will yield a similar configuration to that of the ALBERT
+     [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 30000):
+             Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
+         embedding_size (`int`, *optional*, defaults to 128):
+             Dimensionality of vocabulary embeddings.
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_hidden_groups (`int`, *optional*, defaults to 1):
+             Number of groups for the hidden layers, parameters in the same group are shared.
+         num_attention_heads (`int`, *optional*, defaults to 64):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 16384):
+             The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+         inner_group_num (`int`, *optional*, defaults to 1):
+             The number of inner repetition of attention and ffn.
+         hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu_new"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0):
+             The dropout ratio for the attention probabilities.
+         max_position_embeddings (`int`, *optional*, defaults to 512):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             (e.g., 512 or 1024 or 2048).
+         type_vocab_size (`int`, *optional*, defaults to 2):
+             The vocabulary size of the `token_type_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for attached classifiers.
+         position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+             Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+             positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+             [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+             For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+             with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+         pad_token_id (`int`, *optional*, defaults to 0):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 2):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 3):
+             End of stream token id.
+
+     Examples:
+
+     ```python
+     >>> from transformers import AlbertConfig, AlbertModel
+
+     >>> # Initializing an ALBERT-xxlarge style configuration
+     >>> albert_xxlarge_configuration = AlbertConfig()
+
+     >>> # Initializing an ALBERT-base style configuration
+     >>> albert_base_configuration = AlbertConfig(
+     ...     hidden_size=768,
+     ...     num_attention_heads=12,
+     ...     intermediate_size=3072,
+     ... )
+
+     >>> # Initializing a model (with random weights) from the ALBERT-base style configuration
+     >>> model = AlbertModel(albert_xxlarge_configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "albert"
+
+     def __init__(
+         self,
+         vocab_size=30000,
+         embedding_size=128,
+         hidden_size=4096,
+         num_hidden_layers=12,
+         num_hidden_groups=1,
+         num_attention_heads=64,
+         intermediate_size=16384,
+         inner_group_num=1,
+         hidden_act="gelu_new",
+         hidden_dropout_prob=0,
+         attention_probs_dropout_prob=0,
+         max_position_embeddings=512,
+         type_vocab_size=2,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         classifier_dropout_prob=0.1,
+         position_embedding_type="absolute",
+         pad_token_id=0,
+         bos_token_id=2,
+         eos_token_id=3,
+         **kwargs,
+     ):
+         super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+         self.vocab_size = vocab_size
+         self.embedding_size = embedding_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_hidden_groups = num_hidden_groups
+         self.num_attention_heads = num_attention_heads
+         self.inner_group_num = inner_group_num
+         self.hidden_act = hidden_act
+         self.intermediate_size = intermediate_size
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.max_position_embeddings = max_position_embeddings
+         self.type_vocab_size = type_vocab_size
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.classifier_dropout_prob = classifier_dropout_prob
+         self.position_embedding_type = position_embedding_type
+
+
+ # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Roberta->Albert
+ class AlbertOnnxConfig(OnnxConfig):
+     @property
+     def inputs(self) -> Mapping[str, Mapping[int, str]]:
+         if self.task == "multiple-choice":
+             dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+         else:
+             dynamic_axis = {0: "batch", 1: "sequence"}
+         return OrderedDict(
+             [
+                 ("input_ids", dynamic_axis),
+                 ("attention_mask", dynamic_axis),
+                 ("token_type_ids", dynamic_axis),
+             ]
+         )
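The `AlbertConfig` usage example is already in the docstring above; on the ONNX side, `AlbertOnnxConfig.inputs` only declares which axes of each input are dynamic. A short sketch of inspecting it, assuming `transformers` is installed and using the `"default"` task accepted by `OnnxConfig`:

```python
# Sketch: inspect the dynamic axes declared by the ONNX export config above.
from transformers import AlbertConfig, AlbertOnnxConfig

onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
for name, axes in onnx_config.inputs.items():
    print(name, axes)  # e.g. input_ids {0: 'batch', 1: 'sequence'}
```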
venv/lib/python3.10/site-packages/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,63 @@
+ # coding=utf-8
+ # Copyright 2018 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert ALBERT checkpoint."""
+
+
+ import argparse
+
+ import torch
+
+ from ...utils import logging
+ from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
+
+
+ logging.set_verbosity_info()
+
+
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
+     # Initialise PyTorch model
+     config = AlbertConfig.from_json_file(albert_config_file)
+     print(f"Building PyTorch model from configuration: {config}")
+     model = AlbertForPreTraining(config)
+
+     # Load weights from tf checkpoint
+     load_tf_weights_in_albert(model, config, tf_checkpoint_path)
+
+     # Save pytorch-model
+     print(f"Save PyTorch model to {pytorch_dump_path}")
+     torch.save(model.state_dict(), pytorch_dump_path)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+     )
+     parser.add_argument(
+         "--albert_config_file",
+         default=None,
+         type=str,
+         required=True,
+         help=(
+             "The config json file corresponding to the pre-trained ALBERT model. \n"
+             "This specifies the model architecture."
+         ),
+     )
+     parser.add_argument(
+         "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+     )
+     args = parser.parse_args()
+     convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
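The script above is normally driven by its argparse CLI, but the conversion function can also be called directly. A hedged sketch with purely hypothetical paths, assuming both TensorFlow and PyTorch are installed (TensorFlow is required at call time by `load_tf_weights_in_albert`):

```python
# Sketch: drive the converter above programmatically (paths are hypothetical).
from transformers.models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./albert_base/model.ckpt-best",      # hypothetical TF checkpoint prefix
    albert_config_file="./albert_base/albert_config.json",   # hypothetical config file
    pytorch_dump_path="./albert_base/pytorch_model.bin",     # output PyTorch state dict
)
```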
venv/lib/python3.10/site-packages/transformers/models/albert/modeling_albert.py ADDED
@@ -0,0 +1,1382 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ALBERT model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPooling,
30
+ MaskedLMOutput,
31
+ MultipleChoiceModelOutput,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
38
+ from ...utils import (
39
+ ModelOutput,
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from .configuration_albert import AlbertConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
52
+ _CONFIG_FOR_DOC = "AlbertConfig"
53
+
54
+
55
+ from ..deprecated._archive_maps import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
59
+ """Load tf checkpoints in a pytorch model."""
60
+ try:
61
+ import re
62
+
63
+ import numpy as np
64
+ import tensorflow as tf
65
+ except ImportError:
66
+ logger.error(
67
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
68
+ "https://www.tensorflow.org/install/ for installation instructions."
69
+ )
70
+ raise
71
+ tf_path = os.path.abspath(tf_checkpoint_path)
72
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
73
+ # Load weights from TF model
74
+ init_vars = tf.train.list_variables(tf_path)
75
+ names = []
76
+ arrays = []
77
+ for name, shape in init_vars:
78
+ logger.info(f"Loading TF weight {name} with shape {shape}")
79
+ array = tf.train.load_variable(tf_path, name)
80
+ names.append(name)
81
+ arrays.append(array)
82
+
83
+ for name, array in zip(names, arrays):
84
+ print(name)
85
+
86
+ for name, array in zip(names, arrays):
87
+ original_name = name
88
+
89
+ # If saved from the TF HUB module
90
+ name = name.replace("module/", "")
91
+
92
+ # Renaming and simplifying
93
+ name = name.replace("ffn_1", "ffn")
94
+ name = name.replace("bert/", "albert/")
95
+ name = name.replace("attention_1", "attention")
96
+ name = name.replace("transform/", "")
97
+ name = name.replace("LayerNorm_1", "full_layer_layer_norm")
98
+ name = name.replace("LayerNorm", "attention/LayerNorm")
99
+ name = name.replace("transformer/", "")
100
+
101
+ # The feed forward layer had an 'intermediate' step which has been abstracted away
102
+ name = name.replace("intermediate/dense/", "")
103
+ name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
104
+
105
+ # ALBERT attention was split between self and output which have been abstracted away
106
+ name = name.replace("/output/", "/")
107
+ name = name.replace("/self/", "/")
108
+
109
+ # The pooler is a linear layer
110
+ name = name.replace("pooler/dense", "pooler")
111
+
112
+ # The classifier was simplified to predictions from cls/predictions
113
+ name = name.replace("cls/predictions", "predictions")
114
+ name = name.replace("predictions/attention", "predictions")
115
+
116
+ # Naming was changed to be more explicit
117
+ name = name.replace("embeddings/attention", "embeddings")
118
+ name = name.replace("inner_group_", "albert_layers/")
119
+ name = name.replace("group_", "albert_layer_groups/")
120
+
121
+ # Classifier
122
+ if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
123
+ name = "classifier/" + name
124
+
125
+ # No ALBERT model currently handles the next sentence prediction task
126
+ if "seq_relationship" in name:
127
+ name = name.replace("seq_relationship/output_", "sop_classifier/classifier/")
128
+ name = name.replace("weights", "weight")
129
+
130
+ name = name.split("/")
131
+
132
+ # Ignore the gradients applied by the LAMB/ADAM optimizers.
133
+ if (
134
+ "adam_m" in name
135
+ or "adam_v" in name
136
+ or "AdamWeightDecayOptimizer" in name
137
+ or "AdamWeightDecayOptimizer_1" in name
138
+ or "global_step" in name
139
+ ):
140
+ logger.info(f"Skipping {'/'.join(name)}")
141
+ continue
142
+
143
+ pointer = model
144
+ for m_name in name:
145
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
146
+ scope_names = re.split(r"_(\d+)", m_name)
147
+ else:
148
+ scope_names = [m_name]
149
+
150
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
151
+ pointer = getattr(pointer, "weight")
152
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
153
+ pointer = getattr(pointer, "bias")
154
+ elif scope_names[0] == "output_weights":
155
+ pointer = getattr(pointer, "weight")
156
+ elif scope_names[0] == "squad":
157
+ pointer = getattr(pointer, "classifier")
158
+ else:
159
+ try:
160
+ pointer = getattr(pointer, scope_names[0])
161
+ except AttributeError:
162
+ logger.info(f"Skipping {'/'.join(name)}")
163
+ continue
164
+ if len(scope_names) >= 2:
165
+ num = int(scope_names[1])
166
+ pointer = pointer[num]
167
+
168
+ if m_name[-11:] == "_embeddings":
169
+ pointer = getattr(pointer, "weight")
170
+ elif m_name == "kernel":
171
+ array = np.transpose(array)
172
+ try:
173
+ if pointer.shape != array.shape:
174
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
175
+ except ValueError as e:
176
+ e.args += (pointer.shape, array.shape)
177
+ raise
178
+ print(f"Initialize PyTorch weight {name} from {original_name}")
179
+ pointer.data = torch.from_numpy(array)
180
+
181
+ return model
182
+
183
+
184
+ class AlbertEmbeddings(nn.Module):
185
+ """
186
+ Construct the embeddings from word, position and token_type embeddings.
187
+ """
188
+
189
+ def __init__(self, config: AlbertConfig):
190
+ super().__init__()
191
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
192
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
193
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
194
+
195
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
196
+ # any TensorFlow checkpoint file
197
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
198
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
199
+
200
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
201
+ self.register_buffer(
202
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
203
+ )
204
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
205
+ self.register_buffer(
206
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
207
+ )
208
+
209
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
210
+ def forward(
211
+ self,
212
+ input_ids: Optional[torch.LongTensor] = None,
213
+ token_type_ids: Optional[torch.LongTensor] = None,
214
+ position_ids: Optional[torch.LongTensor] = None,
215
+ inputs_embeds: Optional[torch.FloatTensor] = None,
216
+ past_key_values_length: int = 0,
217
+ ) -> torch.Tensor:
218
+ if input_ids is not None:
219
+ input_shape = input_ids.size()
220
+ else:
221
+ input_shape = inputs_embeds.size()[:-1]
222
+
223
+ seq_length = input_shape[1]
224
+
225
+ if position_ids is None:
226
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
227
+
228
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
229
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
230
+ # issue #5664
231
+ if token_type_ids is None:
232
+ if hasattr(self, "token_type_ids"):
233
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
234
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
235
+ token_type_ids = buffered_token_type_ids_expanded
236
+ else:
237
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
238
+
239
+ if inputs_embeds is None:
240
+ inputs_embeds = self.word_embeddings(input_ids)
241
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
242
+
243
+ embeddings = inputs_embeds + token_type_embeddings
244
+ if self.position_embedding_type == "absolute":
245
+ position_embeddings = self.position_embeddings(position_ids)
246
+ embeddings += position_embeddings
247
+ embeddings = self.LayerNorm(embeddings)
248
+ embeddings = self.dropout(embeddings)
249
+ return embeddings
250
+
251
+
252
+ class AlbertAttention(nn.Module):
253
+ def __init__(self, config: AlbertConfig):
254
+ super().__init__()
255
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
256
+ raise ValueError(
257
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
258
+ f"heads ({config.num_attention_heads}"
259
+ )
260
+
261
+ self.num_attention_heads = config.num_attention_heads
262
+ self.hidden_size = config.hidden_size
263
+ self.attention_head_size = config.hidden_size // config.num_attention_heads
264
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
265
+
266
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
267
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
268
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
269
+
270
+ self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
271
+ self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
272
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
273
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
274
+ self.pruned_heads = set()
275
+
276
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
277
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
278
+ self.max_position_embeddings = config.max_position_embeddings
279
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
280
+
281
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
282
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
283
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
284
+ x = x.view(new_x_shape)
285
+ return x.permute(0, 2, 1, 3)
286
+
287
+ def prune_heads(self, heads: List[int]) -> None:
288
+ if len(heads) == 0:
289
+ return
290
+ heads, index = find_pruneable_heads_and_indices(
291
+ heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
292
+ )
293
+
294
+ # Prune linear layers
295
+ self.query = prune_linear_layer(self.query, index)
296
+ self.key = prune_linear_layer(self.key, index)
297
+ self.value = prune_linear_layer(self.value, index)
298
+ self.dense = prune_linear_layer(self.dense, index, dim=1)
299
+
300
+ # Update hyper params and store pruned heads
301
+ self.num_attention_heads = self.num_attention_heads - len(heads)
302
+ self.all_head_size = self.attention_head_size * self.num_attention_heads
303
+ self.pruned_heads = self.pruned_heads.union(heads)
304
+
305
+ def forward(
306
+ self,
307
+ hidden_states: torch.Tensor,
308
+ attention_mask: Optional[torch.FloatTensor] = None,
309
+ head_mask: Optional[torch.FloatTensor] = None,
310
+ output_attentions: bool = False,
311
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
312
+ mixed_query_layer = self.query(hidden_states)
313
+ mixed_key_layer = self.key(hidden_states)
314
+ mixed_value_layer = self.value(hidden_states)
315
+
316
+ query_layer = self.transpose_for_scores(mixed_query_layer)
317
+ key_layer = self.transpose_for_scores(mixed_key_layer)
318
+ value_layer = self.transpose_for_scores(mixed_value_layer)
319
+
320
+ # Take the dot product between "query" and "key" to get the raw attention scores.
321
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
322
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
323
+
324
+ if attention_mask is not None:
325
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
326
+ attention_scores = attention_scores + attention_mask
327
+
328
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
329
+ seq_length = hidden_states.size()[1]
330
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
331
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
332
+ distance = position_ids_l - position_ids_r
333
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
334
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
335
+
336
+ if self.position_embedding_type == "relative_key":
337
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
338
+ attention_scores = attention_scores + relative_position_scores
339
+ elif self.position_embedding_type == "relative_key_query":
340
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
341
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
342
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
343
+
344
+ # Normalize the attention scores to probabilities.
345
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
346
+
347
+ # This is actually dropping out entire tokens to attend to, which might
348
+ # seem a bit unusual, but is taken from the original Transformer paper.
349
+ attention_probs = self.attention_dropout(attention_probs)
350
+
351
+ # Mask heads if we want to
352
+ if head_mask is not None:
353
+ attention_probs = attention_probs * head_mask
354
+
355
+ context_layer = torch.matmul(attention_probs, value_layer)
356
+ context_layer = context_layer.transpose(2, 1).flatten(2)
357
+
358
+ projected_context_layer = self.dense(context_layer)
359
+ projected_context_layer_dropout = self.output_dropout(projected_context_layer)
360
+ layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
361
+ return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
362
+
363
+
364
+ class AlbertLayer(nn.Module):
365
+ def __init__(self, config: AlbertConfig):
366
+ super().__init__()
367
+
368
+ self.config = config
369
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
370
+ self.seq_len_dim = 1
371
+ self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
372
+ self.attention = AlbertAttention(config)
373
+ self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
374
+ self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
375
+ self.activation = ACT2FN[config.hidden_act]
376
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
377
+
378
+ def forward(
379
+ self,
380
+ hidden_states: torch.Tensor,
381
+ attention_mask: Optional[torch.FloatTensor] = None,
382
+ head_mask: Optional[torch.FloatTensor] = None,
383
+ output_attentions: bool = False,
384
+ output_hidden_states: bool = False,
385
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
386
+ attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
387
+
388
+ ffn_output = apply_chunking_to_forward(
389
+ self.ff_chunk,
390
+ self.chunk_size_feed_forward,
391
+ self.seq_len_dim,
392
+ attention_output[0],
393
+ )
394
+ hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
395
+
396
+ return (hidden_states,) + attention_output[1:] # add attentions if we output them
397
+
398
+ def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor:
399
+ ffn_output = self.ffn(attention_output)
400
+ ffn_output = self.activation(ffn_output)
401
+ ffn_output = self.ffn_output(ffn_output)
402
+ return ffn_output
403
+
404
+
405
+ class AlbertLayerGroup(nn.Module):
406
+ def __init__(self, config: AlbertConfig):
407
+ super().__init__()
408
+
409
+ self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
410
+
411
+ def forward(
412
+ self,
413
+ hidden_states: torch.Tensor,
414
+ attention_mask: Optional[torch.FloatTensor] = None,
415
+ head_mask: Optional[torch.FloatTensor] = None,
416
+ output_attentions: bool = False,
417
+ output_hidden_states: bool = False,
418
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
419
+ layer_hidden_states = ()
420
+ layer_attentions = ()
421
+
422
+ for layer_index, albert_layer in enumerate(self.albert_layers):
423
+ layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
424
+ hidden_states = layer_output[0]
425
+
426
+ if output_attentions:
427
+ layer_attentions = layer_attentions + (layer_output[1],)
428
+
429
+ if output_hidden_states:
430
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
431
+
432
+ outputs = (hidden_states,)
433
+ if output_hidden_states:
434
+ outputs = outputs + (layer_hidden_states,)
435
+ if output_attentions:
436
+ outputs = outputs + (layer_attentions,)
437
+ return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
438
+
439
+
440
+ class AlbertTransformer(nn.Module):
441
+ def __init__(self, config: AlbertConfig):
442
+ super().__init__()
443
+
444
+ self.config = config
445
+ self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
446
+ self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
447
+
448
+ def forward(
449
+ self,
450
+ hidden_states: torch.Tensor,
451
+ attention_mask: Optional[torch.FloatTensor] = None,
452
+ head_mask: Optional[torch.FloatTensor] = None,
453
+ output_attentions: bool = False,
454
+ output_hidden_states: bool = False,
455
+ return_dict: bool = True,
456
+ ) -> Union[BaseModelOutput, Tuple]:
457
+ hidden_states = self.embedding_hidden_mapping_in(hidden_states)
458
+
459
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
460
+ all_attentions = () if output_attentions else None
461
+
462
+ head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask
463
+
464
+ for i in range(self.config.num_hidden_layers):
465
+ # Number of layers in a hidden group
466
+ layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
467
+
468
+ # Index of the hidden group
469
+ group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
470
+
471
+ layer_group_output = self.albert_layer_groups[group_idx](
472
+ hidden_states,
473
+ attention_mask,
474
+ head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
475
+ output_attentions,
476
+ output_hidden_states,
477
+ )
478
+ hidden_states = layer_group_output[0]
479
+
480
+ if output_attentions:
481
+ all_attentions = all_attentions + layer_group_output[-1]
482
+
483
+ if output_hidden_states:
484
+ all_hidden_states = all_hidden_states + (hidden_states,)
485
+
486
+ if not return_dict:
487
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
488
+ return BaseModelOutput(
489
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
490
+ )
491
+
492
+
493
+ class AlbertPreTrainedModel(PreTrainedModel):
494
+ """
495
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
496
+ models.
497
+ """
498
+
499
+ config_class = AlbertConfig
500
+ load_tf_weights = load_tf_weights_in_albert
501
+ base_model_prefix = "albert"
502
+
503
+ def _init_weights(self, module):
504
+ """Initialize the weights."""
505
+ if isinstance(module, nn.Linear):
506
+ # Slightly different from the TF version which uses truncated_normal for initialization
507
+ # cf https://github.com/pytorch/pytorch/pull/5617
508
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
509
+ if module.bias is not None:
510
+ module.bias.data.zero_()
511
+ elif isinstance(module, nn.Embedding):
512
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
513
+ if module.padding_idx is not None:
514
+ module.weight.data[module.padding_idx].zero_()
515
+ elif isinstance(module, nn.LayerNorm):
516
+ module.bias.data.zero_()
517
+ module.weight.data.fill_(1.0)
518
+
519
+
520
+ @dataclass
521
+ class AlbertForPreTrainingOutput(ModelOutput):
522
+ """
523
+ Output type of [`AlbertForPreTraining`].
524
+
525
+ Args:
526
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
527
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
528
+ (classification) loss.
529
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
530
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
531
+ sop_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
532
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
533
+ before SoftMax).
534
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
535
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
536
+ shape `(batch_size, sequence_length, hidden_size)`.
537
+
538
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
539
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
540
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
541
+ sequence_length)`.
542
+
543
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
544
+ heads.
545
+ """
546
+
547
+ loss: Optional[torch.FloatTensor] = None
548
+ prediction_logits: torch.FloatTensor = None
549
+ sop_logits: torch.FloatTensor = None
550
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
551
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
552
+
553
+
554
+ ALBERT_START_DOCSTRING = r"""
555
+
556
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
557
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
558
+ etc.)
559
+
560
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
561
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
562
+ and behavior.
563
+
564
+ Args:
565
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
566
+ Initializing with a config file does not load the weights associated with the model, only the
567
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
568
+ """
569
+
570
+ ALBERT_INPUTS_DOCSTRING = r"""
571
+ Args:
572
+ input_ids (`torch.LongTensor` of shape `({0})`):
573
+ Indices of input sequence tokens in the vocabulary.
574
+
575
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
576
+ [`PreTrainedTokenizer.encode`] for details.
577
+
578
+ [What are input IDs?](../glossary#input-ids)
579
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
580
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
581
+
582
+ - 1 for tokens that are **not masked**,
583
+ - 0 for tokens that are **masked**.
584
+
585
+ [What are attention masks?](../glossary#attention-mask)
586
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
587
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
588
+ 1]`:
589
+
590
+ - 0 corresponds to a *sentence A* token,
591
+ - 1 corresponds to a *sentence B* token.
592
+
593
+ [What are token type IDs?](../glossary#token-type-ids)
594
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
595
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
596
+ config.max_position_embeddings - 1]`.
597
+
598
+ [What are position IDs?](../glossary#position-ids)
599
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
600
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
601
+
602
+ - 1 indicates the head is **not masked**,
603
+ - 0 indicates the head is **masked**.
604
+
605
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
606
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
607
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
608
+ model's internal embedding lookup matrix.
609
+ output_attentions (`bool`, *optional*):
610
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
611
+ tensors for more detail.
612
+ output_hidden_states (`bool`, *optional*):
613
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
614
+ more detail.
615
+ return_dict (`bool`, *optional*):
616
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
617
+ """
618
+
619
+
620
+ @add_start_docstrings(
621
+ "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
622
+ ALBERT_START_DOCSTRING,
623
+ )
624
+ class AlbertModel(AlbertPreTrainedModel):
625
+ config_class = AlbertConfig
626
+ base_model_prefix = "albert"
627
+
628
+ def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True):
629
+ super().__init__(config)
630
+
631
+ self.config = config
632
+ self.embeddings = AlbertEmbeddings(config)
633
+ self.encoder = AlbertTransformer(config)
634
+ if add_pooling_layer:
635
+ self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
636
+ self.pooler_activation = nn.Tanh()
637
+ else:
638
+ self.pooler = None
639
+ self.pooler_activation = None
640
+
641
+ # Initialize weights and apply final processing
642
+ self.post_init()
643
+
644
+ def get_input_embeddings(self) -> nn.Embedding:
645
+ return self.embeddings.word_embeddings
646
+
647
+ def set_input_embeddings(self, value: nn.Embedding) -> None:
648
+ self.embeddings.word_embeddings = value
649
+
650
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
651
+ """
652
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has
653
+ a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT
654
+ model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers.
655
+
656
+ These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
657
+ while [2,3] correspond to the two inner groups of the second hidden layer.
658
+
659
+ Any layer with an index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more
660
+ information about head pruning
661
+ """
662
+ for layer, heads in heads_to_prune.items():
663
+ group_idx = int(layer / self.config.inner_group_num)
664
+ inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
665
+ self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
666
+
667
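# A minimal sketch of the flattened-layer index arithmetic that `_prune_heads`
# above relies on, assuming `inner_group_num = 2` as in the docstring's example:
# flattened index 3 maps to the second inner layer of the second group.
inner_group_num = 2  # assumed value for illustration
for layer in [0, 1, 2, 3]:
    group_idx = int(layer / inner_group_num)
    inner_group_idx = int(layer - group_idx * inner_group_num)
    print(layer, "->", (group_idx, inner_group_idx))
# prints: 0 -> (0, 0), 1 -> (0, 1), 2 -> (1, 0), 3 -> (1, 1)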
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
668
+ @add_code_sample_docstrings(
669
+ checkpoint=_CHECKPOINT_FOR_DOC,
670
+ output_type=BaseModelOutputWithPooling,
671
+ config_class=_CONFIG_FOR_DOC,
672
+ )
673
+ def forward(
674
+ self,
675
+ input_ids: Optional[torch.LongTensor] = None,
676
+ attention_mask: Optional[torch.FloatTensor] = None,
677
+ token_type_ids: Optional[torch.LongTensor] = None,
678
+ position_ids: Optional[torch.LongTensor] = None,
679
+ head_mask: Optional[torch.FloatTensor] = None,
680
+ inputs_embeds: Optional[torch.FloatTensor] = None,
681
+ output_attentions: Optional[bool] = None,
682
+ output_hidden_states: Optional[bool] = None,
683
+ return_dict: Optional[bool] = None,
684
+ ) -> Union[BaseModelOutputWithPooling, Tuple]:
685
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
686
+ output_hidden_states = (
687
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
688
+ )
689
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
690
+
691
+ if input_ids is not None and inputs_embeds is not None:
692
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
693
+ elif input_ids is not None:
694
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
695
+ input_shape = input_ids.size()
696
+ elif inputs_embeds is not None:
697
+ input_shape = inputs_embeds.size()[:-1]
698
+ else:
699
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
700
+
701
+ batch_size, seq_length = input_shape
702
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
703
+
704
+ if attention_mask is None:
705
+ attention_mask = torch.ones(input_shape, device=device)
706
+ if token_type_ids is None:
707
+ if hasattr(self.embeddings, "token_type_ids"):
708
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
709
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
710
+ token_type_ids = buffered_token_type_ids_expanded
711
+ else:
712
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
713
+
714
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
715
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
716
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
717
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
718
+
719
+ embedding_output = self.embeddings(
720
+ input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
721
+ )
722
+ encoder_outputs = self.encoder(
723
+ embedding_output,
724
+ extended_attention_mask,
725
+ head_mask=head_mask,
726
+ output_attentions=output_attentions,
727
+ output_hidden_states=output_hidden_states,
728
+ return_dict=return_dict,
729
+ )
730
+
731
+ sequence_output = encoder_outputs[0]
732
+
733
+ pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
734
+
735
+ if not return_dict:
736
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
737
+
738
+ return BaseModelOutputWithPooling(
739
+ last_hidden_state=sequence_output,
740
+ pooler_output=pooled_output,
741
+ hidden_states=encoder_outputs.hidden_states,
742
+ attentions=encoder_outputs.attentions,
743
+ )
744
+
745
+
746
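# A minimal usage sketch for AlbertModel above, assuming the albert/albert-base-v2
# checkpoint referenced elsewhere in this file can be downloaded: the model returns
# per-token hidden states plus a pooled [CLS] representation.
import torch
from transformers import AlbertModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertModel.from_pretrained("albert/albert-base-v2")
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size)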
+ @add_start_docstrings(
747
+ """
748
+ Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
749
+ `sentence order prediction (classification)` head.
750
+ """,
751
+ ALBERT_START_DOCSTRING,
752
+ )
753
+ class AlbertForPreTraining(AlbertPreTrainedModel):
754
+ _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
755
+
756
+ def __init__(self, config: AlbertConfig):
757
+ super().__init__(config)
758
+
759
+ self.albert = AlbertModel(config)
760
+ self.predictions = AlbertMLMHead(config)
761
+ self.sop_classifier = AlbertSOPHead(config)
762
+
763
+ # Initialize weights and apply final processing
764
+ self.post_init()
765
+
766
+ def get_output_embeddings(self) -> nn.Linear:
767
+ return self.predictions.decoder
768
+
769
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
770
+ self.predictions.decoder = new_embeddings
771
+
772
+ def get_input_embeddings(self) -> nn.Embedding:
773
+ return self.albert.embeddings.word_embeddings
774
+
775
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
776
+ @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
777
+ def forward(
778
+ self,
779
+ input_ids: Optional[torch.LongTensor] = None,
780
+ attention_mask: Optional[torch.FloatTensor] = None,
781
+ token_type_ids: Optional[torch.LongTensor] = None,
782
+ position_ids: Optional[torch.LongTensor] = None,
783
+ head_mask: Optional[torch.FloatTensor] = None,
784
+ inputs_embeds: Optional[torch.FloatTensor] = None,
785
+ labels: Optional[torch.LongTensor] = None,
786
+ sentence_order_label: Optional[torch.LongTensor] = None,
787
+ output_attentions: Optional[bool] = None,
788
+ output_hidden_states: Optional[bool] = None,
789
+ return_dict: Optional[bool] = None,
790
+ ) -> Union[AlbertForPreTrainingOutput, Tuple]:
791
+ r"""
792
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
793
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
794
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
795
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
796
+ sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
797
+ Labels for computing the sentence order prediction (classification) loss. Input should be a sequence pair
798
+ (see `input_ids` docstring). Indices should be in `[0, 1]`. `0` indicates original order (sequence A, then
799
+ sequence B), `1` indicates switched order (sequence B, then sequence A).
800
+
801
+ Returns:
802
+
803
+ Example:
804
+
805
+ ```python
806
+ >>> from transformers import AutoTokenizer, AlbertForPreTraining
807
+ >>> import torch
808
+
809
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
810
+ >>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")
811
+
812
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
813
+ >>> # Batch size 1
814
+ >>> outputs = model(input_ids)
815
+
816
+ >>> prediction_logits = outputs.prediction_logits
817
+ >>> sop_logits = outputs.sop_logits
818
+ ```"""
819
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
820
+
821
+ outputs = self.albert(
822
+ input_ids,
823
+ attention_mask=attention_mask,
824
+ token_type_ids=token_type_ids,
825
+ position_ids=position_ids,
826
+ head_mask=head_mask,
827
+ inputs_embeds=inputs_embeds,
828
+ output_attentions=output_attentions,
829
+ output_hidden_states=output_hidden_states,
830
+ return_dict=return_dict,
831
+ )
832
+
833
+ sequence_output, pooled_output = outputs[:2]
834
+
835
+ prediction_scores = self.predictions(sequence_output)
836
+ sop_scores = self.sop_classifier(pooled_output)
837
+
838
+ total_loss = None
839
+ if labels is not None and sentence_order_label is not None:
840
+ loss_fct = CrossEntropyLoss()
841
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
842
+ sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
843
+ total_loss = masked_lm_loss + sentence_order_loss
844
+
845
+ if not return_dict:
846
+ output = (prediction_scores, sop_scores) + outputs[2:]
847
+ return ((total_loss,) + output) if total_loss is not None else output
848
+
849
+ return AlbertForPreTrainingOutput(
850
+ loss=total_loss,
851
+ prediction_logits=prediction_scores,
852
+ sop_logits=sop_scores,
853
+ hidden_states=outputs.hidden_states,
854
+ attentions=outputs.attentions,
855
+ )
856
+
857
+
858
+ class AlbertMLMHead(nn.Module):
859
+ def __init__(self, config: AlbertConfig):
860
+ super().__init__()
861
+
862
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
863
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
864
+ self.dense = nn.Linear(config.hidden_size, config.embedding_size)
865
+ self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
866
+ self.activation = ACT2FN[config.hidden_act]
867
+ self.decoder.bias = self.bias
868
+
869
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
870
+ hidden_states = self.dense(hidden_states)
871
+ hidden_states = self.activation(hidden_states)
872
+ hidden_states = self.LayerNorm(hidden_states)
873
+ hidden_states = self.decoder(hidden_states)
874
+
875
+ prediction_scores = hidden_states
876
+
877
+ return prediction_scores
878
+
879
+ def _tie_weights(self) -> None:
880
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
881
+ self.bias = self.decoder.bias
882
+
883
+
884
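# A minimal sketch of the AlbertMLMHead shape flow above, using small assumed
# sizes, with GELU standing in for `config.hidden_act`: hidden states are first
# projected down to `embedding_size` (ALBERT factorizes its embeddings), then up
# to the vocabulary.
import torch
from torch import nn
from torch.nn import functional as F

hidden_size, embedding_size, vocab_size = 8, 4, 30  # assumed toy sizes
dense = nn.Linear(hidden_size, embedding_size)
layer_norm = nn.LayerNorm(embedding_size)
decoder = nn.Linear(embedding_size, vocab_size)

hidden_states = torch.randn(2, 5, hidden_size)             # (batch, seq_len, hidden_size)
scores = decoder(layer_norm(F.gelu(dense(hidden_states))))
print(scores.shape)                                        # torch.Size([2, 5, 30])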
+ class AlbertSOPHead(nn.Module):
885
+ def __init__(self, config: AlbertConfig):
886
+ super().__init__()
887
+
888
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
889
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
890
+
891
+ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
892
+ dropout_pooled_output = self.dropout(pooled_output)
893
+ logits = self.classifier(dropout_pooled_output)
894
+ return logits
895
+
896
+
897
+ @add_start_docstrings(
898
+ "Albert Model with a `language modeling` head on top.",
899
+ ALBERT_START_DOCSTRING,
900
+ )
901
+ class AlbertForMaskedLM(AlbertPreTrainedModel):
902
+ _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
903
+
904
+ def __init__(self, config):
905
+ super().__init__(config)
906
+
907
+ self.albert = AlbertModel(config, add_pooling_layer=False)
908
+ self.predictions = AlbertMLMHead(config)
909
+
910
+ # Initialize weights and apply final processing
911
+ self.post_init()
912
+
913
+ def get_output_embeddings(self) -> nn.Linear:
914
+ return self.predictions.decoder
915
+
916
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
917
+ self.predictions.decoder = new_embeddings
918
+
919
+ def get_input_embeddings(self) -> nn.Embedding:
920
+ return self.albert.embeddings.word_embeddings
921
+
922
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
923
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
924
+ def forward(
925
+ self,
926
+ input_ids: Optional[torch.LongTensor] = None,
927
+ attention_mask: Optional[torch.FloatTensor] = None,
928
+ token_type_ids: Optional[torch.LongTensor] = None,
929
+ position_ids: Optional[torch.LongTensor] = None,
930
+ head_mask: Optional[torch.FloatTensor] = None,
931
+ inputs_embeds: Optional[torch.FloatTensor] = None,
932
+ labels: Optional[torch.LongTensor] = None,
933
+ output_attentions: Optional[bool] = None,
934
+ output_hidden_states: Optional[bool] = None,
935
+ return_dict: Optional[bool] = None,
936
+ ) -> Union[MaskedLMOutput, Tuple]:
937
+ r"""
938
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
939
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
940
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
941
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
942
+
943
+ Returns:
944
+
945
+ Example:
946
+
947
+ ```python
948
+ >>> import torch
949
+ >>> from transformers import AutoTokenizer, AlbertForMaskedLM
950
+
951
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
952
+ >>> model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
953
+
954
+ >>> # add mask_token
955
+ >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="pt")
956
+ >>> with torch.no_grad():
957
+ ... logits = model(**inputs).logits
958
+
959
+ >>> # retrieve index of [MASK]
960
+ >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
961
+ >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
962
+ >>> tokenizer.decode(predicted_token_id)
963
+ 'france'
964
+ ```
965
+
966
+ ```python
967
+ >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
968
+ >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
969
+ >>> outputs = model(**inputs, labels=labels)
970
+ >>> round(outputs.loss.item(), 2)
971
+ 0.81
972
+ ```
973
+ """
974
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
975
+
976
+ outputs = self.albert(
977
+ input_ids=input_ids,
978
+ attention_mask=attention_mask,
979
+ token_type_ids=token_type_ids,
980
+ position_ids=position_ids,
981
+ head_mask=head_mask,
982
+ inputs_embeds=inputs_embeds,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ )
987
+ sequence_outputs = outputs[0]
988
+
989
+ prediction_scores = self.predictions(sequence_outputs)
990
+
991
+ masked_lm_loss = None
992
+ if labels is not None:
993
+ loss_fct = CrossEntropyLoss()
994
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
995
+
996
+ if not return_dict:
997
+ output = (prediction_scores,) + outputs[2:]
998
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
999
+
1000
+ return MaskedLMOutput(
1001
+ loss=masked_lm_loss,
1002
+ logits=prediction_scores,
1003
+ hidden_states=outputs.hidden_states,
1004
+ attentions=outputs.attentions,
1005
+ )
1006
+
1007
+
1008
+ @add_start_docstrings(
1009
+ """
1010
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1011
+ output) e.g. for GLUE tasks.
1012
+ """,
1013
+ ALBERT_START_DOCSTRING,
1014
+ )
1015
+ class AlbertForSequenceClassification(AlbertPreTrainedModel):
1016
+ def __init__(self, config: AlbertConfig):
1017
+ super().__init__(config)
1018
+ self.num_labels = config.num_labels
1019
+ self.config = config
1020
+
1021
+ self.albert = AlbertModel(config)
1022
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
1023
+ self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
1024
+
1025
+ # Initialize weights and apply final processing
1026
+ self.post_init()
1027
+
1028
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1029
+ @add_code_sample_docstrings(
1030
+ checkpoint="textattack/albert-base-v2-imdb",
1031
+ output_type=SequenceClassifierOutput,
1032
+ config_class=_CONFIG_FOR_DOC,
1033
+ expected_output="'LABEL_1'",
1034
+ expected_loss=0.12,
1035
+ )
1036
+ def forward(
1037
+ self,
1038
+ input_ids: Optional[torch.LongTensor] = None,
1039
+ attention_mask: Optional[torch.FloatTensor] = None,
1040
+ token_type_ids: Optional[torch.LongTensor] = None,
1041
+ position_ids: Optional[torch.LongTensor] = None,
1042
+ head_mask: Optional[torch.FloatTensor] = None,
1043
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1044
+ labels: Optional[torch.LongTensor] = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ ) -> Union[SequenceClassifierOutput, Tuple]:
1049
+ r"""
1050
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1051
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1052
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1053
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1054
+ """
1055
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1056
+
1057
+ outputs = self.albert(
1058
+ input_ids=input_ids,
1059
+ attention_mask=attention_mask,
1060
+ token_type_ids=token_type_ids,
1061
+ position_ids=position_ids,
1062
+ head_mask=head_mask,
1063
+ inputs_embeds=inputs_embeds,
1064
+ output_attentions=output_attentions,
1065
+ output_hidden_states=output_hidden_states,
1066
+ return_dict=return_dict,
1067
+ )
1068
+
1069
+ pooled_output = outputs[1]
1070
+
1071
+ pooled_output = self.dropout(pooled_output)
1072
+ logits = self.classifier(pooled_output)
1073
+
1074
+ loss = None
1075
+ if labels is not None:
1076
+ if self.config.problem_type is None:
1077
+ if self.num_labels == 1:
1078
+ self.config.problem_type = "regression"
1079
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1080
+ self.config.problem_type = "single_label_classification"
1081
+ else:
1082
+ self.config.problem_type = "multi_label_classification"
1083
+
1084
+ if self.config.problem_type == "regression":
1085
+ loss_fct = MSELoss()
1086
+ if self.num_labels == 1:
1087
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1088
+ else:
1089
+ loss = loss_fct(logits, labels)
1090
+ elif self.config.problem_type == "single_label_classification":
1091
+ loss_fct = CrossEntropyLoss()
1092
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1093
+ elif self.config.problem_type == "multi_label_classification":
1094
+ loss_fct = BCEWithLogitsLoss()
1095
+ loss = loss_fct(logits, labels)
1096
+
1097
+ if not return_dict:
1098
+ output = (logits,) + outputs[2:]
1099
+ return ((loss,) + output) if loss is not None else output
1100
+
1101
+ return SequenceClassifierOutput(
1102
+ loss=loss,
1103
+ logits=logits,
1104
+ hidden_states=outputs.hidden_states,
1105
+ attentions=outputs.attentions,
1106
+ )
1107
+
1108
+
1109
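# A minimal sketch, using assumed toy logits and labels, of how the `problem_type`
# branch above picks a loss: long-dtype labels with num_labels > 1 use
# cross-entropy, float targets use BCE-with-logits, and num_labels == 1 uses MSE.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

num_labels = 3
logits = torch.randn(4, num_labels)

int_labels = torch.tensor([0, 2, 1, 0])            # long dtype -> single_label_classification
print(CrossEntropyLoss()(logits.view(-1, num_labels), int_labels.view(-1)))

multi_hot = torch.tensor([[1.0, 0.0, 1.0]] * 4)    # float targets -> multi_label_classification
print(BCEWithLogitsLoss()(logits, multi_hot))

reg_logits, reg_labels = torch.randn(4, 1), torch.randn(4, 1)  # num_labels == 1 -> regression
print(MSELoss()(reg_logits.squeeze(), reg_labels.squeeze()))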
+ @add_start_docstrings(
1110
+ """
1111
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1112
+ Named-Entity-Recognition (NER) tasks.
1113
+ """,
1114
+ ALBERT_START_DOCSTRING,
1115
+ )
1116
+ class AlbertForTokenClassification(AlbertPreTrainedModel):
1117
+ def __init__(self, config: AlbertConfig):
1118
+ super().__init__(config)
1119
+ self.num_labels = config.num_labels
1120
+
1121
+ self.albert = AlbertModel(config, add_pooling_layer=False)
1122
+ classifier_dropout_prob = (
1123
+ config.classifier_dropout_prob
1124
+ if config.classifier_dropout_prob is not None
1125
+ else config.hidden_dropout_prob
1126
+ )
1127
+ self.dropout = nn.Dropout(classifier_dropout_prob)
1128
+ self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
1129
+
1130
+ # Initialize weights and apply final processing
1131
+ self.post_init()
1132
+
1133
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1134
+ @add_code_sample_docstrings(
1135
+ checkpoint=_CHECKPOINT_FOR_DOC,
1136
+ output_type=TokenClassifierOutput,
1137
+ config_class=_CONFIG_FOR_DOC,
1138
+ )
1139
+ def forward(
1140
+ self,
1141
+ input_ids: Optional[torch.LongTensor] = None,
1142
+ attention_mask: Optional[torch.FloatTensor] = None,
1143
+ token_type_ids: Optional[torch.LongTensor] = None,
1144
+ position_ids: Optional[torch.LongTensor] = None,
1145
+ head_mask: Optional[torch.FloatTensor] = None,
1146
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1147
+ labels: Optional[torch.LongTensor] = None,
1148
+ output_attentions: Optional[bool] = None,
1149
+ output_hidden_states: Optional[bool] = None,
1150
+ return_dict: Optional[bool] = None,
1151
+ ) -> Union[TokenClassifierOutput, Tuple]:
1152
+ r"""
1153
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1154
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1155
+ """
1156
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1157
+
1158
+ outputs = self.albert(
1159
+ input_ids,
1160
+ attention_mask=attention_mask,
1161
+ token_type_ids=token_type_ids,
1162
+ position_ids=position_ids,
1163
+ head_mask=head_mask,
1164
+ inputs_embeds=inputs_embeds,
1165
+ output_attentions=output_attentions,
1166
+ output_hidden_states=output_hidden_states,
1167
+ return_dict=return_dict,
1168
+ )
1169
+
1170
+ sequence_output = outputs[0]
1171
+
1172
+ sequence_output = self.dropout(sequence_output)
1173
+ logits = self.classifier(sequence_output)
1174
+
1175
+ loss = None
1176
+ if labels is not None:
1177
+ loss_fct = CrossEntropyLoss()
1178
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1179
+
1180
+ if not return_dict:
1181
+ output = (logits,) + outputs[2:]
1182
+ return ((loss,) + output) if loss is not None else output
1183
+
1184
+ return TokenClassifierOutput(
1185
+ loss=loss,
1186
+ logits=logits,
1187
+ hidden_states=outputs.hidden_states,
1188
+ attentions=outputs.attentions,
1189
+ )
1190
+
1191
+
1192
+ @add_start_docstrings(
1193
+ """
1194
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1195
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1196
+ """,
1197
+ ALBERT_START_DOCSTRING,
1198
+ )
1199
+ class AlbertForQuestionAnswering(AlbertPreTrainedModel):
1200
+ def __init__(self, config: AlbertConfig):
1201
+ super().__init__(config)
1202
+ self.num_labels = config.num_labels
1203
+
1204
+ self.albert = AlbertModel(config, add_pooling_layer=False)
1205
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1206
+
1207
+ # Initialize weights and apply final processing
1208
+ self.post_init()
1209
+
1210
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1211
+ @add_code_sample_docstrings(
1212
+ checkpoint="twmkn9/albert-base-v2-squad2",
1213
+ output_type=QuestionAnsweringModelOutput,
1214
+ config_class=_CONFIG_FOR_DOC,
1215
+ qa_target_start_index=12,
1216
+ qa_target_end_index=13,
1217
+ expected_output="'a nice puppet'",
1218
+ expected_loss=7.36,
1219
+ )
1220
+ def forward(
1221
+ self,
1222
+ input_ids: Optional[torch.LongTensor] = None,
1223
+ attention_mask: Optional[torch.FloatTensor] = None,
1224
+ token_type_ids: Optional[torch.LongTensor] = None,
1225
+ position_ids: Optional[torch.LongTensor] = None,
1226
+ head_mask: Optional[torch.FloatTensor] = None,
1227
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1228
+ start_positions: Optional[torch.LongTensor] = None,
1229
+ end_positions: Optional[torch.LongTensor] = None,
1230
+ output_attentions: Optional[bool] = None,
1231
+ output_hidden_states: Optional[bool] = None,
1232
+ return_dict: Optional[bool] = None,
1233
+ ) -> Union[AlbertForPreTrainingOutput, Tuple]:
1234
+ r"""
1235
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1236
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1237
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1238
+ are not taken into account for computing the loss.
1239
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1240
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1241
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1242
+ are not taken into account for computing the loss.
1243
+ """
1244
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1245
+
1246
+ outputs = self.albert(
1247
+ input_ids=input_ids,
1248
+ attention_mask=attention_mask,
1249
+ token_type_ids=token_type_ids,
1250
+ position_ids=position_ids,
1251
+ head_mask=head_mask,
1252
+ inputs_embeds=inputs_embeds,
1253
+ output_attentions=output_attentions,
1254
+ output_hidden_states=output_hidden_states,
1255
+ return_dict=return_dict,
1256
+ )
1257
+
1258
+ sequence_output = outputs[0]
1259
+
1260
+ logits: torch.Tensor = self.qa_outputs(sequence_output)
1261
+ start_logits, end_logits = logits.split(1, dim=-1)
1262
+ start_logits = start_logits.squeeze(-1).contiguous()
1263
+ end_logits = end_logits.squeeze(-1).contiguous()
1264
+
1265
+ total_loss = None
1266
+ if start_positions is not None and end_positions is not None:
1267
+ # If we are on multi-GPU, split add a dimension
1268
+ if len(start_positions.size()) > 1:
1269
+ start_positions = start_positions.squeeze(-1)
1270
+ if len(end_positions.size()) > 1:
1271
+ end_positions = end_positions.squeeze(-1)
1272
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1273
+ ignored_index = start_logits.size(1)
1274
+ start_positions = start_positions.clamp(0, ignored_index)
1275
+ end_positions = end_positions.clamp(0, ignored_index)
1276
+
1277
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1278
+ start_loss = loss_fct(start_logits, start_positions)
1279
+ end_loss = loss_fct(end_logits, end_positions)
1280
+ total_loss = (start_loss + end_loss) / 2
1281
+
1282
+ if not return_dict:
1283
+ output = (start_logits, end_logits) + outputs[2:]
1284
+ return ((total_loss,) + output) if total_loss is not None else output
1285
+
1286
+ return QuestionAnsweringModelOutput(
1287
+ loss=total_loss,
1288
+ start_logits=start_logits,
1289
+ end_logits=end_logits,
1290
+ hidden_states=outputs.hidden_states,
1291
+ attentions=outputs.attentions,
1292
+ )
1293
+
1294
+
1295
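# A minimal sketch, with assumed toy tensors, of the span loss computed in the
# forward above: start/end positions are clamped to the sequence length (out-of-range
# targets then hit `ignore_index`) and the two cross-entropy losses are averaged.
import torch
from torch.nn import CrossEntropyLoss

seq_len = 6
start_logits = torch.randn(2, seq_len)
end_logits = torch.randn(2, seq_len)
start_positions = torch.tensor([1, 9]).clamp(0, seq_len)  # 9 is out of range -> clamped to ignored_index
end_positions = torch.tensor([3, 9]).clamp(0, seq_len)

loss_fct = CrossEntropyLoss(ignore_index=seq_len)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)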
+ @add_start_docstrings(
1296
+ """
1297
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1298
+ softmax) e.g. for RocStories/SWAG tasks.
1299
+ """,
1300
+ ALBERT_START_DOCSTRING,
1301
+ )
1302
+ class AlbertForMultipleChoice(AlbertPreTrainedModel):
1303
+ def __init__(self, config: AlbertConfig):
1304
+ super().__init__(config)
1305
+
1306
+ self.albert = AlbertModel(config)
1307
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
1308
+ self.classifier = nn.Linear(config.hidden_size, 1)
1309
+
1310
+ # Initialize weights and apply final processing
1311
+ self.post_init()
1312
+
1313
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1314
+ @add_code_sample_docstrings(
1315
+ checkpoint=_CHECKPOINT_FOR_DOC,
1316
+ output_type=MultipleChoiceModelOutput,
1317
+ config_class=_CONFIG_FOR_DOC,
1318
+ )
1319
+ def forward(
1320
+ self,
1321
+ input_ids: Optional[torch.LongTensor] = None,
1322
+ attention_mask: Optional[torch.FloatTensor] = None,
1323
+ token_type_ids: Optional[torch.LongTensor] = None,
1324
+ position_ids: Optional[torch.LongTensor] = None,
1325
+ head_mask: Optional[torch.FloatTensor] = None,
1326
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1327
+ labels: Optional[torch.LongTensor] = None,
1328
+ output_attentions: Optional[bool] = None,
1329
+ output_hidden_states: Optional[bool] = None,
1330
+ return_dict: Optional[bool] = None,
1331
+ ) -> Union[AlbertForPreTrainingOutput, Tuple]:
1332
+ r"""
1333
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1334
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1335
+ num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see
1336
+ *input_ids* above)
1337
+ """
1338
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1339
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1340
+
1341
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1342
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1343
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1344
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1345
+ inputs_embeds = (
1346
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1347
+ if inputs_embeds is not None
1348
+ else None
1349
+ )
1350
+ outputs = self.albert(
1351
+ input_ids,
1352
+ attention_mask=attention_mask,
1353
+ token_type_ids=token_type_ids,
1354
+ position_ids=position_ids,
1355
+ head_mask=head_mask,
1356
+ inputs_embeds=inputs_embeds,
1357
+ output_attentions=output_attentions,
1358
+ output_hidden_states=output_hidden_states,
1359
+ return_dict=return_dict,
1360
+ )
1361
+
1362
+ pooled_output = outputs[1]
1363
+
1364
+ pooled_output = self.dropout(pooled_output)
1365
+ logits: torch.Tensor = self.classifier(pooled_output)
1366
+ reshaped_logits = logits.view(-1, num_choices)
1367
+
1368
+ loss = None
1369
+ if labels is not None:
1370
+ loss_fct = CrossEntropyLoss()
1371
+ loss = loss_fct(reshaped_logits, labels)
1372
+
1373
+ if not return_dict:
1374
+ output = (reshaped_logits,) + outputs[2:]
1375
+ return ((loss,) + output) if loss is not None else output
1376
+
1377
+ return MultipleChoiceModelOutput(
1378
+ loss=loss,
1379
+ logits=reshaped_logits,
1380
+ hidden_states=outputs.hidden_states,
1381
+ attentions=outputs.attentions,
1382
+ )
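# A minimal sketch, with assumed toy shapes, of the multiple-choice reshaping done
# in the forward above: inputs of shape (batch, num_choices, seq_len) are flattened
# so the shared encoder sees (batch * num_choices, seq_len), and the per-choice
# scores are reshaped back to (batch, num_choices).
import torch

batch_size, num_choices, seq_len = 2, 4, 7
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))
flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # (8, 7)
logits = torch.randn(flat_input_ids.size(0), 1)          # one score per (example, choice)
reshaped_logits = logits.view(-1, num_choices)           # (2, 4)
print(flat_input_ids.shape, reshaped_logits.shape)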
venv/lib/python3.10/site-packages/transformers/models/albert/modeling_flax_albert.py ADDED
@@ -0,0 +1,1121 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Callable, Optional, Tuple
17
+
18
+ import flax
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen.attention import dot_product_attention_weights
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+
28
+ from ...modeling_flax_outputs import (
29
+ FlaxBaseModelOutput,
30
+ FlaxBaseModelOutputWithPooling,
31
+ FlaxMaskedLMOutput,
32
+ FlaxMultipleChoiceModelOutput,
33
+ FlaxQuestionAnsweringModelOutput,
34
+ FlaxSequenceClassifierOutput,
35
+ FlaxTokenClassifierOutput,
36
+ )
37
+ from ...modeling_flax_utils import (
38
+ ACT2FN,
39
+ FlaxPreTrainedModel,
40
+ append_call_sample_docstring,
41
+ append_replace_return_docstrings,
42
+ overwrite_call_docstring,
43
+ )
44
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
45
+ from .configuration_albert import AlbertConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+ _CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
51
+ _CONFIG_FOR_DOC = "AlbertConfig"
52
+
53
+
54
+ @flax.struct.dataclass
55
+ class FlaxAlbertForPreTrainingOutput(ModelOutput):
56
+ """
57
+ Output type of [`FlaxAlbertForPreTraining`].
58
+
59
+ Args:
60
+ prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
61
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
62
+ sop_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
63
+ Prediction scores of the sentence order prediction (classification) head (scores of True/False continuation
64
+ before SoftMax).
65
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
66
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
67
+ `(batch_size, sequence_length, hidden_size)`.
68
+
69
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
70
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
71
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
72
+ sequence_length)`.
73
+
74
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
75
+ heads.
76
+ """
77
+
78
+ prediction_logits: jnp.ndarray = None
79
+ sop_logits: jnp.ndarray = None
80
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
81
+ attentions: Optional[Tuple[jnp.ndarray]] = None
82
+
83
+
84
+ ALBERT_START_DOCSTRING = r"""
85
+
86
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
87
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
88
+
89
+ This model is also a
90
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
91
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
92
+ behavior.
93
+
94
+ Finally, this model supports inherent JAX features such as:
95
+
96
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
97
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
98
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
99
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
100
+
101
+ Parameters:
102
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
103
+ Initializing with a config file does not load the weights associated with the model, only the
104
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
105
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
106
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
107
+ `jax.numpy.bfloat16` (on TPUs).
108
+
109
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
110
+ specified all the computation will be performed with the given `dtype`.
111
+
112
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
113
+ parameters.**
114
+
115
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
116
+ [`~FlaxPreTrainedModel.to_bf16`].
117
+ """
118
+
119
+ ALBERT_INPUTS_DOCSTRING = r"""
120
+ Args:
121
+ input_ids (`numpy.ndarray` of shape `({0})`):
122
+ Indices of input sequence tokens in the vocabulary.
123
+
124
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
125
+ [`PreTrainedTokenizer.__call__`] for details.
126
+
127
+ [What are input IDs?](../glossary#input-ids)
128
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
129
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
130
+
131
+ - 1 for tokens that are **not masked**,
132
+ - 0 for tokens that are **masked**.
133
+
134
+ [What are attention masks?](../glossary#attention-mask)
135
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
136
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
137
+ 1]`:
138
+
139
+ - 0 corresponds to a *sentence A* token,
140
+ - 1 corresponds to a *sentence B* token.
141
+
142
+ [What are token type IDs?](../glossary#token-type-ids)
143
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
144
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
145
+ config.max_position_embeddings - 1]`.
146
+ return_dict (`bool`, *optional*):
147
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
148
+
149
+ """
150
+
151
+
152
+ class FlaxAlbertEmbeddings(nn.Module):
153
+ """Construct the embeddings from word, position and token_type embeddings."""
154
+
155
+ config: AlbertConfig
156
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
157
+
158
+ def setup(self):
159
+ self.word_embeddings = nn.Embed(
160
+ self.config.vocab_size,
161
+ self.config.embedding_size,
162
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
163
+ )
164
+ self.position_embeddings = nn.Embed(
165
+ self.config.max_position_embeddings,
166
+ self.config.embedding_size,
167
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
168
+ )
169
+ self.token_type_embeddings = nn.Embed(
170
+ self.config.type_vocab_size,
171
+ self.config.embedding_size,
172
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
173
+ )
174
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
175
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
176
+
177
+ def __call__(self, input_ids, token_type_ids, position_ids, deterministic: bool = True):
178
+ # Embed
179
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
180
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
181
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
182
+
183
+ # Sum all embeddings
184
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
185
+
186
+ # Layer Norm
187
+ hidden_states = self.LayerNorm(hidden_states)
188
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
189
+ return hidden_states
190
+
191
+
192
+ class FlaxAlbertSelfAttention(nn.Module):
193
+ config: AlbertConfig
194
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
195
+
196
+ def setup(self):
197
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
198
+ raise ValueError(
199
+ "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
200
+ " : {self.config.num_attention_heads}"
201
+ )
202
+
203
+ self.query = nn.Dense(
204
+ self.config.hidden_size,
205
+ dtype=self.dtype,
206
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
207
+ )
208
+ self.key = nn.Dense(
209
+ self.config.hidden_size,
210
+ dtype=self.dtype,
211
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
212
+ )
213
+ self.value = nn.Dense(
214
+ self.config.hidden_size,
215
+ dtype=self.dtype,
216
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
217
+ )
218
+ self.dense = nn.Dense(
219
+ self.config.hidden_size,
220
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
221
+ dtype=self.dtype,
222
+ )
223
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
224
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
225
+
226
+ def __call__(self, hidden_states, attention_mask, deterministic=True, output_attentions: bool = False):
227
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
228
+
229
+ query_states = self.query(hidden_states).reshape(
230
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
231
+ )
232
+ value_states = self.value(hidden_states).reshape(
233
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
234
+ )
235
+ key_states = self.key(hidden_states).reshape(
236
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
237
+ )
238
+
239
+ # Convert the boolean attention mask to an attention bias.
240
+ if attention_mask is not None:
241
+ # attention mask in the form of attention bias
242
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
243
+ attention_bias = lax.select(
244
+ attention_mask > 0,
245
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
246
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
247
+ )
248
+ else:
249
+ attention_bias = None
250
+
251
+ dropout_rng = None
252
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
253
+ dropout_rng = self.make_rng("dropout")
254
+
255
+ attn_weights = dot_product_attention_weights(
256
+ query_states,
257
+ key_states,
258
+ bias=attention_bias,
259
+ dropout_rng=dropout_rng,
260
+ dropout_rate=self.config.attention_probs_dropout_prob,
261
+ broadcast_dropout=True,
262
+ deterministic=deterministic,
263
+ dtype=self.dtype,
264
+ precision=None,
265
+ )
266
+
267
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
268
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
269
+
270
+ projected_attn_output = self.dense(attn_output)
271
+ projected_attn_output = self.dropout(projected_attn_output, deterministic=deterministic)
272
+ layernormed_attn_output = self.LayerNorm(projected_attn_output + hidden_states)
273
+ outputs = (layernormed_attn_output, attn_weights) if output_attentions else (layernormed_attn_output,)
274
+ return outputs
275
+
276
+
277
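# A minimal sketch, assuming float32, of how the boolean attention mask in the
# __call__ above becomes an additive bias: masked (padding) positions receive a
# very large negative value, so they get ~0 weight after the attention softmax.
import jax.numpy as jnp
from jax import lax

attention_mask = jnp.array([[1, 1, 0]])                        # 0 marks padding
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
    attention_mask > 0,
    jnp.full(attention_mask.shape, 0.0).astype(jnp.float32),
    jnp.full(attention_mask.shape, jnp.finfo(jnp.float32).min).astype(jnp.float32),
)
print(attention_bias)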
+ class FlaxAlbertLayer(nn.Module):
278
+ config: AlbertConfig
279
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
280
+
281
+ def setup(self):
282
+ self.attention = FlaxAlbertSelfAttention(self.config, dtype=self.dtype)
283
+ self.ffn = nn.Dense(
284
+ self.config.intermediate_size,
285
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
286
+ dtype=self.dtype,
287
+ )
288
+ self.activation = ACT2FN[self.config.hidden_act]
289
+ self.ffn_output = nn.Dense(
290
+ self.config.hidden_size,
291
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
292
+ dtype=self.dtype,
293
+ )
294
+ self.full_layer_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
295
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
296
+
297
+ def __call__(
298
+ self,
299
+ hidden_states,
300
+ attention_mask,
301
+ deterministic: bool = True,
302
+ output_attentions: bool = False,
303
+ ):
304
+ attention_outputs = self.attention(
305
+ hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions
306
+ )
307
+ attention_output = attention_outputs[0]
308
+ ffn_output = self.ffn(attention_output)
309
+ ffn_output = self.activation(ffn_output)
310
+ ffn_output = self.ffn_output(ffn_output)
311
+ ffn_output = self.dropout(ffn_output, deterministic=deterministic)
312
+ hidden_states = self.full_layer_layer_norm(ffn_output + attention_output)
313
+
314
+ outputs = (hidden_states,)
315
+
316
+ if output_attentions:
317
+ outputs += (attention_outputs[1],)
318
+ return outputs
319
+
320
+
321
+ class FlaxAlbertLayerCollection(nn.Module):
322
+ config: AlbertConfig
323
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
324
+
325
+ def setup(self):
326
+ self.layers = [
327
+ FlaxAlbertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.inner_group_num)
328
+ ]
329
+
330
+ def __call__(
331
+ self,
332
+ hidden_states,
333
+ attention_mask,
334
+ deterministic: bool = True,
335
+ output_attentions: bool = False,
336
+ output_hidden_states: bool = False,
337
+ ):
338
+ layer_hidden_states = ()
339
+ layer_attentions = ()
340
+
341
+ for layer_index, albert_layer in enumerate(self.layers):
342
+ layer_output = albert_layer(
343
+ hidden_states,
344
+ attention_mask,
345
+ deterministic=deterministic,
346
+ output_attentions=output_attentions,
347
+ )
348
+ hidden_states = layer_output[0]
349
+
350
+ if output_attentions:
351
+ layer_attentions = layer_attentions + (layer_output[1],)
352
+
353
+ if output_hidden_states:
354
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
355
+
356
+ outputs = (hidden_states,)
357
+ if output_hidden_states:
358
+ outputs = outputs + (layer_hidden_states,)
359
+ if output_attentions:
360
+ outputs = outputs + (layer_attentions,)
361
+ return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
362
+
363
+
364
+ class FlaxAlbertLayerCollections(nn.Module):
365
+ config: AlbertConfig
366
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
367
+ layer_index: Optional[str] = None
368
+
369
+ def setup(self):
370
+ self.albert_layers = FlaxAlbertLayerCollection(self.config, dtype=self.dtype)
371
+
372
+ def __call__(
373
+ self,
374
+ hidden_states,
375
+ attention_mask,
376
+ deterministic: bool = True,
377
+ output_attentions: bool = False,
378
+ output_hidden_states: bool = False,
379
+ ):
380
+ outputs = self.albert_layers(
381
+ hidden_states,
382
+ attention_mask,
383
+ deterministic=deterministic,
384
+ output_attentions=output_attentions,
385
+ output_hidden_states=output_hidden_states,
386
+ )
387
+ return outputs
388
+
389
+
390
+ class FlaxAlbertLayerGroups(nn.Module):
391
+ config: AlbertConfig
392
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
393
+
394
+ def setup(self):
395
+ self.layers = [
396
+ FlaxAlbertLayerCollections(self.config, name=str(i), layer_index=str(i), dtype=self.dtype)
397
+ for i in range(self.config.num_hidden_groups)
398
+ ]
399
+
400
+ def __call__(
401
+ self,
402
+ hidden_states,
403
+ attention_mask,
404
+ deterministic: bool = True,
405
+ output_attentions: bool = False,
406
+ output_hidden_states: bool = False,
407
+ return_dict: bool = True,
408
+ ):
409
+ all_attentions = () if output_attentions else None
410
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
411
+
412
+ for i in range(self.config.num_hidden_layers):
413
+ # Index of the hidden group
414
+ group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
415
+ layer_group_output = self.layers[group_idx](
416
+ hidden_states,
417
+ attention_mask,
418
+ deterministic=deterministic,
419
+ output_attentions=output_attentions,
420
+ output_hidden_states=output_hidden_states,
421
+ )
422
+ hidden_states = layer_group_output[0]
423
+
424
+ if output_attentions:
425
+ all_attentions = all_attentions + layer_group_output[-1]
426
+
427
+ if output_hidden_states:
428
+ all_hidden_states = all_hidden_states + (hidden_states,)
429
+
430
+ if not return_dict:
431
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
432
+ return FlaxBaseModelOutput(
433
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
434
+ )
435
+
436
+
437
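# A minimal sketch of the group-index computation in FlaxAlbertLayerGroups above,
# assuming 12 hidden layers shared across 1 hidden group (the ALBERT defaults):
# every depth maps to group 0, i.e. the same layer parameters are reused 12 times.
num_hidden_layers, num_hidden_groups = 12, 1  # assumed defaults
groups = [int(i / (num_hidden_layers / num_hidden_groups)) for i in range(num_hidden_layers)]
print(groups)  # [0, 0, ..., 0] -> all 12 iterations reuse the single layer group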
+ class FlaxAlbertEncoder(nn.Module):
438
+ config: AlbertConfig
439
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
440
+
441
+ def setup(self):
442
+ self.embedding_hidden_mapping_in = nn.Dense(
443
+ self.config.hidden_size,
444
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
445
+ dtype=self.dtype,
446
+ )
447
+ self.albert_layer_groups = FlaxAlbertLayerGroups(self.config, dtype=self.dtype)
448
+
449
+ def __call__(
450
+ self,
451
+ hidden_states,
452
+ attention_mask,
453
+ deterministic: bool = True,
454
+ output_attentions: bool = False,
455
+ output_hidden_states: bool = False,
456
+ return_dict: bool = True,
457
+ ):
458
+ hidden_states = self.embedding_hidden_mapping_in(hidden_states)
459
+ return self.albert_layer_groups(
460
+ hidden_states,
461
+ attention_mask,
462
+ deterministic=deterministic,
463
+ output_attentions=output_attentions,
464
+ output_hidden_states=output_hidden_states,
465
+ )
466
+
467
+
468
+ class FlaxAlbertOnlyMLMHead(nn.Module):
469
+ config: AlbertConfig
470
+ dtype: jnp.dtype = jnp.float32
471
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
472
+
473
+ def setup(self):
474
+ self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype)
475
+ self.activation = ACT2FN[self.config.hidden_act]
476
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
477
+ self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
478
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
479
+
480
+ def __call__(self, hidden_states, shared_embedding=None):
481
+ hidden_states = self.dense(hidden_states)
482
+ hidden_states = self.activation(hidden_states)
483
+ hidden_states = self.LayerNorm(hidden_states)
484
+
485
+ if shared_embedding is not None:
486
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
487
+ else:
488
+ hidden_states = self.decoder(hidden_states)
489
+
490
+ hidden_states += self.bias
491
+ return hidden_states
492
+
493
+
494
+ class FlaxAlbertSOPHead(nn.Module):
495
+ config: AlbertConfig
496
+ dtype: jnp.dtype = jnp.float32
497
+
498
+ def setup(self):
499
+ self.dropout = nn.Dropout(self.config.classifier_dropout_prob)
500
+ self.classifier = nn.Dense(2, dtype=self.dtype)
501
+
502
+ def __call__(self, pooled_output, deterministic=True):
503
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
504
+ logits = self.classifier(pooled_output)
505
+ return logits
506
+
507
+
508
+ class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel):
509
+ """
510
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
511
+ models.
512
+ """
513
+
514
+ config_class = AlbertConfig
515
+ base_model_prefix = "albert"
516
+ module_class: nn.Module = None
517
+
518
+ def __init__(
519
+ self,
520
+ config: AlbertConfig,
521
+ input_shape: Tuple = (1, 1),
522
+ seed: int = 0,
523
+ dtype: jnp.dtype = jnp.float32,
524
+ _do_init: bool = True,
525
+ **kwargs,
526
+ ):
527
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
528
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
529
+
530
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
531
+ # init input tensors
532
+ input_ids = jnp.zeros(input_shape, dtype="i4")
533
+ token_type_ids = jnp.zeros_like(input_ids)
534
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
535
+ attention_mask = jnp.ones_like(input_ids)
536
+
537
+ params_rng, dropout_rng = jax.random.split(rng)
538
+ rngs = {"params": params_rng, "dropout": dropout_rng}
539
+
540
+ random_params = self.module.init(
541
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, return_dict=False
542
+ )["params"]
543
+
544
+ if params is not None:
545
+ random_params = flatten_dict(unfreeze(random_params))
546
+ params = flatten_dict(unfreeze(params))
547
+ for missing_key in self._missing_keys:
548
+ params[missing_key] = random_params[missing_key]
549
+ self._missing_keys = set()
550
+ return freeze(unflatten_dict(params))
551
+ else:
552
+ return random_params
553
+
554
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
555
+ def __call__(
556
+ self,
557
+ input_ids,
558
+ attention_mask=None,
559
+ token_type_ids=None,
560
+ position_ids=None,
561
+ params: dict = None,
562
+ dropout_rng: jax.random.PRNGKey = None,
563
+ train: bool = False,
564
+ output_attentions: Optional[bool] = None,
565
+ output_hidden_states: Optional[bool] = None,
566
+ return_dict: Optional[bool] = None,
567
+ ):
568
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
569
+ output_hidden_states = (
570
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
571
+ )
572
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
573
+
574
+ # init input tensors if not passed
575
+ if token_type_ids is None:
576
+ token_type_ids = jnp.zeros_like(input_ids)
577
+
578
+ if position_ids is None:
579
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
580
+
581
+ if attention_mask is None:
582
+ attention_mask = jnp.ones_like(input_ids)
583
+
584
+ # Handle any PRNG if needed
585
+ rngs = {}
586
+ if dropout_rng is not None:
587
+ rngs["dropout"] = dropout_rng
588
+
589
+ return self.module.apply(
590
+ {"params": params or self.params},
591
+ jnp.array(input_ids, dtype="i4"),
592
+ jnp.array(attention_mask, dtype="i4"),
593
+ jnp.array(token_type_ids, dtype="i4"),
594
+ jnp.array(position_ids, dtype="i4"),
595
+ not train,
596
+ output_attentions,
597
+ output_hidden_states,
598
+ return_dict,
599
+ rngs=rngs,
600
+ )
601
+
602
+
603
+ class FlaxAlbertModule(nn.Module):
604
+ config: AlbertConfig
605
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
606
+ add_pooling_layer: bool = True
607
+
608
+ def setup(self):
609
+ self.embeddings = FlaxAlbertEmbeddings(self.config, dtype=self.dtype)
610
+ self.encoder = FlaxAlbertEncoder(self.config, dtype=self.dtype)
611
+ if self.add_pooling_layer:
612
+ self.pooler = nn.Dense(
613
+ self.config.hidden_size,
614
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
615
+ dtype=self.dtype,
616
+ name="pooler",
617
+ )
618
+ self.pooler_activation = nn.tanh
619
+ else:
620
+ self.pooler = None
621
+ self.pooler_activation = None
622
+
623
+ def __call__(
624
+ self,
625
+ input_ids,
626
+ attention_mask,
627
+ token_type_ids: Optional[np.ndarray] = None,
628
+ position_ids: Optional[np.ndarray] = None,
629
+ deterministic: bool = True,
630
+ output_attentions: bool = False,
631
+ output_hidden_states: bool = False,
632
+ return_dict: bool = True,
633
+ ):
634
+ # make sure `token_type_ids` is correctly initialized when not passed
635
+ if token_type_ids is None:
636
+ token_type_ids = jnp.zeros_like(input_ids)
637
+
638
+ # make sure `position_ids` is correctly initialized when not passed
639
+ if position_ids is None:
640
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
641
+
642
+ hidden_states = self.embeddings(input_ids, token_type_ids, position_ids, deterministic=deterministic)
643
+
644
+ outputs = self.encoder(
645
+ hidden_states,
646
+ attention_mask,
647
+ deterministic=deterministic,
648
+ output_attentions=output_attentions,
649
+ output_hidden_states=output_hidden_states,
650
+ return_dict=return_dict,
651
+ )
652
+ hidden_states = outputs[0]
653
+ if self.add_pooling_layer:
654
+ pooled = self.pooler(hidden_states[:, 0])
655
+ pooled = self.pooler_activation(pooled)
656
+ else:
657
+ pooled = None
658
+
659
+ if not return_dict:
660
+ # if pooled is None, don't return it
661
+ if pooled is None:
662
+ return (hidden_states,) + outputs[1:]
663
+ return (hidden_states, pooled) + outputs[1:]
664
+
665
+ return FlaxBaseModelOutputWithPooling(
666
+ last_hidden_state=hidden_states,
667
+ pooler_output=pooled,
668
+ hidden_states=outputs.hidden_states,
669
+ attentions=outputs.attentions,
670
+ )
671
+
672
+
673
+ @add_start_docstrings(
674
+ "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
675
+ ALBERT_START_DOCSTRING,
676
+ )
677
+ class FlaxAlbertModel(FlaxAlbertPreTrainedModel):
678
+ module_class = FlaxAlbertModule
679
+
680
+
681
+ append_call_sample_docstring(FlaxAlbertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
682
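For context, a minimal usage sketch of the bare model registered above; the checkpoint name is the one used in this file's own sample docstrings, and the shape comments follow from the pooling logic in `FlaxAlbertModule`:

```python
# Minimal usage sketch for FlaxAlbertModel (requires transformers with the Flax extras).
from transformers import AutoTokenizer, FlaxAlbertModel

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert/albert-base-v2")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)

# last_hidden_state: (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)
# pooler_output: (batch_size, hidden_size), i.e. tanh(dense(first token))
print(outputs.pooler_output.shape)
```

When calling with `train=True`, a `dropout_rng` also has to be supplied, as `FlaxAlbertPreTrainedModel.__call__` above passes it through `rngs={"dropout": dropout_rng}`.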
+
683
+
684
+ class FlaxAlbertForPreTrainingModule(nn.Module):
685
+ config: AlbertConfig
686
+ dtype: jnp.dtype = jnp.float32
687
+
688
+ def setup(self):
689
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
690
+ self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype)
691
+ self.sop_classifier = FlaxAlbertSOPHead(config=self.config, dtype=self.dtype)
692
+
693
+ def __call__(
694
+ self,
695
+ input_ids,
696
+ attention_mask,
697
+ token_type_ids,
698
+ position_ids,
699
+ deterministic: bool = True,
700
+ output_attentions: bool = False,
701
+ output_hidden_states: bool = False,
702
+ return_dict: bool = True,
703
+ ):
704
+ # Model
705
+ outputs = self.albert(
706
+ input_ids,
707
+ attention_mask,
708
+ token_type_ids,
709
+ position_ids,
710
+ deterministic=deterministic,
711
+ output_attentions=output_attentions,
712
+ output_hidden_states=output_hidden_states,
713
+ return_dict=return_dict,
714
+ )
715
+
716
+ if self.config.tie_word_embeddings:
717
+ shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
718
+ else:
719
+ shared_embedding = None
720
+
721
+ hidden_states = outputs[0]
722
+ pooled_output = outputs[1]
723
+
724
+ prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
725
+ sop_scores = self.sop_classifier(pooled_output, deterministic=deterministic)
726
+
727
+ if not return_dict:
728
+ return (prediction_scores, sop_scores) + outputs[2:]
729
+
730
+ return FlaxAlbertForPreTrainingOutput(
731
+ prediction_logits=prediction_scores,
732
+ sop_logits=sop_scores,
733
+ hidden_states=outputs.hidden_states,
734
+ attentions=outputs.attentions,
735
+ )
736
+
737
+
738
+ @add_start_docstrings(
739
+ """
740
+ Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
741
+ `sentence order prediction (classification)` head.
742
+ """,
743
+ ALBERT_START_DOCSTRING,
744
+ )
745
+ class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel):
746
+ module_class = FlaxAlbertForPreTrainingModule
747
+
748
+
749
+ FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING = """
750
+ Returns:
751
+
752
+ Example:
753
+
754
+ ```python
755
+ >>> from transformers import AutoTokenizer, FlaxAlbertForPreTraining
756
+
757
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
758
+ >>> model = FlaxAlbertForPreTraining.from_pretrained("albert/albert-base-v2")
759
+
760
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
761
+ >>> outputs = model(**inputs)
762
+
763
+ >>> prediction_logits = outputs.prediction_logits
764
+ >>> seq_relationship_logits = outputs.sop_logits
765
+ ```
766
+ """
767
+
768
+ overwrite_call_docstring(
769
+ FlaxAlbertForPreTraining,
770
+ ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING,
771
+ )
772
+ append_replace_return_docstrings(
773
+ FlaxAlbertForPreTraining, output_type=FlaxAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
774
+ )
775
+
776
+
777
+ class FlaxAlbertForMaskedLMModule(nn.Module):
778
+ config: AlbertConfig
779
+ dtype: jnp.dtype = jnp.float32
780
+
781
+ def setup(self):
782
+ self.albert = FlaxAlbertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype)
783
+ self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype)
784
+
785
+ def __call__(
786
+ self,
787
+ input_ids,
788
+ attention_mask,
789
+ token_type_ids,
790
+ position_ids,
791
+ deterministic: bool = True,
792
+ output_attentions: bool = False,
793
+ output_hidden_states: bool = False,
794
+ return_dict: bool = True,
795
+ ):
796
+ # Model
797
+ outputs = self.albert(
798
+ input_ids,
799
+ attention_mask,
800
+ token_type_ids,
801
+ position_ids,
802
+ deterministic=deterministic,
803
+ output_attentions=output_attentions,
804
+ output_hidden_states=output_hidden_states,
805
+ return_dict=return_dict,
806
+ )
807
+
808
+ hidden_states = outputs[0]
809
+ if self.config.tie_word_embeddings:
810
+ shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
811
+ else:
812
+ shared_embedding = None
813
+
814
+ # Compute the prediction scores
815
+ logits = self.predictions(hidden_states, shared_embedding=shared_embedding)
816
+
817
+ if not return_dict:
818
+ return (logits,) + outputs[1:]
819
+
820
+ return FlaxMaskedLMOutput(
821
+ logits=logits,
822
+ hidden_states=outputs.hidden_states,
823
+ attentions=outputs.attentions,
824
+ )
825
+
826
+
827
+ @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
828
+ class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel):
829
+ module_class = FlaxAlbertForMaskedLMModule
830
+
831
+
832
+ append_call_sample_docstring(
833
+ FlaxAlbertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC, revision="refs/pr/11"
834
+ )
835
+
836
+
837
+ class FlaxAlbertForSequenceClassificationModule(nn.Module):
838
+ config: AlbertConfig
839
+ dtype: jnp.dtype = jnp.float32
840
+
841
+ def setup(self):
842
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
843
+ classifier_dropout = (
844
+ self.config.classifier_dropout_prob
845
+ if self.config.classifier_dropout_prob is not None
846
+ else self.config.hidden_dropout_prob
847
+ )
848
+ self.dropout = nn.Dropout(rate=classifier_dropout)
849
+ self.classifier = nn.Dense(
850
+ self.config.num_labels,
851
+ dtype=self.dtype,
852
+ )
853
+
854
+ def __call__(
855
+ self,
856
+ input_ids,
857
+ attention_mask,
858
+ token_type_ids,
859
+ position_ids,
860
+ deterministic: bool = True,
861
+ output_attentions: bool = False,
862
+ output_hidden_states: bool = False,
863
+ return_dict: bool = True,
864
+ ):
865
+ # Model
866
+ outputs = self.albert(
867
+ input_ids,
868
+ attention_mask,
869
+ token_type_ids,
870
+ position_ids,
871
+ deterministic=deterministic,
872
+ output_attentions=output_attentions,
873
+ output_hidden_states=output_hidden_states,
874
+ return_dict=return_dict,
875
+ )
876
+
877
+ pooled_output = outputs[1]
878
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
879
+ logits = self.classifier(pooled_output)
880
+
881
+ if not return_dict:
882
+ return (logits,) + outputs[2:]
883
+
884
+ return FlaxSequenceClassifierOutput(
885
+ logits=logits,
886
+ hidden_states=outputs.hidden_states,
887
+ attentions=outputs.attentions,
888
+ )
889
+
890
+
891
+ @add_start_docstrings(
892
+ """
893
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
894
+ output) e.g. for GLUE tasks.
895
+ """,
896
+ ALBERT_START_DOCSTRING,
897
+ )
898
+ class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel):
899
+ module_class = FlaxAlbertForSequenceClassificationModule
900
+
901
+
902
+ append_call_sample_docstring(
903
+ FlaxAlbertForSequenceClassification,
904
+ _CHECKPOINT_FOR_DOC,
905
+ FlaxSequenceClassifierOutput,
906
+ _CONFIG_FOR_DOC,
907
+ )
908
+
909
+
910
+ class FlaxAlbertForMultipleChoiceModule(nn.Module):
911
+ config: AlbertConfig
912
+ dtype: jnp.dtype = jnp.float32
913
+
914
+ def setup(self):
915
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
916
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
917
+ self.classifier = nn.Dense(1, dtype=self.dtype)
918
+
919
+ def __call__(
920
+ self,
921
+ input_ids,
922
+ attention_mask,
923
+ token_type_ids,
924
+ position_ids,
925
+ deterministic: bool = True,
926
+ output_attentions: bool = False,
927
+ output_hidden_states: bool = False,
928
+ return_dict: bool = True,
929
+ ):
930
+ num_choices = input_ids.shape[1]
931
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
932
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
933
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
934
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
935
+
936
+ # Model
937
+ outputs = self.albert(
938
+ input_ids,
939
+ attention_mask,
940
+ token_type_ids,
941
+ position_ids,
942
+ deterministic=deterministic,
943
+ output_attentions=output_attentions,
944
+ output_hidden_states=output_hidden_states,
945
+ return_dict=return_dict,
946
+ )
947
+
948
+ pooled_output = outputs[1]
949
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
950
+ logits = self.classifier(pooled_output)
951
+
952
+ reshaped_logits = logits.reshape(-1, num_choices)
953
+
954
+ if not return_dict:
955
+ return (reshaped_logits,) + outputs[2:]
956
+
957
+ return FlaxMultipleChoiceModelOutput(
958
+ logits=reshaped_logits,
959
+ hidden_states=outputs.hidden_states,
960
+ attentions=outputs.attentions,
961
+ )
962
+
963
+
964
+ @add_start_docstrings(
965
+ """
966
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
967
+ softmax) e.g. for RocStories/SWAG tasks.
968
+ """,
969
+ ALBERT_START_DOCSTRING,
970
+ )
971
+ class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel):
972
+ module_class = FlaxAlbertForMultipleChoiceModule
973
+
974
+
975
+ overwrite_call_docstring(
976
+ FlaxAlbertForMultipleChoice, ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
977
+ )
978
+ append_call_sample_docstring(
979
+ FlaxAlbertForMultipleChoice,
980
+ _CHECKPOINT_FOR_DOC,
981
+ FlaxMultipleChoiceModelOutput,
982
+ _CONFIG_FOR_DOC,
983
+ )
984
+
985
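The multiple-choice module above flattens `(batch_size, num_choices, seq_len)` inputs into `(batch_size * num_choices, seq_len)` before the encoder, then folds the per-choice logits back. The shape round trip in isolation (hypothetical sizes; sketch only):

```python
# Shape round trip used by FlaxAlbertForMultipleChoiceModule above (sketch only).
import jax.numpy as jnp

batch_size, num_choices, seq_len = 2, 4, 16  # hypothetical
input_ids = jnp.zeros((batch_size, num_choices, seq_len), dtype="i4")

# 1) flatten choices into the batch dimension before running the encoder
flat_input_ids = input_ids.reshape(-1, input_ids.shape[-1])   # (8, 16)

# 2) the classifier emits one logit per flattened example
logits = jnp.zeros((batch_size * num_choices, 1))             # (8, 1)

# 3) fold back so each row holds the scores of one question's choices
reshaped_logits = logits.reshape(-1, num_choices)             # (2, 4)
print(flat_input_ids.shape, reshaped_logits.shape)
```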
+
986
+ class FlaxAlbertForTokenClassificationModule(nn.Module):
987
+ config: AlbertConfig
988
+ dtype: jnp.dtype = jnp.float32
989
+
990
+ def setup(self):
991
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
992
+ classifier_dropout = (
993
+ self.config.classifier_dropout_prob
994
+ if self.config.classifier_dropout_prob is not None
995
+ else self.config.hidden_dropout_prob
996
+ )
997
+ self.dropout = nn.Dropout(rate=classifier_dropout)
998
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
999
+
1000
+ def __call__(
1001
+ self,
1002
+ input_ids,
1003
+ attention_mask,
1004
+ token_type_ids,
1005
+ position_ids,
1006
+ deterministic: bool = True,
1007
+ output_attentions: bool = False,
1008
+ output_hidden_states: bool = False,
1009
+ return_dict: bool = True,
1010
+ ):
1011
+ # Model
1012
+ outputs = self.albert(
1013
+ input_ids,
1014
+ attention_mask,
1015
+ token_type_ids,
1016
+ position_ids,
1017
+ deterministic=deterministic,
1018
+ output_attentions=output_attentions,
1019
+ output_hidden_states=output_hidden_states,
1020
+ return_dict=return_dict,
1021
+ )
1022
+
1023
+ hidden_states = outputs[0]
1024
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1025
+ logits = self.classifier(hidden_states)
1026
+
1027
+ if not return_dict:
1028
+ return (logits,) + outputs[1:]
1029
+
1030
+ return FlaxTokenClassifierOutput(
1031
+ logits=logits,
1032
+ hidden_states=outputs.hidden_states,
1033
+ attentions=outputs.attentions,
1034
+ )
1035
+
1036
+
1037
+ @add_start_docstrings(
1038
+ """
1039
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1040
+ Named-Entity-Recognition (NER) tasks.
1041
+ """,
1042
+ ALBERT_START_DOCSTRING,
1043
+ )
1044
+ class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel):
1045
+ module_class = FlaxAlbertForTokenClassificationModule
1046
+
1047
+
1048
+ append_call_sample_docstring(
1049
+ FlaxAlbertForTokenClassification,
1050
+ _CHECKPOINT_FOR_DOC,
1051
+ FlaxTokenClassifierOutput,
1052
+ _CONFIG_FOR_DOC,
1053
+ )
1054
+
1055
+
1056
+ class FlaxAlbertForQuestionAnsweringModule(nn.Module):
1057
+ config: AlbertConfig
1058
+ dtype: jnp.dtype = jnp.float32
1059
+
1060
+ def setup(self):
1061
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
1062
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1063
+
1064
+ def __call__(
1065
+ self,
1066
+ input_ids,
1067
+ attention_mask,
1068
+ token_type_ids,
1069
+ position_ids,
1070
+ deterministic: bool = True,
1071
+ output_attentions: bool = False,
1072
+ output_hidden_states: bool = False,
1073
+ return_dict: bool = True,
1074
+ ):
1075
+ # Model
1076
+ outputs = self.albert(
1077
+ input_ids,
1078
+ attention_mask,
1079
+ token_type_ids,
1080
+ position_ids,
1081
+ deterministic=deterministic,
1082
+ output_attentions=output_attentions,
1083
+ output_hidden_states=output_hidden_states,
1084
+ return_dict=return_dict,
1085
+ )
1086
+
1087
+ hidden_states = outputs[0]
1088
+
1089
+ logits = self.qa_outputs(hidden_states)
1090
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
1091
+ start_logits = start_logits.squeeze(-1)
1092
+ end_logits = end_logits.squeeze(-1)
1093
+
1094
+ if not return_dict:
1095
+ return (start_logits, end_logits) + outputs[1:]
1096
+
1097
+ return FlaxQuestionAnsweringModelOutput(
1098
+ start_logits=start_logits,
1099
+ end_logits=end_logits,
1100
+ hidden_states=outputs.hidden_states,
1101
+ attentions=outputs.attentions,
1102
+ )
1103
+
1104
+
1105
+ @add_start_docstrings(
1106
+ """
1107
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1108
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1109
+ """,
1110
+ ALBERT_START_DOCSTRING,
1111
+ )
1112
+ class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel):
1113
+ module_class = FlaxAlbertForQuestionAnsweringModule
1114
+
1115
+
1116
+ append_call_sample_docstring(
1117
+ FlaxAlbertForQuestionAnswering,
1118
+ _CHECKPOINT_FOR_DOC,
1119
+ FlaxQuestionAnsweringModelOutput,
1120
+ _CONFIG_FOR_DOC,
1121
+ )
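The question-answering head above projects each token to `num_labels == 2` channels and splits them into start and end logits; the split is pure array bookkeeping. A short sketch with hypothetical shapes (not part of the committed file):

```python
# How the QA module's 2-channel projection becomes start/end logits (sketch).
import jax.numpy as jnp

batch_size, seq_len = 2, 16                    # hypothetical
logits = jnp.zeros((batch_size, seq_len, 2))   # output of qa_outputs with num_labels == 2

start_logits, end_logits = jnp.split(logits, 2, axis=-1)
start_logits = start_logits.squeeze(-1)        # (batch_size, seq_len)
end_logits = end_logits.squeeze(-1)            # (batch_size, seq_len)
print(start_logits.shape, end_logits.shape)
```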
venv/lib/python3.10/site-packages/transformers/models/albert/modeling_tf_albert.py ADDED
@@ -0,0 +1,1564 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 ALBERT model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import Dict, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFBaseModelOutputWithPooling,
32
+ TFMaskedLMOutput,
33
+ TFMultipleChoiceModelOutput,
34
+ TFQuestionAnsweringModelOutput,
35
+ TFSequenceClassifierOutput,
36
+ TFTokenClassifierOutput,
37
+ )
38
+ from ...modeling_tf_utils import (
39
+ TFMaskedLanguageModelingLoss,
40
+ TFModelInputType,
41
+ TFMultipleChoiceLoss,
42
+ TFPreTrainedModel,
43
+ TFQuestionAnsweringLoss,
44
+ TFSequenceClassificationLoss,
45
+ TFTokenClassificationLoss,
46
+ get_initializer,
47
+ keras,
48
+ keras_serializable,
49
+ unpack_inputs,
50
+ )
51
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
52
+ from ...utils import (
53
+ ModelOutput,
54
+ add_code_sample_docstrings,
55
+ add_start_docstrings,
56
+ add_start_docstrings_to_model_forward,
57
+ logging,
58
+ replace_return_docstrings,
59
+ )
60
+ from .configuration_albert import AlbertConfig
61
+
62
+
63
+ logger = logging.get_logger(__name__)
64
+
65
+ _CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
66
+ _CONFIG_FOR_DOC = "AlbertConfig"
67
+
68
+
69
+ from ..deprecated._archive_maps import TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
70
+
71
+
72
+ class TFAlbertPreTrainingLoss:
73
+ """
74
+ Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP +
75
+ MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
76
+ """
77
+
78
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
79
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
80
+ if self.config.tf_legacy_loss:
81
+ # make sure only labels that are not equal to -100
82
+ # are taken into account as loss
83
+ masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100)
84
+ masked_lm_reduced_logits = tf.boolean_mask(
85
+ tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])),
86
+ mask=masked_lm_active_loss,
87
+ )
88
+ masked_lm_labels = tf.boolean_mask(
89
+ tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss
90
+ )
91
+ sentence_order_active_loss = tf.not_equal(
92
+ tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100
93
+ )
94
+ sentence_order_reduced_logits = tf.boolean_mask(
95
+ tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss
96
+ )
97
+ sentence_order_label = tf.boolean_mask(
98
+ tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss
99
+ )
100
+ masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits)
101
+ sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits)
102
+ masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0]))
103
+ masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0)
104
+
105
+ return masked_lm_loss + sentence_order_loss
106
+
107
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
108
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
109
+ # make sure only labels that are not equal to -100
110
+ # are taken into account for the loss computation
111
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
112
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
113
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
114
+
115
+ sop_logits = tf.reshape(logits[1], (-1, 2))
116
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
117
+ unmasked_sop_loss = loss_fn(y_true=tf.nn.relu(labels["sentence_order_label"]), y_pred=sop_logits)
118
+ sop_loss_mask = tf.cast(labels["sentence_order_label"] != -100, dtype=unmasked_sop_loss.dtype)
119
+
120
+ masked_sop_loss = unmasked_sop_loss * sop_loss_mask
121
+ reduced_masked_sop_loss = tf.reduce_sum(masked_sop_loss) / tf.reduce_sum(sop_loss_mask)
122
+
123
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_sop_loss, (1,))
124
+
125
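The non-legacy branch of `hf_compute_loss` above masks out positions labelled `-100` by multiplying the per-token losses with a 0/1 mask and renormalizing by the mask sum. A standalone sketch of that masking with made-up tensors (illustrative only):

```python
# Sketch of the -100 label masking used by TFAlbertPreTrainingLoss above.
import tensorflow as tf

labels = tf.constant([[5, -100, 7], [-100, 2, 3]])   # -100 marks ignored positions
logits = tf.random.normal((2, 3, 10))                # (batch, seq, vocab)

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)

# Clip negative labels to a valid class id first; those positions are zeroed by the mask anyway.
unmasked = loss_fn(y_true=tf.nn.relu(labels), y_pred=logits)   # per-token losses, shape (2, 3)
mask = tf.cast(labels != -100, dtype=unmasked.dtype)
masked_mean = tf.reduce_sum(unmasked * mask) / tf.reduce_sum(mask)
print(masked_mean)
```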
+
126
+ class TFAlbertEmbeddings(keras.layers.Layer):
127
+ """Construct the embeddings from word, position and token_type embeddings."""
128
+
129
+ def __init__(self, config: AlbertConfig, **kwargs):
130
+ super().__init__(**kwargs)
131
+
132
+ self.config = config
133
+ self.embedding_size = config.embedding_size
134
+ self.max_position_embeddings = config.max_position_embeddings
135
+ self.initializer_range = config.initializer_range
136
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
137
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
138
+
139
+ def build(self, input_shape=None):
140
+ with tf.name_scope("word_embeddings"):
141
+ self.weight = self.add_weight(
142
+ name="weight",
143
+ shape=[self.config.vocab_size, self.embedding_size],
144
+ initializer=get_initializer(self.initializer_range),
145
+ )
146
+
147
+ with tf.name_scope("token_type_embeddings"):
148
+ self.token_type_embeddings = self.add_weight(
149
+ name="embeddings",
150
+ shape=[self.config.type_vocab_size, self.embedding_size],
151
+ initializer=get_initializer(self.initializer_range),
152
+ )
153
+
154
+ with tf.name_scope("position_embeddings"):
155
+ self.position_embeddings = self.add_weight(
156
+ name="embeddings",
157
+ shape=[self.max_position_embeddings, self.embedding_size],
158
+ initializer=get_initializer(self.initializer_range),
159
+ )
160
+
161
+ if self.built:
162
+ return
163
+ self.built = True
164
+ if getattr(self, "LayerNorm", None) is not None:
165
+ with tf.name_scope(self.LayerNorm.name):
166
+ self.LayerNorm.build([None, None, self.config.embedding_size])
167
+
168
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
169
+ def call(
170
+ self,
171
+ input_ids: tf.Tensor = None,
172
+ position_ids: tf.Tensor = None,
173
+ token_type_ids: tf.Tensor = None,
174
+ inputs_embeds: tf.Tensor = None,
175
+ past_key_values_length=0,
176
+ training: bool = False,
177
+ ) -> tf.Tensor:
178
+ """
179
+ Applies embedding based on inputs tensor.
180
+
181
+ Returns:
182
+ final_embeddings (`tf.Tensor`): output embedding tensor.
183
+ """
184
+ if input_ids is None and inputs_embeds is None:
185
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
186
+
187
+ if input_ids is not None:
188
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
189
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
190
+
191
+ input_shape = shape_list(inputs_embeds)[:-1]
192
+
193
+ if token_type_ids is None:
194
+ token_type_ids = tf.fill(dims=input_shape, value=0)
195
+
196
+ if position_ids is None:
197
+ position_ids = tf.expand_dims(
198
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
199
+ )
200
+
201
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
202
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
203
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
204
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
205
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
206
+
207
+ return final_embeddings
208
+
209
+
210
+ class TFAlbertAttention(keras.layers.Layer):
211
+ """Contains the complete attention sublayer, including both dropouts and layer norm."""
212
+
213
+ def __init__(self, config: AlbertConfig, **kwargs):
214
+ super().__init__(**kwargs)
215
+
216
+ if config.hidden_size % config.num_attention_heads != 0:
217
+ raise ValueError(
218
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
219
+ f"of attention heads ({config.num_attention_heads})"
220
+ )
221
+
222
+ self.num_attention_heads = config.num_attention_heads
223
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
224
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
225
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
226
+ self.output_attentions = config.output_attentions
227
+
228
+ self.query = keras.layers.Dense(
229
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
230
+ )
231
+ self.key = keras.layers.Dense(
232
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
233
+ )
234
+ self.value = keras.layers.Dense(
235
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
236
+ )
237
+ self.dense = keras.layers.Dense(
238
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
239
+ )
240
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
241
+ # Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993
242
+ self.attention_dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
243
+ self.output_dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
244
+ self.config = config
245
+
246
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
247
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
248
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
249
+
250
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
251
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
252
+
253
+ def call(
254
+ self,
255
+ input_tensor: tf.Tensor,
256
+ attention_mask: tf.Tensor,
257
+ head_mask: tf.Tensor,
258
+ output_attentions: bool,
259
+ training: bool = False,
260
+ ) -> Tuple[tf.Tensor]:
261
+ batch_size = shape_list(input_tensor)[0]
262
+ mixed_query_layer = self.query(inputs=input_tensor)
263
+ mixed_key_layer = self.key(inputs=input_tensor)
264
+ mixed_value_layer = self.value(inputs=input_tensor)
265
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
266
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
267
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
268
+
269
+ # Take the dot product between "query" and "key" to get the raw attention scores.
270
+ # (batch size, num_heads, seq_len_q, seq_len_k)
271
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
272
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
273
+ attention_scores = tf.divide(attention_scores, dk)
274
+
275
+ if attention_mask is not None:
276
+ # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function)
277
+ attention_scores = tf.add(attention_scores, attention_mask)
278
+
279
+ # Normalize the attention scores to probabilities.
280
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
281
+
282
+ # This is actually dropping out entire tokens to attend to, which might
283
+ # seem a bit unusual, but is taken from the original Transformer paper.
284
+ attention_probs = self.attention_dropout(inputs=attention_probs, training=training)
285
+
286
+ # Mask heads if we want to
287
+ if head_mask is not None:
288
+ attention_probs = tf.multiply(attention_probs, head_mask)
289
+
290
+ context_layer = tf.matmul(attention_probs, value_layer)
291
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
292
+
293
+ # (batch_size, seq_len_q, all_head_size)
294
+ context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size))
295
+ self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
296
+ hidden_states = self_outputs[0]
297
+ hidden_states = self.dense(inputs=hidden_states)
298
+ hidden_states = self.output_dropout(inputs=hidden_states, training=training)
299
+ attention_output = self.LayerNorm(inputs=hidden_states + input_tensor)
300
+
301
+ # add attentions if we output them
302
+ outputs = (attention_output,) + self_outputs[1:]
303
+
304
+ return outputs
305
+
306
+ def build(self, input_shape=None):
307
+ if self.built:
308
+ return
309
+ self.built = True
310
+ if getattr(self, "query", None) is not None:
311
+ with tf.name_scope(self.query.name):
312
+ self.query.build([None, None, self.config.hidden_size])
313
+ if getattr(self, "key", None) is not None:
314
+ with tf.name_scope(self.key.name):
315
+ self.key.build([None, None, self.config.hidden_size])
316
+ if getattr(self, "value", None) is not None:
317
+ with tf.name_scope(self.value.name):
318
+ self.value.build([None, None, self.config.hidden_size])
319
+ if getattr(self, "dense", None) is not None:
320
+ with tf.name_scope(self.dense.name):
321
+ self.dense.build([None, None, self.config.hidden_size])
322
+ if getattr(self, "LayerNorm", None) is not None:
323
+ with tf.name_scope(self.LayerNorm.name):
324
+ self.LayerNorm.build([None, None, self.config.hidden_size])
325
+
326
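`transpose_for_scores` above is the usual multi-head bookkeeping: the fused `all_head_size` projection is reshaped into per-head slices, and the head axis is moved in front of the sequence axis so the batched matmul runs independently per head. In isolation, with hypothetical sizes:

```python
# Shape bookkeeping behind transpose_for_scores in TFAlbertAttention (sketch only).
import tensorflow as tf

batch_size, seq_len = 2, 16
num_attention_heads, attention_head_size = 12, 64          # hypothetical ALBERT-base-like
all_head_size = num_attention_heads * attention_head_size  # 768

x = tf.zeros((batch_size, seq_len, all_head_size))

# [batch, seq, all_head] -> [batch, seq, heads, head_size]
x = tf.reshape(x, (batch_size, -1, num_attention_heads, attention_head_size))
# -> [batch, heads, seq, head_size], so matmul(q, k, transpose_b=True) is per head
x = tf.transpose(x, perm=[0, 2, 1, 3])
print(x.shape)  # (2, 12, 16, 64)
```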
+
327
+ class TFAlbertLayer(keras.layers.Layer):
328
+ def __init__(self, config: AlbertConfig, **kwargs):
329
+ super().__init__(**kwargs)
330
+
331
+ self.attention = TFAlbertAttention(config, name="attention")
332
+ self.ffn = keras.layers.Dense(
333
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn"
334
+ )
335
+
336
+ if isinstance(config.hidden_act, str):
337
+ self.activation = get_tf_activation(config.hidden_act)
338
+ else:
339
+ self.activation = config.hidden_act
340
+
341
+ self.ffn_output = keras.layers.Dense(
342
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output"
343
+ )
344
+ self.full_layer_layer_norm = keras.layers.LayerNormalization(
345
+ epsilon=config.layer_norm_eps, name="full_layer_layer_norm"
346
+ )
347
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
348
+ self.config = config
349
+
350
+ def call(
351
+ self,
352
+ hidden_states: tf.Tensor,
353
+ attention_mask: tf.Tensor,
354
+ head_mask: tf.Tensor,
355
+ output_attentions: bool,
356
+ training: bool = False,
357
+ ) -> Tuple[tf.Tensor]:
358
+ attention_outputs = self.attention(
359
+ input_tensor=hidden_states,
360
+ attention_mask=attention_mask,
361
+ head_mask=head_mask,
362
+ output_attentions=output_attentions,
363
+ training=training,
364
+ )
365
+ ffn_output = self.ffn(inputs=attention_outputs[0])
366
+ ffn_output = self.activation(ffn_output)
367
+ ffn_output = self.ffn_output(inputs=ffn_output)
368
+ ffn_output = self.dropout(inputs=ffn_output, training=training)
369
+ hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0])
370
+
371
+ # add attentions if we output them
372
+ outputs = (hidden_states,) + attention_outputs[1:]
373
+
374
+ return outputs
375
+
376
+ def build(self, input_shape=None):
377
+ if self.built:
378
+ return
379
+ self.built = True
380
+ if getattr(self, "attention", None) is not None:
381
+ with tf.name_scope(self.attention.name):
382
+ self.attention.build(None)
383
+ if getattr(self, "ffn", None) is not None:
384
+ with tf.name_scope(self.ffn.name):
385
+ self.ffn.build([None, None, self.config.hidden_size])
386
+ if getattr(self, "ffn_output", None) is not None:
387
+ with tf.name_scope(self.ffn_output.name):
388
+ self.ffn_output.build([None, None, self.config.intermediate_size])
389
+ if getattr(self, "full_layer_layer_norm", None) is not None:
390
+ with tf.name_scope(self.full_layer_layer_norm.name):
391
+ self.full_layer_layer_norm.build([None, None, self.config.hidden_size])
392
+
393
+
394
+ class TFAlbertLayerGroup(keras.layers.Layer):
395
+ def __init__(self, config: AlbertConfig, **kwargs):
396
+ super().__init__(**kwargs)
397
+
398
+ self.albert_layers = [
399
+ TFAlbertLayer(config, name=f"albert_layers_._{i}") for i in range(config.inner_group_num)
400
+ ]
401
+
402
+ def call(
403
+ self,
404
+ hidden_states: tf.Tensor,
405
+ attention_mask: tf.Tensor,
406
+ head_mask: tf.Tensor,
407
+ output_attentions: bool,
408
+ output_hidden_states: bool,
409
+ training: bool = False,
410
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
411
+ layer_hidden_states = () if output_hidden_states else None
412
+ layer_attentions = () if output_attentions else None
413
+
414
+ for layer_index, albert_layer in enumerate(self.albert_layers):
415
+ if output_hidden_states:
416
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
417
+
418
+ layer_output = albert_layer(
419
+ hidden_states=hidden_states,
420
+ attention_mask=attention_mask,
421
+ head_mask=head_mask[layer_index],
422
+ output_attentions=output_attentions,
423
+ training=training,
424
+ )
425
+ hidden_states = layer_output[0]
426
+
427
+ if output_attentions:
428
+ layer_attentions = layer_attentions + (layer_output[1],)
429
+
430
+ # Add last layer
431
+ if output_hidden_states:
432
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
433
+
434
+ return tuple(v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None)
435
+
436
+ def build(self, input_shape=None):
437
+ if self.built:
438
+ return
439
+ self.built = True
440
+ if getattr(self, "albert_layers", None) is not None:
441
+ for layer in self.albert_layers:
442
+ with tf.name_scope(layer.name):
443
+ layer.build(None)
444
+
445
+
446
+ class TFAlbertTransformer(keras.layers.Layer):
447
+ def __init__(self, config: AlbertConfig, **kwargs):
448
+ super().__init__(**kwargs)
449
+
450
+ self.num_hidden_layers = config.num_hidden_layers
451
+ self.num_hidden_groups = config.num_hidden_groups
452
+ # Number of layers in a hidden group
453
+ self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups)
454
+ self.embedding_hidden_mapping_in = keras.layers.Dense(
455
+ units=config.hidden_size,
456
+ kernel_initializer=get_initializer(config.initializer_range),
457
+ name="embedding_hidden_mapping_in",
458
+ )
459
+ self.albert_layer_groups = [
460
+ TFAlbertLayerGroup(config, name=f"albert_layer_groups_._{i}") for i in range(config.num_hidden_groups)
461
+ ]
462
+ self.config = config
463
+
464
+ def call(
465
+ self,
466
+ hidden_states: tf.Tensor,
467
+ attention_mask: tf.Tensor,
468
+ head_mask: tf.Tensor,
469
+ output_attentions: bool,
470
+ output_hidden_states: bool,
471
+ return_dict: bool,
472
+ training: bool = False,
473
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
474
+ hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states)
475
+ all_attentions = () if output_attentions else None
476
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
477
+
478
+ for i in range(self.num_hidden_layers):
479
+ # Index of the hidden group
480
+ group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups))
481
+ layer_group_output = self.albert_layer_groups[group_idx](
482
+ hidden_states=hidden_states,
483
+ attention_mask=attention_mask,
484
+ head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group],
485
+ output_attentions=output_attentions,
486
+ output_hidden_states=output_hidden_states,
487
+ training=training,
488
+ )
489
+ hidden_states = layer_group_output[0]
490
+
491
+ if output_attentions:
492
+ all_attentions = all_attentions + layer_group_output[-1]
493
+
494
+ if output_hidden_states:
495
+ all_hidden_states = all_hidden_states + (hidden_states,)
496
+
497
+ if not return_dict:
498
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
499
+
500
+ return TFBaseModelOutput(
501
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
502
+ )
503
+
504
+ def build(self, input_shape=None):
505
+ if self.built:
506
+ return
507
+ self.built = True
508
+ if getattr(self, "embedding_hidden_mapping_in", None) is not None:
509
+ with tf.name_scope(self.embedding_hidden_mapping_in.name):
510
+ self.embedding_hidden_mapping_in.build([None, None, self.config.embedding_size])
511
+ if getattr(self, "albert_layer_groups", None) is not None:
512
+ for layer in self.albert_layer_groups:
513
+ with tf.name_scope(layer.name):
514
+ layer.build(None)
515
+
516
+
517
+ class TFAlbertPreTrainedModel(TFPreTrainedModel):
518
+ """
519
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
520
+ models.
521
+ """
522
+
523
+ config_class = AlbertConfig
524
+ base_model_prefix = "albert"
525
+
526
+
527
+ class TFAlbertMLMHead(keras.layers.Layer):
528
+ def __init__(self, config: AlbertConfig, input_embeddings: keras.layers.Layer, **kwargs):
529
+ super().__init__(**kwargs)
530
+
531
+ self.config = config
532
+ self.embedding_size = config.embedding_size
533
+ self.dense = keras.layers.Dense(
534
+ config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
535
+ )
536
+ if isinstance(config.hidden_act, str):
537
+ self.activation = get_tf_activation(config.hidden_act)
538
+ else:
539
+ self.activation = config.hidden_act
540
+
541
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
542
+
543
+ # The output weights are the same as the input embeddings, but there is
544
+ # an output-only bias for each token.
545
+ self.decoder = input_embeddings
546
+
547
+ def build(self, input_shape=None):
548
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
549
+ self.decoder_bias = self.add_weight(
550
+ shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
551
+ )
552
+
553
+ if self.built:
554
+ return
555
+ self.built = True
556
+ if getattr(self, "dense", None) is not None:
557
+ with tf.name_scope(self.dense.name):
558
+ self.dense.build([None, None, self.config.hidden_size])
559
+ if getattr(self, "LayerNorm", None) is not None:
560
+ with tf.name_scope(self.LayerNorm.name):
561
+ self.LayerNorm.build([None, None, self.config.embedding_size])
562
+
563
+ def get_output_embeddings(self) -> keras.layers.Layer:
564
+ return self.decoder
565
+
566
+ def set_output_embeddings(self, value: tf.Variable):
567
+ self.decoder.weight = value
568
+ self.decoder.vocab_size = shape_list(value)[0]
569
+
570
+ def get_bias(self) -> Dict[str, tf.Variable]:
571
+ return {"bias": self.bias, "decoder_bias": self.decoder_bias}
572
+
573
+ def set_bias(self, value: tf.Variable):
574
+ self.bias = value["bias"]
575
+ self.decoder_bias = value["decoder_bias"]
576
+ self.config.vocab_size = shape_list(value["bias"])[0]
577
+
578
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
579
+ hidden_states = self.dense(inputs=hidden_states)
580
+ hidden_states = self.activation(hidden_states)
581
+ hidden_states = self.LayerNorm(inputs=hidden_states)
582
+ seq_length = shape_list(tensor=hidden_states)[1]
583
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
584
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
585
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
586
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias)
587
+
588
+ return hidden_states
589
+
590
+
591
+ @keras_serializable
592
+ class TFAlbertMainLayer(keras.layers.Layer):
593
+ config_class = AlbertConfig
594
+
595
+ def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs):
596
+ super().__init__(**kwargs)
597
+
598
+ self.config = config
599
+
600
+ self.embeddings = TFAlbertEmbeddings(config, name="embeddings")
601
+ self.encoder = TFAlbertTransformer(config, name="encoder")
602
+ self.pooler = (
603
+ keras.layers.Dense(
604
+ units=config.hidden_size,
605
+ kernel_initializer=get_initializer(config.initializer_range),
606
+ activation="tanh",
607
+ name="pooler",
608
+ )
609
+ if add_pooling_layer
610
+ else None
611
+ )
612
+
613
+ def get_input_embeddings(self) -> keras.layers.Layer:
614
+ return self.embeddings
615
+
616
+ def set_input_embeddings(self, value: tf.Variable):
617
+ self.embeddings.weight = value
618
+ self.embeddings.vocab_size = shape_list(value)[0]
619
+
620
+ def _prune_heads(self, heads_to_prune):
621
+ """
622
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
623
+ class PreTrainedModel
624
+ """
625
+ raise NotImplementedError
626
+
627
+ @unpack_inputs
628
+ def call(
629
+ self,
630
+ input_ids: TFModelInputType | None = None,
631
+ attention_mask: np.ndarray | tf.Tensor | None = None,
632
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
633
+ position_ids: np.ndarray | tf.Tensor | None = None,
634
+ head_mask: np.ndarray | tf.Tensor | None = None,
635
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
636
+ output_attentions: Optional[bool] = None,
637
+ output_hidden_states: Optional[bool] = None,
638
+ return_dict: Optional[bool] = None,
639
+ training: bool = False,
640
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
641
+ if input_ids is not None and inputs_embeds is not None:
642
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
643
+ elif input_ids is not None:
644
+ input_shape = shape_list(input_ids)
645
+ elif inputs_embeds is not None:
646
+ input_shape = shape_list(inputs_embeds)[:-1]
647
+ else:
648
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
649
+
650
+ if attention_mask is None:
651
+ attention_mask = tf.fill(dims=input_shape, value=1)
652
+
653
+ if token_type_ids is None:
654
+ token_type_ids = tf.fill(dims=input_shape, value=0)
655
+
656
+ embedding_output = self.embeddings(
657
+ input_ids=input_ids,
658
+ position_ids=position_ids,
659
+ token_type_ids=token_type_ids,
660
+ inputs_embeds=inputs_embeds,
661
+ training=training,
662
+ )
663
+
664
+ # We create a 3D attention mask from a 2D tensor mask.
665
+ # Sizes are [batch_size, 1, 1, to_seq_length]
666
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
667
+ # this attention mask is more simple than the triangular masking of causal attention
668
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
669
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
670
+
671
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
672
+ # masked positions, this operation will create a tensor which is 0.0 for
673
+ # positions we want to attend and -10000.0 for masked positions.
674
+ # Since we are adding it to the raw scores before the softmax, this is
675
+ # effectively the same as removing these entirely.
676
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
677
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
678
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
679
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
680
+
681
+ # Prepare head mask if needed
682
+ # 1.0 in head_mask indicate we keep the head
683
+ # attention_probs has shape bsz x n_heads x N x N
684
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
685
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
686
+ if head_mask is not None:
687
+ raise NotImplementedError
688
+ else:
689
+ head_mask = [None] * self.config.num_hidden_layers
690
+
691
+ encoder_outputs = self.encoder(
692
+ hidden_states=embedding_output,
693
+ attention_mask=extended_attention_mask,
694
+ head_mask=head_mask,
695
+ output_attentions=output_attentions,
696
+ output_hidden_states=output_hidden_states,
697
+ return_dict=return_dict,
698
+ training=training,
699
+ )
700
+
701
+ sequence_output = encoder_outputs[0]
702
+ pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None
703
+
704
+ if not return_dict:
705
+ return (
706
+ sequence_output,
707
+ pooled_output,
708
+ ) + encoder_outputs[1:]
709
+
710
+ return TFBaseModelOutputWithPooling(
711
+ last_hidden_state=sequence_output,
712
+ pooler_output=pooled_output,
713
+ hidden_states=encoder_outputs.hidden_states,
714
+ attentions=encoder_outputs.attentions,
715
+ )
716
+
717
+ def build(self, input_shape=None):
718
+ if self.built:
719
+ return
720
+ self.built = True
721
+ if getattr(self, "embeddings", None) is not None:
722
+ with tf.name_scope(self.embeddings.name):
723
+ self.embeddings.build(None)
724
+ if getattr(self, "encoder", None) is not None:
725
+ with tf.name_scope(self.encoder.name):
726
+ self.encoder.build(None)
727
+ if getattr(self, "pooler", None) is not None:
728
+ with tf.name_scope(self.pooler.name):
729
+ self.pooler.build([None, None, self.config.hidden_size])
730
+
731
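The comments in `TFAlbertMainLayer.call` above describe how the 2D padding mask becomes an additive bias: reshape to `[batch, 1, 1, seq]`, then map 1 to 0.0 and 0 to -10000.0 so masked positions effectively vanish after the softmax. As a standalone sketch (toy mask, illustrative only):

```python
# Sketch of the additive attention-mask construction described above.
import tensorflow as tf

attention_mask = tf.constant([[1, 1, 1, 0]])                        # (batch=1, seq=4), 0 = padding
batch_size, seq_len = attention_mask.shape

extended = tf.reshape(attention_mask, (batch_size, 1, 1, seq_len))  # broadcastable over heads/queries
extended = tf.cast(extended, tf.float32)
extended = (1.0 - extended) * -10000.0                              # keep -> 0.0, pad -> -10000.0
print(extended.numpy())  # [[[[     0.      0.      0. -10000.]]]]
```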
+
732
+ @dataclass
733
+ class TFAlbertForPreTrainingOutput(ModelOutput):
734
+ """
735
+ Output type of [`TFAlbertForPreTraining`].
736
+
737
+ Args:
738
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
739
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
740
+ sop_logits (`tf.Tensor` of shape `(batch_size, 2)`):
741
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
742
+ before SoftMax).
743
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
744
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
745
+ `(batch_size, sequence_length, hidden_size)`.
746
+
747
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
748
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
749
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
750
+ sequence_length)`.
751
+
752
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
753
+ heads.
754
+ """
755
+
756
+ loss: tf.Tensor = None
757
+ prediction_logits: tf.Tensor = None
758
+ sop_logits: tf.Tensor = None
759
+ hidden_states: Tuple[tf.Tensor] | None = None
760
+ attentions: Tuple[tf.Tensor] | None = None
761
+
762
+
763
+ ALBERT_START_DOCSTRING = r"""
764
+
765
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
766
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
767
+ etc.)
768
+
769
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
770
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
771
+ behavior.
772
+
773
+ <Tip>
774
+
775
+ TensorFlow models and layers in `transformers` accept two formats as input:
776
+
777
+ - having all inputs as keyword arguments (like PyTorch models), or
778
+ - having all inputs as a list, tuple or dict in the first positional argument.
779
+
780
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
781
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
782
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
783
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
784
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
785
+ positional argument:
786
+
787
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
788
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
789
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
790
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
791
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
792
+
793
+ Note that when creating models and layers with
794
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
795
+ about any of this, as you can just pass inputs like you would to any other Python function!
796
+
797
+ </Tip>
798
+
799
+ Args:
800
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
801
+ Initializing with a config file does not load the weights associated with the model, only the
802
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
803
+ """
804
+
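The tip above describes three equivalent ways of feeding inputs to the TF models. A minimal illustrative sketch of all three, assuming the `albert/albert-base-v2` checkpoint referenced later in this file is available:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertModel

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertModel.from_pretrained("albert/albert-base-v2")

enc = tokenizer("Hello, my dog is cute", return_tensors="tf")

# 1) keyword arguments, as with PyTorch models
out_kwargs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])

# 2) a single dict in the first positional argument (the format Keras methods prefer)
out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})

# 3) a list with tensors in the order given in the docstring
out_list = model([enc["input_ids"], enc["attention_mask"]])

# all three calls produce the same hidden states
tf.debugging.assert_near(out_kwargs.last_hidden_state, out_dict.last_hidden_state)
```

This sketch is illustrative only and is not part of the upstream module.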
805
+ ALBERT_INPUTS_DOCSTRING = r"""
806
+ Args:
807
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
808
+ Indices of input sequence tokens in the vocabulary.
809
+
810
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
811
+ [`PreTrainedTokenizer.encode`] for details.
812
+
813
+ [What are input IDs?](../glossary#input-ids)
814
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
815
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
816
+
817
+ - 1 for tokens that are **not masked**,
818
+ - 0 for tokens that are **masked**.
819
+
820
+ [What are attention masks?](../glossary#attention-mask)
821
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
822
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
823
+ 1]`:
824
+
825
+ - 0 corresponds to a *sentence A* token,
826
+ - 1 corresponds to a *sentence B* token.
827
+
828
+ [What are token type IDs?](../glossary#token-type-ids)
829
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
830
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
831
+ config.max_position_embeddings - 1]`.
832
+
833
+ [What are position IDs?](../glossary#position-ids)
834
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
835
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
836
+
837
+ - 1 indicates the head is **not masked**,
838
+ - 0 indicates the head is **masked**.
839
+
840
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
841
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
842
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
843
+ model's internal embedding lookup matrix.
844
+ output_attentions (`bool`, *optional*):
845
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
846
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
847
+ config will be used instead.
848
+ output_hidden_states (`bool`, *optional*):
849
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
850
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
851
+ used instead.
852
+ return_dict (`bool`, *optional*):
853
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
854
+ eager mode, in graph mode the value will always be set to True.
855
+ training (`bool`, *optional*, defaults to `False`):
856
+ Whether or not to use the model in training mode (some modules like dropout modules have different
857
+ behaviors between training and evaluation).
858
+ """
859
+
860
+
861
+ @add_start_docstrings(
862
+ "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
863
+ ALBERT_START_DOCSTRING,
864
+ )
865
+ class TFAlbertModel(TFAlbertPreTrainedModel):
866
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
867
+ super().__init__(config, *inputs, **kwargs)
868
+
869
+ self.albert = TFAlbertMainLayer(config, name="albert")
870
+
871
+ @unpack_inputs
872
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
873
+ @add_code_sample_docstrings(
874
+ checkpoint=_CHECKPOINT_FOR_DOC,
875
+ output_type=TFBaseModelOutputWithPooling,
876
+ config_class=_CONFIG_FOR_DOC,
877
+ )
878
+ def call(
879
+ self,
880
+ input_ids: TFModelInputType | None = None,
881
+ attention_mask: np.ndarray | tf.Tensor | None = None,
882
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
883
+ position_ids: np.ndarray | tf.Tensor | None = None,
884
+ head_mask: np.ndarray | tf.Tensor | None = None,
885
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
886
+ output_attentions: Optional[bool] = None,
887
+ output_hidden_states: Optional[bool] = None,
888
+ return_dict: Optional[bool] = None,
889
+ training: Optional[bool] = False,
890
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
891
+ outputs = self.albert(
892
+ input_ids=input_ids,
893
+ attention_mask=attention_mask,
894
+ token_type_ids=token_type_ids,
895
+ position_ids=position_ids,
896
+ head_mask=head_mask,
897
+ inputs_embeds=inputs_embeds,
898
+ output_attentions=output_attentions,
899
+ output_hidden_states=output_hidden_states,
900
+ return_dict=return_dict,
901
+ training=training,
902
+ )
903
+
904
+ return outputs
905
+
906
+ def build(self, input_shape=None):
907
+ if self.built:
908
+ return
909
+ self.built = True
910
+ if getattr(self, "albert", None) is not None:
911
+ with tf.name_scope(self.albert.name):
912
+ self.albert.build(None)
913
+
914
+
915
+ @add_start_docstrings(
916
+ """
917
+ Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order
918
+ prediction` (classification) head.
919
+ """,
920
+ ALBERT_START_DOCSTRING,
921
+ )
922
+ class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss):
923
+     # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
924
+ _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"]
925
+
926
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
927
+ super().__init__(config, *inputs, **kwargs)
928
+
929
+ self.num_labels = config.num_labels
930
+
931
+ self.albert = TFAlbertMainLayer(config, name="albert")
932
+ self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
933
+ self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier")
934
+
935
+ def get_lm_head(self) -> keras.layers.Layer:
936
+ return self.predictions
937
+
938
+ @unpack_inputs
939
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
940
+ @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
941
+ def call(
942
+ self,
943
+ input_ids: TFModelInputType | None = None,
944
+ attention_mask: np.ndarray | tf.Tensor | None = None,
945
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
946
+ position_ids: np.ndarray | tf.Tensor | None = None,
947
+ head_mask: np.ndarray | tf.Tensor | None = None,
948
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
949
+ output_attentions: Optional[bool] = None,
950
+ output_hidden_states: Optional[bool] = None,
951
+ return_dict: Optional[bool] = None,
952
+ labels: np.ndarray | tf.Tensor | None = None,
953
+ sentence_order_label: np.ndarray | tf.Tensor | None = None,
954
+ training: Optional[bool] = False,
955
+ ) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]:
956
+ r"""
957
+ Return:
958
+
959
+ Example:
960
+
961
+ ```python
962
+ >>> import tensorflow as tf
963
+ >>> from transformers import AutoTokenizer, TFAlbertForPreTraining
964
+
965
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
966
+ >>> model = TFAlbertForPreTraining.from_pretrained("albert/albert-base-v2")
967
+
968
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]
969
+ >>> # Batch size 1
970
+ >>> outputs = model(input_ids)
971
+
972
+ >>> prediction_logits = outputs.prediction_logits
973
+ >>> sop_logits = outputs.sop_logits
974
+ ```"""
975
+
976
+ outputs = self.albert(
977
+ input_ids=input_ids,
978
+ attention_mask=attention_mask,
979
+ token_type_ids=token_type_ids,
980
+ position_ids=position_ids,
981
+ head_mask=head_mask,
982
+ inputs_embeds=inputs_embeds,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ training=training,
987
+ )
988
+ sequence_output, pooled_output = outputs[:2]
989
+ prediction_scores = self.predictions(hidden_states=sequence_output)
990
+ sop_scores = self.sop_classifier(pooled_output=pooled_output, training=training)
991
+ total_loss = None
992
+
993
+ if labels is not None and sentence_order_label is not None:
994
+ d_labels = {"labels": labels}
995
+ d_labels["sentence_order_label"] = sentence_order_label
996
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores))
997
+
998
+ if not return_dict:
999
+ output = (prediction_scores, sop_scores) + outputs[2:]
1000
+ return ((total_loss,) + output) if total_loss is not None else output
1001
+
1002
+ return TFAlbertForPreTrainingOutput(
1003
+ loss=total_loss,
1004
+ prediction_logits=prediction_scores,
1005
+ sop_logits=sop_scores,
1006
+ hidden_states=outputs.hidden_states,
1007
+ attentions=outputs.attentions,
1008
+ )
1009
+
1010
+ def build(self, input_shape=None):
1011
+ if self.built:
1012
+ return
1013
+ self.built = True
1014
+ if getattr(self, "albert", None) is not None:
1015
+ with tf.name_scope(self.albert.name):
1016
+ self.albert.build(None)
1017
+ if getattr(self, "predictions", None) is not None:
1018
+ with tf.name_scope(self.predictions.name):
1019
+ self.predictions.build(None)
1020
+ if getattr(self, "sop_classifier", None) is not None:
1021
+ with tf.name_scope(self.sop_classifier.name):
1022
+ self.sop_classifier.build(None)
1023
+
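Because the pretraining head combines a masked-LM loss with a sentence-order loss, both `labels` and `sentence_order_label` must be supplied for `loss` to be populated. A hedged sketch, reusing the `albert/albert-base-v2` checkpoint from the docstring example above; the label values here are dummies used only to exercise the loss path:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForPreTraining

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertForPreTraining.from_pretrained("albert/albert-base-v2")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")

# Use the input ids themselves as (dummy) MLM labels and pretend the sentence
# order is correct (label 0); real pretraining data would supply masked tokens
# and shuffled segment pairs instead.
mlm_labels = inputs["input_ids"]
sop_label = tf.constant([0])

outputs = model(**inputs, labels=mlm_labels, sentence_order_label=sop_label)
print(outputs.loss, outputs.prediction_logits.shape, outputs.sop_logits.shape)
```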
1024
+
1025
+ class TFAlbertSOPHead(keras.layers.Layer):
1026
+ def __init__(self, config: AlbertConfig, **kwargs):
1027
+ super().__init__(**kwargs)
1028
+
1029
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1030
+ self.classifier = keras.layers.Dense(
1031
+ units=config.num_labels,
1032
+ kernel_initializer=get_initializer(config.initializer_range),
1033
+ name="classifier",
1034
+ )
1035
+ self.config = config
1036
+
1037
+ def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor:
1038
+ dropout_pooled_output = self.dropout(inputs=pooled_output, training=training)
1039
+ logits = self.classifier(inputs=dropout_pooled_output)
1040
+
1041
+ return logits
1042
+
1043
+ def build(self, input_shape=None):
1044
+ if self.built:
1045
+ return
1046
+ self.built = True
1047
+ if getattr(self, "classifier", None) is not None:
1048
+ with tf.name_scope(self.classifier.name):
1049
+ self.classifier.build([None, None, self.config.hidden_size])
1050
+
1051
+
1052
+ @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
1053
+ class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):
1054
+     # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1055
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"]
1056
+
1057
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1058
+ super().__init__(config, *inputs, **kwargs)
1059
+
1060
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
1061
+ self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
1062
+
1063
+ def get_lm_head(self) -> keras.layers.Layer:
1064
+ return self.predictions
1065
+
1066
+ @unpack_inputs
1067
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1068
+ @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
1069
+ def call(
1070
+ self,
1071
+ input_ids: TFModelInputType | None = None,
1072
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1073
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1074
+ position_ids: np.ndarray | tf.Tensor | None = None,
1075
+ head_mask: np.ndarray | tf.Tensor | None = None,
1076
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1077
+ output_attentions: Optional[bool] = None,
1078
+ output_hidden_states: Optional[bool] = None,
1079
+ return_dict: Optional[bool] = None,
1080
+ labels: np.ndarray | tf.Tensor | None = None,
1081
+ training: Optional[bool] = False,
1082
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1083
+ r"""
1084
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1085
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1086
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1087
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1088
+
1089
+ Returns:
1090
+
1091
+ Example:
1092
+
1093
+ ```python
1094
+ >>> import tensorflow as tf
1095
+ >>> from transformers import AutoTokenizer, TFAlbertForMaskedLM
1096
+
1097
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
1098
+ >>> model = TFAlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
1099
+
1100
+ >>> # add mask_token
1101
+ >>> inputs = tokenizer(f"The capital of [MASK] is Paris.", return_tensors="tf")
1102
+ >>> logits = model(**inputs).logits
1103
+
1104
+ >>> # retrieve index of [MASK]
1105
+ >>> mask_token_index = tf.where(inputs.input_ids == tokenizer.mask_token_id)[0][1]
1106
+ >>> predicted_token_id = tf.math.argmax(logits[0, mask_token_index], axis=-1)
1107
+ >>> tokenizer.decode(predicted_token_id)
1108
+ 'france'
1109
+ ```
1110
+
1111
+ ```python
1112
+ >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
1113
+ >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
1114
+ >>> outputs = model(**inputs, labels=labels)
1115
+ >>> round(float(outputs.loss), 2)
1116
+ 0.81
1117
+ ```
1118
+ """
1119
+ outputs = self.albert(
1120
+ input_ids=input_ids,
1121
+ attention_mask=attention_mask,
1122
+ token_type_ids=token_type_ids,
1123
+ position_ids=position_ids,
1124
+ head_mask=head_mask,
1125
+ inputs_embeds=inputs_embeds,
1126
+ output_attentions=output_attentions,
1127
+ output_hidden_states=output_hidden_states,
1128
+ return_dict=return_dict,
1129
+ training=training,
1130
+ )
1131
+ sequence_output = outputs[0]
1132
+ prediction_scores = self.predictions(hidden_states=sequence_output, training=training)
1133
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1134
+
1135
+ if not return_dict:
1136
+ output = (prediction_scores,) + outputs[2:]
1137
+
1138
+ return ((loss,) + output) if loss is not None else output
1139
+
1140
+ return TFMaskedLMOutput(
1141
+ loss=loss,
1142
+ logits=prediction_scores,
1143
+ hidden_states=outputs.hidden_states,
1144
+ attentions=outputs.attentions,
1145
+ )
1146
+
1147
+ def build(self, input_shape=None):
1148
+ if self.built:
1149
+ return
1150
+ self.built = True
1151
+ if getattr(self, "albert", None) is not None:
1152
+ with tf.name_scope(self.albert.name):
1153
+ self.albert.build(None)
1154
+ if getattr(self, "predictions", None) is not None:
1155
+ with tf.name_scope(self.predictions.name):
1156
+ self.predictions.build(None)
1157
+
1158
+
1159
+ @add_start_docstrings(
1160
+ """
1161
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1162
+ output) e.g. for GLUE tasks.
1163
+ """,
1164
+ ALBERT_START_DOCSTRING,
1165
+ )
1166
+ class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss):
1167
+     # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1168
+ _keys_to_ignore_on_load_unexpected = [r"predictions"]
1169
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1170
+
1171
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1172
+ super().__init__(config, *inputs, **kwargs)
1173
+
1174
+ self.num_labels = config.num_labels
1175
+
1176
+ self.albert = TFAlbertMainLayer(config, name="albert")
1177
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1178
+ self.classifier = keras.layers.Dense(
1179
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1180
+ )
1181
+ self.config = config
1182
+
1183
+ @unpack_inputs
1184
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1185
+ @add_code_sample_docstrings(
1186
+ checkpoint="vumichien/albert-base-v2-imdb",
1187
+ output_type=TFSequenceClassifierOutput,
1188
+ config_class=_CONFIG_FOR_DOC,
1189
+ expected_output="'LABEL_1'",
1190
+ expected_loss=0.12,
1191
+ )
1192
+ def call(
1193
+ self,
1194
+ input_ids: TFModelInputType | None = None,
1195
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1196
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1197
+ position_ids: np.ndarray | tf.Tensor | None = None,
1198
+ head_mask: np.ndarray | tf.Tensor | None = None,
1199
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1200
+ output_attentions: Optional[bool] = None,
1201
+ output_hidden_states: Optional[bool] = None,
1202
+ return_dict: Optional[bool] = None,
1203
+ labels: np.ndarray | tf.Tensor | None = None,
1204
+ training: Optional[bool] = False,
1205
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1206
+ r"""
1207
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1208
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1209
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1210
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1211
+ """
1212
+ outputs = self.albert(
1213
+ input_ids=input_ids,
1214
+ attention_mask=attention_mask,
1215
+ token_type_ids=token_type_ids,
1216
+ position_ids=position_ids,
1217
+ head_mask=head_mask,
1218
+ inputs_embeds=inputs_embeds,
1219
+ output_attentions=output_attentions,
1220
+ output_hidden_states=output_hidden_states,
1221
+ return_dict=return_dict,
1222
+ training=training,
1223
+ )
1224
+ pooled_output = outputs[1]
1225
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1226
+ logits = self.classifier(inputs=pooled_output)
1227
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1228
+
1229
+ if not return_dict:
1230
+ output = (logits,) + outputs[2:]
1231
+
1232
+ return ((loss,) + output) if loss is not None else output
1233
+
1234
+ return TFSequenceClassifierOutput(
1235
+ loss=loss,
1236
+ logits=logits,
1237
+ hidden_states=outputs.hidden_states,
1238
+ attentions=outputs.attentions,
1239
+ )
1240
+
1241
+ def build(self, input_shape=None):
1242
+ if self.built:
1243
+ return
1244
+ self.built = True
1245
+ if getattr(self, "albert", None) is not None:
1246
+ with tf.name_scope(self.albert.name):
1247
+ self.albert.build(None)
1248
+ if getattr(self, "classifier", None) is not None:
1249
+ with tf.name_scope(self.classifier.name):
1250
+ self.classifier.build([None, None, self.config.hidden_size])
1251
+
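A short usage sketch for the sequence-classification head. The `vumichien/albert-base-v2-imdb` checkpoint comes from the code-sample decorator above; any fine-tuned ALBERT classification checkpoint would work the same way:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("vumichien/albert-base-v2-imdb")
model = TFAlbertForSequenceClassification.from_pretrained("vumichien/albert-base-v2-imdb")

inputs = tokenizer("This movie was a pleasant surprise.", return_tensors="tf")
logits = model(**inputs).logits                      # shape: (batch_size, num_labels)

predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class_id])

# passing `labels` additionally returns a loss
labels = tf.constant([predicted_class_id])
loss = model(**inputs, labels=labels).loss
```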
1252
+
1253
+ @add_start_docstrings(
1254
+ """
1255
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1256
+ Named-Entity-Recognition (NER) tasks.
1257
+ """,
1258
+ ALBERT_START_DOCSTRING,
1259
+ )
1260
+ class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss):
1261
+     # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1262
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
1263
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1264
+
1265
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1266
+ super().__init__(config, *inputs, **kwargs)
1267
+
1268
+ self.num_labels = config.num_labels
1269
+
1270
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
1271
+ classifier_dropout_prob = (
1272
+ config.classifier_dropout_prob
1273
+ if config.classifier_dropout_prob is not None
1274
+ else config.hidden_dropout_prob
1275
+ )
1276
+ self.dropout = keras.layers.Dropout(rate=classifier_dropout_prob)
1277
+ self.classifier = keras.layers.Dense(
1278
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1279
+ )
1280
+ self.config = config
1281
+
1282
+ @unpack_inputs
1283
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1284
+ @add_code_sample_docstrings(
1285
+ checkpoint=_CHECKPOINT_FOR_DOC,
1286
+ output_type=TFTokenClassifierOutput,
1287
+ config_class=_CONFIG_FOR_DOC,
1288
+ )
1289
+ def call(
1290
+ self,
1291
+ input_ids: TFModelInputType | None = None,
1292
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1293
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1294
+ position_ids: np.ndarray | tf.Tensor | None = None,
1295
+ head_mask: np.ndarray | tf.Tensor | None = None,
1296
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1297
+ output_attentions: Optional[bool] = None,
1298
+ output_hidden_states: Optional[bool] = None,
1299
+ return_dict: Optional[bool] = None,
1300
+ labels: np.ndarray | tf.Tensor | None = None,
1301
+ training: Optional[bool] = False,
1302
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1303
+ r"""
1304
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1305
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1306
+ """
1307
+ outputs = self.albert(
1308
+ input_ids=input_ids,
1309
+ attention_mask=attention_mask,
1310
+ token_type_ids=token_type_ids,
1311
+ position_ids=position_ids,
1312
+ head_mask=head_mask,
1313
+ inputs_embeds=inputs_embeds,
1314
+ output_attentions=output_attentions,
1315
+ output_hidden_states=output_hidden_states,
1316
+ return_dict=return_dict,
1317
+ training=training,
1318
+ )
1319
+ sequence_output = outputs[0]
1320
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
1321
+ logits = self.classifier(inputs=sequence_output)
1322
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1323
+
1324
+ if not return_dict:
1325
+ output = (logits,) + outputs[2:]
1326
+
1327
+ return ((loss,) + output) if loss is not None else output
1328
+
1329
+ return TFTokenClassifierOutput(
1330
+ loss=loss,
1331
+ logits=logits,
1332
+ hidden_states=outputs.hidden_states,
1333
+ attentions=outputs.attentions,
1334
+ )
1335
+
1336
+ def build(self, input_shape=None):
1337
+ if self.built:
1338
+ return
1339
+ self.built = True
1340
+ if getattr(self, "albert", None) is not None:
1341
+ with tf.name_scope(self.albert.name):
1342
+ self.albert.build(None)
1343
+ if getattr(self, "classifier", None) is not None:
1344
+ with tf.name_scope(self.classifier.name):
1345
+ self.classifier.build([None, None, self.config.hidden_size])
1346
+
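For the token-classification head the logits carry one score per token. A minimal sketch on the base checkpoint, where `num_labels=5` is an arbitrary choice and the classification weights are freshly initialised, so the predictions only serve to show the shapes:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertForTokenClassification.from_pretrained("albert/albert-base-v2", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits          # shape: (batch_size, sequence_length, num_labels)

predicted_ids = tf.math.argmax(logits, axis=-1)
print(logits.shape, predicted_ids.shape)
```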
1347
+
1348
+ @add_start_docstrings(
1349
+ """
1350
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1351
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1352
+ """,
1353
+ ALBERT_START_DOCSTRING,
1354
+ )
1355
+ class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss):
1356
+     # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1357
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
1358
+
1359
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1360
+ super().__init__(config, *inputs, **kwargs)
1361
+
1362
+ self.num_labels = config.num_labels
1363
+
1364
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
1365
+ self.qa_outputs = keras.layers.Dense(
1366
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1367
+ )
1368
+ self.config = config
1369
+
1370
+ @unpack_inputs
1371
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1372
+ @add_code_sample_docstrings(
1373
+ checkpoint="vumichien/albert-base-v2-squad2",
1374
+ output_type=TFQuestionAnsweringModelOutput,
1375
+ config_class=_CONFIG_FOR_DOC,
1376
+ qa_target_start_index=12,
1377
+ qa_target_end_index=13,
1378
+ expected_output="'a nice puppet'",
1379
+ expected_loss=7.36,
1380
+ )
1381
+ def call(
1382
+ self,
1383
+ input_ids: TFModelInputType | None = None,
1384
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1385
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1386
+ position_ids: np.ndarray | tf.Tensor | None = None,
1387
+ head_mask: np.ndarray | tf.Tensor | None = None,
1388
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1389
+ output_attentions: Optional[bool] = None,
1390
+ output_hidden_states: Optional[bool] = None,
1391
+ return_dict: Optional[bool] = None,
1392
+ start_positions: np.ndarray | tf.Tensor | None = None,
1393
+ end_positions: np.ndarray | tf.Tensor | None = None,
1394
+ training: Optional[bool] = False,
1395
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1396
+ r"""
1397
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1398
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1399
+             Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1400
+ are not taken into account for computing the loss.
1401
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1402
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1403
+             Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1404
+ are not taken into account for computing the loss.
1405
+ """
1406
+ outputs = self.albert(
1407
+ input_ids=input_ids,
1408
+ attention_mask=attention_mask,
1409
+ token_type_ids=token_type_ids,
1410
+ position_ids=position_ids,
1411
+ head_mask=head_mask,
1412
+ inputs_embeds=inputs_embeds,
1413
+ output_attentions=output_attentions,
1414
+ output_hidden_states=output_hidden_states,
1415
+ return_dict=return_dict,
1416
+ training=training,
1417
+ )
1418
+ sequence_output = outputs[0]
1419
+ logits = self.qa_outputs(inputs=sequence_output)
1420
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1421
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1422
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1423
+ loss = None
1424
+
1425
+ if start_positions is not None and end_positions is not None:
1426
+ labels = {"start_position": start_positions}
1427
+ labels["end_position"] = end_positions
1428
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1429
+
1430
+ if not return_dict:
1431
+ output = (start_logits, end_logits) + outputs[2:]
1432
+
1433
+ return ((loss,) + output) if loss is not None else output
1434
+
1435
+ return TFQuestionAnsweringModelOutput(
1436
+ loss=loss,
1437
+ start_logits=start_logits,
1438
+ end_logits=end_logits,
1439
+ hidden_states=outputs.hidden_states,
1440
+ attentions=outputs.attentions,
1441
+ )
1442
+
1443
+ def build(self, input_shape=None):
1444
+ if self.built:
1445
+ return
1446
+ self.built = True
1447
+ if getattr(self, "albert", None) is not None:
1448
+ with tf.name_scope(self.albert.name):
1449
+ self.albert.build(None)
1450
+ if getattr(self, "qa_outputs", None) is not None:
1451
+ with tf.name_scope(self.qa_outputs.name):
1452
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1453
+
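A sketch of span extraction with the QA head, using the `vumichien/albert-base-v2-squad2` checkpoint named in the code-sample decorator above; the toy question/context pair is chosen to match the `'a nice puppet'` expected output declared there:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("vumichien/albert-base-v2-squad2")
model = TFAlbertForQuestionAnswering.from_pretrained("vumichien/albert-base-v2-squad2")

question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# take the most likely start and end token positions and decode the span
start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
answer_ids = inputs["input_ids"][0, start : end + 1]
print(tokenizer.decode(answer_ids))
```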
1454
+
1455
+ @add_start_docstrings(
1456
+ """
1457
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1458
+ softmax) e.g. for RocStories/SWAG tasks.
1459
+ """,
1460
+ ALBERT_START_DOCSTRING,
1461
+ )
1462
+ class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):
1463
+     # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1464
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
1465
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1466
+
1467
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1468
+ super().__init__(config, *inputs, **kwargs)
1469
+
1470
+ self.albert = TFAlbertMainLayer(config, name="albert")
1471
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1472
+ self.classifier = keras.layers.Dense(
1473
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1474
+ )
1475
+ self.config = config
1476
+
1477
+ @unpack_inputs
1478
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1479
+ @add_code_sample_docstrings(
1480
+ checkpoint=_CHECKPOINT_FOR_DOC,
1481
+ output_type=TFMultipleChoiceModelOutput,
1482
+ config_class=_CONFIG_FOR_DOC,
1483
+ )
1484
+ def call(
1485
+ self,
1486
+ input_ids: TFModelInputType | None = None,
1487
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1488
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1489
+ position_ids: np.ndarray | tf.Tensor | None = None,
1490
+ head_mask: np.ndarray | tf.Tensor | None = None,
1491
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1492
+ output_attentions: Optional[bool] = None,
1493
+ output_hidden_states: Optional[bool] = None,
1494
+ return_dict: Optional[bool] = None,
1495
+ labels: np.ndarray | tf.Tensor | None = None,
1496
+ training: Optional[bool] = False,
1497
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1498
+ r"""
1499
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1500
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1501
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1502
+ """
1503
+
1504
+ if input_ids is not None:
1505
+ num_choices = shape_list(input_ids)[1]
1506
+ seq_length = shape_list(input_ids)[2]
1507
+ else:
1508
+ num_choices = shape_list(inputs_embeds)[1]
1509
+ seq_length = shape_list(inputs_embeds)[2]
1510
+
1511
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1512
+ flat_attention_mask = (
1513
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
1514
+ )
1515
+ flat_token_type_ids = (
1516
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
1517
+ )
1518
+ flat_position_ids = (
1519
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
1520
+ )
1521
+ flat_inputs_embeds = (
1522
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
1523
+ if inputs_embeds is not None
1524
+ else None
1525
+ )
1526
+ outputs = self.albert(
1527
+ input_ids=flat_input_ids,
1528
+ attention_mask=flat_attention_mask,
1529
+ token_type_ids=flat_token_type_ids,
1530
+ position_ids=flat_position_ids,
1531
+ head_mask=head_mask,
1532
+ inputs_embeds=flat_inputs_embeds,
1533
+ output_attentions=output_attentions,
1534
+ output_hidden_states=output_hidden_states,
1535
+ return_dict=return_dict,
1536
+ training=training,
1537
+ )
1538
+ pooled_output = outputs[1]
1539
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1540
+ logits = self.classifier(inputs=pooled_output)
1541
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
1542
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
1543
+
1544
+ if not return_dict:
1545
+ output = (reshaped_logits,) + outputs[2:]
1546
+ return ((loss,) + output) if loss is not None else output
1547
+
1548
+ return TFMultipleChoiceModelOutput(
1549
+ loss=loss,
1550
+ logits=reshaped_logits,
1551
+ hidden_states=outputs.hidden_states,
1552
+ attentions=outputs.attentions,
1553
+ )
1554
+
1555
+ def build(self, input_shape=None):
1556
+ if self.built:
1557
+ return
1558
+ self.built = True
1559
+ if getattr(self, "albert", None) is not None:
1560
+ with tf.name_scope(self.albert.name):
1561
+ self.albert.build(None)
1562
+ if getattr(self, "classifier", None) is not None:
1563
+ with tf.name_scope(self.classifier.name):
1564
+ self.classifier.build([None, None, self.config.hidden_size])
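The multiple-choice head expects every tensor to carry an extra `num_choices` dimension, which the `call` above flattens before running the shared encoder. A hedged sketch of preparing such inputs (the prompt and choices are made up for illustration):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertForMultipleChoice.from_pretrained("albert/albert-base-v2")

prompt = "The weather was terrible, so we"
choices = ["stayed inside and read.", "planted a garden on the moon."]

# tokenize the prompt against each choice, then add the num_choices axis
encoding = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}  # (1, num_choices, seq_len)

logits = model(**inputs).logits          # shape: (1, num_choices)
best_choice = int(tf.math.argmax(logits, axis=-1)[0])
print(choices[best_choice])
```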
venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert.py ADDED
@@ -0,0 +1,346 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization classes for ALBERT model."""
16
+
17
+
18
+ import os
19
+ import unicodedata
20
+ from shutil import copyfile
21
+ from typing import Any, Dict, List, Optional, Tuple
22
+
23
+ import sentencepiece as spm
24
+
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
31
+
32
+
33
+ SPIECE_UNDERLINE = "▁"
34
+
35
+
36
+ class AlbertTokenizer(PreTrainedTokenizer):
37
+ """
38
+ Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
39
+
40
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
41
+ this superclass for more information regarding those methods.
42
+
43
+ Args:
44
+ vocab_file (`str`):
45
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
46
+ contains the vocabulary necessary to instantiate a tokenizer.
47
+ do_lower_case (`bool`, *optional*, defaults to `True`):
48
+ Whether or not to lowercase the input when tokenizing.
49
+ remove_space (`bool`, *optional*, defaults to `True`):
50
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
51
+ keep_accents (`bool`, *optional*, defaults to `False`):
52
+ Whether or not to keep accents when tokenizing.
53
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
54
+             The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
55
+
56
+ <Tip>
57
+
58
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
59
+ sequence. The token used is the `cls_token`.
60
+
61
+ </Tip>
62
+
63
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
64
+ The end of sequence token.
65
+
66
+ <Tip>
67
+
68
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
69
+ The token used is the `sep_token`.
70
+
71
+ </Tip>
72
+
73
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
74
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
75
+ token instead.
76
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
77
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
78
+ sequence classification or for a text and a question for question answering. It is also used as the last
79
+ token of a sequence built with special tokens.
80
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
81
+ The token used for padding, for example when batching sequences of different lengths.
82
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
83
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
84
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
85
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
86
+ The token used for masking values. This is the token used when training this model with masked language
87
+ modeling. This is the token which the model will try to predict.
88
+ sp_model_kwargs (`dict`, *optional*):
89
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
90
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
91
+ to set:
92
+
93
+ - `enable_sampling`: Enable subword regularization.
94
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
95
+
96
+ - `nbest_size = {0,1}`: No sampling is performed.
97
+ - `nbest_size > 1`: samples from the nbest_size results.
98
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
99
+ using forward-filtering-and-backward-sampling algorithm.
100
+
101
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
102
+ BPE-dropout.
103
+
104
+ Attributes:
105
+ sp_model (`SentencePieceProcessor`):
106
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
107
+ """
108
+
109
+ vocab_files_names = VOCAB_FILES_NAMES
110
+
111
+ def __init__(
112
+ self,
113
+ vocab_file,
114
+ do_lower_case=True,
115
+ remove_space=True,
116
+ keep_accents=False,
117
+ bos_token="[CLS]",
118
+ eos_token="[SEP]",
119
+ unk_token="<unk>",
120
+ sep_token="[SEP]",
121
+ pad_token="<pad>",
122
+ cls_token="[CLS]",
123
+ mask_token="[MASK]",
124
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
125
+ **kwargs,
126
+ ) -> None:
127
+ # Mask token behave like a normal word, i.e. include the space before it and
128
+ # is included in the raw text, there should be a match in a non-normalized sentence.
129
+ mask_token = (
130
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
131
+ if isinstance(mask_token, str)
132
+ else mask_token
133
+ )
134
+
135
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
136
+
137
+ self.do_lower_case = do_lower_case
138
+ self.remove_space = remove_space
139
+ self.keep_accents = keep_accents
140
+ self.vocab_file = vocab_file
141
+
142
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
143
+ self.sp_model.Load(vocab_file)
144
+
145
+ super().__init__(
146
+ do_lower_case=do_lower_case,
147
+ remove_space=remove_space,
148
+ keep_accents=keep_accents,
149
+ bos_token=bos_token,
150
+ eos_token=eos_token,
151
+ unk_token=unk_token,
152
+ sep_token=sep_token,
153
+ pad_token=pad_token,
154
+ cls_token=cls_token,
155
+ mask_token=mask_token,
156
+ sp_model_kwargs=self.sp_model_kwargs,
157
+ **kwargs,
158
+ )
159
+
160
+ @property
161
+ def vocab_size(self) -> int:
162
+ return len(self.sp_model)
163
+
164
+ def get_vocab(self) -> Dict[str, int]:
165
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
166
+ vocab.update(self.added_tokens_encoder)
167
+ return vocab
168
+
169
+ def __getstate__(self):
170
+ state = self.__dict__.copy()
171
+ state["sp_model"] = None
172
+ return state
173
+
174
+ def __setstate__(self, d):
175
+ self.__dict__ = d
176
+
177
+ # for backward compatibility
178
+ if not hasattr(self, "sp_model_kwargs"):
179
+ self.sp_model_kwargs = {}
180
+
181
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
182
+ self.sp_model.Load(self.vocab_file)
183
+
184
+ def preprocess_text(self, inputs):
185
+ if self.remove_space:
186
+ outputs = " ".join(inputs.strip().split())
187
+ else:
188
+ outputs = inputs
189
+ outputs = outputs.replace("``", '"').replace("''", '"')
190
+
191
+ if not self.keep_accents:
192
+ outputs = unicodedata.normalize("NFKD", outputs)
193
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
194
+ if self.do_lower_case:
195
+ outputs = outputs.lower()
196
+
197
+ return outputs
198
+
199
+ def _tokenize(self, text: str) -> List[str]:
200
+ """Tokenize a string."""
201
+ text = self.preprocess_text(text)
202
+ pieces = self.sp_model.encode(text, out_type=str)
203
+ new_pieces = []
204
+ for piece in pieces:
205
+ if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
206
+ # Logic to handle special cases see https://github.com/google-research/bert/blob/master/README.md#tokenization
207
+ # `9,9` -> ['▁9', ',', '9'] instead of [`_9,`, '9']
208
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
209
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
210
+ if len(cur_pieces[0]) == 1:
211
+ cur_pieces = cur_pieces[1:]
212
+ else:
213
+ cur_pieces[0] = cur_pieces[0][1:]
214
+ cur_pieces.append(piece[-1])
215
+ new_pieces.extend(cur_pieces)
216
+ else:
217
+ new_pieces.append(piece)
218
+
219
+ return new_pieces
220
+
221
+ def _convert_token_to_id(self, token):
222
+ """Converts a token (str) in an id using the vocab."""
223
+ return self.sp_model.PieceToId(token)
224
+
225
+ def _convert_id_to_token(self, index):
226
+ """Converts an index (integer) in a token (str) using the vocab."""
227
+ return self.sp_model.IdToPiece(index)
228
+
229
+ def convert_tokens_to_string(self, tokens):
230
+ """Converts a sequence of tokens (string) in a single string."""
231
+ current_sub_tokens = []
232
+ out_string = ""
233
+ prev_is_special = False
234
+ for token in tokens:
235
+ # make sure that special tokens are not decoded using sentencepiece model
236
+ if token in self.all_special_tokens:
237
+ if not prev_is_special:
238
+ out_string += " "
239
+ out_string += self.sp_model.decode(current_sub_tokens) + token
240
+ prev_is_special = True
241
+ current_sub_tokens = []
242
+ else:
243
+ current_sub_tokens.append(token)
244
+ prev_is_special = False
245
+ out_string += self.sp_model.decode(current_sub_tokens)
246
+ return out_string.strip()
247
+
248
+ def build_inputs_with_special_tokens(
249
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
250
+ ) -> List[int]:
251
+ """
252
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
253
+ adding special tokens. An ALBERT sequence has the following format:
254
+
255
+ - single sequence: `[CLS] X [SEP]`
256
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
257
+
258
+ Args:
259
+ token_ids_0 (`List[int]`):
260
+ List of IDs to which the special tokens will be added.
261
+ token_ids_1 (`List[int]`, *optional*):
262
+ Optional second list of IDs for sequence pairs.
263
+
264
+ Returns:
265
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
266
+ """
267
+ sep = [self.sep_token_id]
268
+ cls = [self.cls_token_id]
269
+ if token_ids_1 is None:
270
+ return cls + token_ids_0 + sep
271
+ return cls + token_ids_0 + sep + token_ids_1 + sep
272
+
273
+ def get_special_tokens_mask(
274
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
275
+ ) -> List[int]:
276
+ """
277
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
278
+ special tokens using the tokenizer `prepare_for_model` method.
279
+
280
+ Args:
281
+ token_ids_0 (`List[int]`):
282
+ List of IDs.
283
+ token_ids_1 (`List[int]`, *optional*):
284
+ Optional second list of IDs for sequence pairs.
285
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
286
+ Whether or not the token list is already formatted with special tokens for the model.
287
+
288
+ Returns:
289
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
290
+ """
291
+
292
+ if already_has_special_tokens:
293
+ return super().get_special_tokens_mask(
294
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
295
+ )
296
+
297
+ if token_ids_1 is not None:
298
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
299
+ return [1] + ([0] * len(token_ids_0)) + [1]
300
+
301
+ def create_token_type_ids_from_sequences(
302
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
303
+ ) -> List[int]:
304
+ """
305
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
306
+ sequence pair mask has the following format:
307
+
308
+ ```
309
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
310
+ | first sequence | second sequence |
311
+ ```
312
+
313
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
314
+
315
+ Args:
316
+ token_ids_0 (`List[int]`):
317
+ List of IDs.
318
+ token_ids_1 (`List[int]`, *optional*):
319
+ Optional second list of IDs for sequence pairs.
320
+
321
+ Returns:
322
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
323
+ """
324
+ sep = [self.sep_token_id]
325
+ cls = [self.cls_token_id]
326
+
327
+ if token_ids_1 is None:
328
+ return len(cls + token_ids_0 + sep) * [0]
329
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
330
+
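The two helpers above encode the `[CLS] A [SEP] B [SEP]` layout and its segment mask directly. A small sketch, assuming a SentencePiece vocabulary such as the one shipped with `albert/albert-base-v2`:

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("How are you?"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Fine, thanks."))

input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)

# [CLS] A [SEP] B [SEP]  ->  0s over the first segment, 1s over the second
print(tokenizer.convert_ids_to_tokens(input_ids))
print(token_type_ids)
```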
331
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
332
+ if not os.path.isdir(save_directory):
333
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
334
+ return
335
+ out_vocab_file = os.path.join(
336
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
337
+ )
338
+
339
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
340
+ copyfile(self.vocab_file, out_vocab_file)
341
+ elif not os.path.isfile(self.vocab_file):
342
+ with open(out_vocab_file, "wb") as fi:
343
+ content_spiece_model = self.sp_model.serialized_model_proto()
344
+ fi.write(content_spiece_model)
345
+
346
+ return (out_vocab_file,)
venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert_fast.py ADDED
@@ -0,0 +1,210 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization classes for ALBERT model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_albert import AlbertTokenizer
29
+ else:
30
+ AlbertTokenizer = None
31
+
32
+ logger = logging.get_logger(__name__)
33
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
34
+
35
+
36
+ SPIECE_UNDERLINE = "▁"
37
+
38
+
39
+ class AlbertTokenizerFast(PreTrainedTokenizerFast):
40
+ """
41
+ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on
42
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
43
+ tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
44
+     this superclass for more information regarding those methods.
45
+
46
+ Args:
47
+ vocab_file (`str`):
48
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
49
+ contains the vocabulary necessary to instantiate a tokenizer.
50
+ do_lower_case (`bool`, *optional*, defaults to `True`):
51
+ Whether or not to lowercase the input when tokenizing.
52
+ remove_space (`bool`, *optional*, defaults to `True`):
53
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
54
+ keep_accents (`bool`, *optional*, defaults to `False`):
55
+ Whether or not to keep accents when tokenizing.
56
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
57
+             The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
58
+
59
+ <Tip>
60
+
61
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
62
+ sequence. The token used is the `cls_token`.
63
+
64
+ </Tip>
65
+
66
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
67
+             The end of sequence token. When building a sequence using special tokens, this is not the token
68
+ that is used for the end of sequence. The token used is the `sep_token`.
69
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
73
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
74
+ sequence classification or for a text and a question for question answering. It is also used as the last
75
+ token of a sequence built with special tokens.
76
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
77
+ The token used for padding, for example when batching sequences of different lengths.
78
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
79
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
80
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
81
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
82
+ The token used for masking values. This is the token used when training this model with masked language
83
+ modeling. This is the token which the model will try to predict.
84
+ """
85
+
86
+ vocab_files_names = VOCAB_FILES_NAMES
87
+ slow_tokenizer_class = AlbertTokenizer
88
+
89
+ def __init__(
90
+ self,
91
+ vocab_file=None,
92
+ tokenizer_file=None,
93
+ do_lower_case=True,
94
+ remove_space=True,
95
+ keep_accents=False,
96
+ bos_token="[CLS]",
97
+ eos_token="[SEP]",
98
+ unk_token="<unk>",
99
+ sep_token="[SEP]",
100
+ pad_token="<pad>",
101
+ cls_token="[CLS]",
102
+ mask_token="[MASK]",
103
+ **kwargs,
104
+ ):
105
+         # Mask token behaves like a normal word, i.e. it includes the space before it and
106
+         # is included in the raw text; there should be a match in a non-normalized sentence.
107
+ mask_token = (
108
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
109
+ if isinstance(mask_token, str)
110
+ else mask_token
111
+ )
112
+
113
+ super().__init__(
114
+ vocab_file,
115
+ tokenizer_file=tokenizer_file,
116
+ do_lower_case=do_lower_case,
117
+ remove_space=remove_space,
118
+ keep_accents=keep_accents,
119
+ bos_token=bos_token,
120
+ eos_token=eos_token,
121
+ unk_token=unk_token,
122
+ sep_token=sep_token,
123
+ pad_token=pad_token,
124
+ cls_token=cls_token,
125
+ mask_token=mask_token,
126
+ **kwargs,
127
+ )
128
+
129
+ self.do_lower_case = do_lower_case
130
+ self.remove_space = remove_space
131
+ self.keep_accents = keep_accents
132
+ self.vocab_file = vocab_file
133
+
134
+ @property
135
+ def can_save_slow_tokenizer(self) -> bool:
136
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
137
+
138
+ def build_inputs_with_special_tokens(
139
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
140
+ ) -> List[int]:
141
+ """
142
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
143
+ adding special tokens. An ALBERT sequence has the following format:
144
+
145
+ - single sequence: `[CLS] X [SEP]`
146
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
147
+
148
+ Args:
149
+ token_ids_0 (`List[int]`):
150
+ List of IDs to which the special tokens will be added
151
+ token_ids_1 (`List[int]`, *optional*):
152
+ Optional second list of IDs for sequence pairs.
153
+
154
+ Returns:
155
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
156
+ """
157
+ sep = [self.sep_token_id]
158
+ cls = [self.cls_token_id]
159
+ if token_ids_1 is None:
160
+ return cls + token_ids_0 + sep
161
+ return cls + token_ids_0 + sep + token_ids_1 + sep
162
+
163
+ def create_token_type_ids_from_sequences(
164
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
165
+ ) -> List[int]:
166
+ """
167
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
168
+ sequence pair mask has the following format:
169
+
170
+ ```
171
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
172
+ | first sequence | second sequence |
173
+ ```
174
+
175
+         If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
176
+
177
+ Args:
178
+ token_ids_0 (`List[int]`):
179
+ List of ids.
180
+ token_ids_1 (`List[int]`, *optional*):
181
+ Optional second list of IDs for sequence pairs.
182
+
183
+ Returns:
184
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
185
+ """
186
+ sep = [self.sep_token_id]
187
+ cls = [self.cls_token_id]
188
+
189
+ if token_ids_1 is None:
190
+ return len(cls + token_ids_0 + sep) * [0]
191
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
192
+
193
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
194
+ if not self.can_save_slow_tokenizer:
195
+ raise ValueError(
196
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
197
+ "tokenizer."
198
+ )
199
+
200
+ if not os.path.isdir(save_directory):
201
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
202
+ return
203
+ out_vocab_file = os.path.join(
204
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
205
+ )
206
+
207
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
208
+ copyfile(self.vocab_file, out_vocab_file)
209
+
210
+ return (out_vocab_file,)
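The two special-token helpers above can be exercised directly on a loaded fast tokenizer. A minimal sketch, assuming the public `albert-base-v2` checkpoint is reachable and `tokenizers`/`sentencepiece` are installed; the exact IDs printed depend on the vocabulary and are not shown here:

```python
# Sketch: how AlbertTokenizerFast builds inputs and token type IDs for
# single sequences and sequence pairs.
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")

ids_a = tokenizer.encode("first sentence", add_special_tokens=False)
ids_b = tokenizer.encode("second sentence", add_special_tokens=False)

# Single sequence: [CLS] A [SEP]; token type IDs are all 0.
print(tokenizer.build_inputs_with_special_tokens(ids_a))
print(tokenizer.create_token_type_ids_from_sequences(ids_a))

# Sequence pair: [CLS] A [SEP] B [SEP]; 0s for the first segment, 1s for the second.
print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
```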
venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc ADDED
Binary file (6.09 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc ADDED
Binary file (38.6 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc ADDED
Binary file (43.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,46 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import os
+
+ import torch
+
+ from transformers.utils import WEIGHTS_NAME
+
+
+ DIALOGPT_MODELS = ["small", "medium", "large"]
+
+ OLD_KEY = "lm_head.decoder.weight"
+ NEW_KEY = "lm_head.weight"
+
+
+ def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
+     d = torch.load(checkpoint_path)
+     d[NEW_KEY] = d.pop(OLD_KEY)
+     os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+     torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--dialogpt_path", default=".", type=str)
+     args = parser.parse_args()
+     for MODEL in DIALOGPT_MODELS:
+         checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
+         pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
+         convert_dialogpt_checkpoint(
+             checkpoint_path,
+             pytorch_dump_folder_path,
+         )
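The conversion above does nothing more than rename the `lm_head.decoder.weight` key to `lm_head.weight` and re-save the state dict under the standard weights filename. A quick way to see this is to run the same steps on a fabricated toy checkpoint; this sketch does not assume the real DialoGPT `*_ft.pkl` files are present, and the dummy tensors and filenames are illustrative only:

```python
# Sketch of the key rename performed by convert_dialogpt_checkpoint,
# using a dummy state dict in a temporary directory.
import os
import tempfile

import torch

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"

with tempfile.TemporaryDirectory() as tmp:
    # Fabricated checkpoint with the legacy key layout.
    dummy = {OLD_KEY: torch.zeros(2, 2), "transformer.wte.weight": torch.zeros(2, 2)}
    ckpt_path = os.path.join(tmp, "small_ft.pkl")
    torch.save(dummy, ckpt_path)

    # Same steps as the script: load, rename the key, save as pytorch_model.bin
    # (the value of transformers.utils.WEIGHTS_NAME).
    state_dict = torch.load(ckpt_path)
    state_dict[NEW_KEY] = state_dict.pop(OLD_KEY)
    torch.save(state_dict, os.path.join(tmp, "pytorch_model.bin"))

    print(sorted(state_dict.keys()))  # ['lm_head.weight', 'transformer.wte.weight']
```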
venv/lib/python3.10/site-packages/transformers/models/donut/__init__.py ADDED
@@ -0,0 +1,74 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+ _import_structure = {
+     "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"],
+     "processing_donut": ["DonutProcessor"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_donut_swin"] = [
+         "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DonutSwinModel",
+         "DonutSwinPreTrainedModel",
+     ]
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"]
+     _import_structure["image_processing_donut"] = ["DonutImageProcessor"]
+
+
+ if TYPE_CHECKING:
+     from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig
+     from .processing_donut import DonutProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_donut_swin import (
+             DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DonutSwinModel,
+             DonutSwinPreTrainedModel,
+         )
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_donut import DonutFeatureExtractor
+         from .image_processing_donut import DonutImageProcessor
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
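The `_LazyModule` registration at the bottom means importing `transformers.models.donut` is cheap: submodules listed in `_import_structure` are only loaded when one of their attributes is first accessed, and the torch- and vision-backed entries are simply absent when those optional dependencies are missing. A minimal sketch of that behavior, assuming `transformers` is installed (the torch/vision branches only run when the corresponding extras are available):

```python
# Sketch: the donut subpackage is a _LazyModule proxy, so the heavy imports
# are deferred until attribute access.
import importlib

from transformers.utils import is_torch_available, is_vision_available

donut = importlib.import_module("transformers.models.donut")
print(type(donut))  # the _LazyModule proxy registered in sys.modules

# Config and processor are always exposed via _import_structure.
from transformers.models.donut import DonutProcessor, DonutSwinConfig  # noqa: F401

# Backend-specific classes resolve only when their optional dependency is installed.
if is_torch_available():
    from transformers.models.donut import DonutSwinModel  # noqa: F401
if is_vision_available():
    from transformers.models.donut import DonutImageProcessor  # noqa: F401
```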
venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.29 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc ADDED
Binary file (4.95 kB). View file