diff --git a/.gitattributes b/.gitattributes
index 6eb9f6fb579b291e3d39a9bfac02febfe7abb01d..39098152ad1adb37438d19cab705abee635ae90c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -81,3 +81,4 @@ venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs
venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8591335da117796b44d5fc0233ff92945253ecaa
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc1ef6b76144b91e148ec1a10db9697ba17309b66980ea33415cd27642757b7
+size 50332828
diff --git a/ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a4a072bc2bc185ab5b8895d45e25eae80b06bd87
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3203d80eb2e77b27f4938946251908f61c9407d4f1b258fdd49375646eabdba3
+size 50332843
diff --git a/lm-evaluation-harness/tests/testdata/arc_easy-v0-res.json b/lm-evaluation-harness/tests/testdata/arc_easy-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..f217448594199a54d671be7302857509eb6d691f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/arc_easy-v0-res.json
@@ -0,0 +1 @@
+{"results": {"arc_easy": {"acc": 0.2474747474747475, "acc_norm": 0.24074074074074073, "acc_norm_stderr": 0.008772796145221907, "acc_stderr": 0.008855114414834707}}, "versions": {"arc_easy": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_1-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..a2457550677d4a39a7e466d1fddaa4583bc649d7
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_1-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_determiner_noun_agreement_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_1": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..66b30be1b864c277e52541b2bd54cda1eb51d4a0
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_determiner_noun_agreement_with_adjective_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adjective_1": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..d70bd8bad3bdbb6d000939f1cf57261a9351a00a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_irregular_plural_subject_verb_agreement_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_irregular_plural_subject_verb_agreement_1": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..03f45fd6199a5f9ba70098e00937fe0603cae2dd
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-loglikelihood
@@ -0,0 +1 @@
+d2d0711611b5b218c6fa8c7278494749252b7868c396451919b761303556bd66
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..64070cf58dd53d10a9e3b8f3510d3387f2983cfd
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_passive_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_passive_1": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..fe6bbf95e5406ad38d4894bf5d4609beeaa05f9a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_wh_questions_subject_gap_long_distance": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_questions_subject_gap_long_distance": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/headqa_en-v0-res.json b/lm-evaluation-harness/tests/testdata/headqa_en-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..6ac5a9c0b8e70a47f2c985713a50336c68b11382
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/headqa_en-v0-res.json
@@ -0,0 +1 @@
+{"results": {"headqa_en": {"acc": 0.23559445660102116, "acc_norm": 0.2447118891320204, "acc_norm_stderr": 0.008211629406841468, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_en": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..a7ae5fa705e58cf0e7c06ca0fe84a186d24b506f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood
@@ -0,0 +1 @@
+bf05e04ed8cf61cf3aad294ed3f5a16137775ffdd20f1b129022ddffc1251768
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_openai-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..30fcb907b5dbbabb2af4cf3a156cf18c67d387df
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_openai-v0-res.json
@@ -0,0 +1 @@
+{"results": {"lambada_openai": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..a52f2a9f1c83bcc119c95c05394f1bd2a86bf888
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-res.json
@@ -0,0 +1 @@
+{"results": {"lambada_openai_cloze": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_cloze": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..efd450a8f2a4ca067f7380af809fdda48d1ee465
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-loglikelihood
@@ -0,0 +1 @@
+6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_geometry-v0-res.json b/lm-evaluation-harness/tests/testdata/math_geometry-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..1b25dc283c96c63d30df9f0ce3d04aadb8f93625
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_geometry-v0-res.json
@@ -0,0 +1 @@
+{"results": {"math_geometry": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_geometry": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..2fb27786c54abe6303683c0a247d4c689586a97c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+d5b7967c0ece8b816f3921a8bd0fad23365349e935b491595e2ad1135af42da6
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-res.json b/lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..c87e487e9ac147c5a9ba8cb3a4b2a39048d1dcaa
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-res.json
@@ -0,0 +1 @@
+{"results": {"qa4mre_2013": {"acc": 0.18309859154929578, "acc_norm": 0.22183098591549297, "acc_norm_stderr": 0.02469760575535269, "acc_stderr": 0.022989742475464973}}, "versions": {"qa4mre_2013": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/toxigen-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/toxigen-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..3074e09e14cf0763aa58e8fe2801337da805b734
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/toxigen-v0-loglikelihood
@@ -0,0 +1 @@
+7fedd930bafa92b9cca615a93ba92a4413244d2b77cf3f421a186815d721e0fa
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..7cb9424082836f0d56afe809cf44c78fc844d993
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-greedy_until
@@ -0,0 +1 @@
+7f197bc281d6dbf9425900ef0dee7175021c43e355050f149f43b161c52bf0b0
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..899ce01919910ab80fc95b99371bf334a7c7c37a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-greedy_until
@@ -0,0 +1 @@
+89274499d84176b1ffe4eaec06f2c89ca807342384dc946c2e348d00116aaade
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2
new file mode 100644
index 0000000000000000000000000000000000000000..5e9291ea7aa77204bbaab28651e6a4d4f47a4bea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf8f81ceb5eaaee1621aa60b892d83e596a6173f6f6517359b679ff3daa1b0f8
+size 44623
diff --git a/venv/lib/python3.10/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png b/venv/lib/python3.10/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png
new file mode 100644
index 0000000000000000000000000000000000000000..6c9e5bdee713b998b23ceb6782595b29c2870a0d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1508bda48445c23ab882f801f1c0dd0472f97ae414245c3ab1094005fda4455a
+size 21918
diff --git a/venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 b/venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600
new file mode 100644
index 0000000000000000000000000000000000000000..ea2385bc713a72eb3e99632dbd2ff0117aedb518
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d85a4a6d150efcee79c4cd53c88a5a31fd3f6f6efde3e7bd439cd8f4883024ae
+size 67913016
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/albert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..168c68db837d08817e08e493efa81e7419ab9de9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/__init__.py
@@ -0,0 +1,179 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_sentencepiece_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
+}
+
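+# Each optional backend checked below (sentencepiece, tokenizers, torch, TensorFlow, Flax) only extends
+# _import_structure when its dependency is importable; at runtime the module object is replaced by a _LazyModule
+# (see the bottom of this file), so submodules are imported lazily on first attribute access.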
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_albert"] = [
+ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "AlbertForMaskedLM",
+ "AlbertForMultipleChoice",
+ "AlbertForPreTraining",
+ "AlbertForQuestionAnswering",
+ "AlbertForSequenceClassification",
+ "AlbertForTokenClassification",
+ "AlbertModel",
+ "AlbertPreTrainedModel",
+ "load_tf_weights_in_albert",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_albert"] = [
+ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFAlbertForMaskedLM",
+ "TFAlbertForMultipleChoice",
+ "TFAlbertForPreTraining",
+ "TFAlbertForQuestionAnswering",
+ "TFAlbertForSequenceClassification",
+ "TFAlbertForTokenClassification",
+ "TFAlbertMainLayer",
+ "TFAlbertModel",
+ "TFAlbertPreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_albert"] = [
+ "FlaxAlbertForMaskedLM",
+ "FlaxAlbertForMultipleChoice",
+ "FlaxAlbertForPreTraining",
+ "FlaxAlbertForQuestionAnswering",
+ "FlaxAlbertForSequenceClassification",
+ "FlaxAlbertForTokenClassification",
+ "FlaxAlbertModel",
+ "FlaxAlbertPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_albert import AlbertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_albert_fast import AlbertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_albert import (
+ ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ AlbertForMaskedLM,
+ AlbertForMultipleChoice,
+ AlbertForPreTraining,
+ AlbertForQuestionAnswering,
+ AlbertForSequenceClassification,
+ AlbertForTokenClassification,
+ AlbertModel,
+ AlbertPreTrainedModel,
+ load_tf_weights_in_albert,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_albert import (
+ TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFAlbertForMaskedLM,
+ TFAlbertForMultipleChoice,
+ TFAlbertForPreTraining,
+ TFAlbertForQuestionAnswering,
+ TFAlbertForSequenceClassification,
+ TFAlbertForTokenClassification,
+ TFAlbertMainLayer,
+ TFAlbertModel,
+ TFAlbertPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_albert import (
+ FlaxAlbertForMaskedLM,
+ FlaxAlbertForMultipleChoice,
+ FlaxAlbertForPreTraining,
+ FlaxAlbertForQuestionAnswering,
+ FlaxAlbertForSequenceClassification,
+ FlaxAlbertForTokenClassification,
+ FlaxAlbertModel,
+ FlaxAlbertPreTrainedModel,
+ )
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/convert_albert_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/convert_albert_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..351e49e03d8dfb5a442b7a18968a0042b8337b54
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/convert_albert_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dace2c9902f5bdb02a22ad915de7afddbffc8984
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e32d97a2a94551353d560c90bdb8ecdd38e92809
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e5e72c92c7c65e9de1dc673c4a6a239c829d29ee
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ded7b221454effe1432635ec694938e9bcb32e8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/configuration_albert.py b/venv/lib/python3.10/site-packages/transformers/models/albert/configuration_albert.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5ddded4833481fb1d1679f66b00e00b28ffa06d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/configuration_albert.py
@@ -0,0 +1,167 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ALBERT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ..deprecated._archive_maps import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class AlbertConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of an [`AlbertModel`] or a [`TFAlbertModel`]. It is used
+ to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating
+ a configuration with the defaults will yield a similar configuration to that of the ALBERT
+ [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30000):
+ Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
+ embedding_size (`int`, *optional*, defaults to 128):
+ Dimensionality of vocabulary embeddings.
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_hidden_groups (`int`, *optional*, defaults to 1):
+ Number of groups for the hidden layers, parameters in the same group are shared.
+ num_attention_heads (`int`, *optional*, defaults to 64):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 16384):
+ The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ inner_group_num (`int`, *optional*, defaults to 1):
+            The number of inner repetitions of attention and FFN within each hidden group.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu_new"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for attached classifiers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 2):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 3):
+ End of stream token id.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AlbertConfig, AlbertModel
+
+ >>> # Initializing an ALBERT-xxlarge style configuration
+ >>> albert_xxlarge_configuration = AlbertConfig()
+
+ >>> # Initializing an ALBERT-base style configuration
+ >>> albert_base_configuration = AlbertConfig(
+ ... hidden_size=768,
+ ... num_attention_heads=12,
+ ... intermediate_size=3072,
+ ... )
+
+ >>> # Initializing a model (with random weights) from the ALBERT-base style configuration
+    >>> model = AlbertModel(albert_base_configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "albert"
+
+ def __init__(
+ self,
+ vocab_size=30000,
+ embedding_size=128,
+ hidden_size=4096,
+ num_hidden_layers=12,
+ num_hidden_groups=1,
+ num_attention_heads=64,
+ intermediate_size=16384,
+ inner_group_num=1,
+ hidden_act="gelu_new",
+ hidden_dropout_prob=0,
+ attention_probs_dropout_prob=0,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ classifier_dropout_prob=0.1,
+ position_embedding_type="absolute",
+ pad_token_id=0,
+ bos_token_id=2,
+ eos_token_id=3,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.embedding_size = embedding_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_hidden_groups = num_hidden_groups
+ self.num_attention_heads = num_attention_heads
+ self.inner_group_num = inner_group_num
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.classifier_dropout_prob = classifier_dropout_prob
+ self.position_embedding_type = position_embedding_type
+
+
+# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Roberta->Albert
+class AlbertOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ("token_type_ids", dynamic_axis),
+ ]
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..eecada8b432a2def95f71b1c613839647fc0ca6f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,63 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ALBERT checkpoint."""
+
+
+import argparse
+
+import torch
+
+from ...utils import logging
+from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
+
+
+logging.set_verbosity_info()
+
+
+def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
+ # Initialise PyTorch model
+ config = AlbertConfig.from_json_file(albert_config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = AlbertForPreTraining(config)
+
+ # Load weights from tf checkpoint
+ load_tf_weights_in_albert(model, config, tf_checkpoint_path)
+
+ # Save pytorch-model
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ torch.save(model.state_dict(), pytorch_dump_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
+ )
+ parser.add_argument(
+ "--albert_config_file",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "The config json file corresponding to the pre-trained ALBERT model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
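+
+# Illustrative invocation (the checkpoint, config, and output paths below are placeholders):
+#
+#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
+#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
+#       --albert_config_file ./albert_base/albert_config.json \
+#       --pytorch_dump_path ./albert_base_pytorch_model.bin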
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_albert.py b/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_albert.py
new file mode 100644
index 0000000000000000000000000000000000000000..87f5a9e30c8f542e266d610563329baf840e4bf5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_albert.py
@@ -0,0 +1,1382 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch ALBERT model."""
+
+import math
+import os
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_albert import AlbertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
+_CONFIG_FOR_DOC = "AlbertConfig"
+
+
+from ..deprecated._archive_maps import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ print(name)
+
+ for name, array in zip(names, arrays):
+ original_name = name
+
+ # If saved from the TF HUB module
+ name = name.replace("module/", "")
+
+ # Renaming and simplifying
+ name = name.replace("ffn_1", "ffn")
+ name = name.replace("bert/", "albert/")
+ name = name.replace("attention_1", "attention")
+ name = name.replace("transform/", "")
+ name = name.replace("LayerNorm_1", "full_layer_layer_norm")
+ name = name.replace("LayerNorm", "attention/LayerNorm")
+ name = name.replace("transformer/", "")
+
+ # The feed forward layer had an 'intermediate' step which has been abstracted away
+ name = name.replace("intermediate/dense/", "")
+ name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
+
+ # ALBERT attention was split between self and output which have been abstracted away
+ name = name.replace("/output/", "/")
+ name = name.replace("/self/", "/")
+
+ # The pooler is a linear layer
+ name = name.replace("pooler/dense", "pooler")
+
+ # The classifier was simplified to predictions from cls/predictions
+ name = name.replace("cls/predictions", "predictions")
+ name = name.replace("predictions/attention", "predictions")
+
+ # Naming was changed to be more explicit
+ name = name.replace("embeddings/attention", "embeddings")
+ name = name.replace("inner_group_", "albert_layers/")
+ name = name.replace("group_", "albert_layer_groups/")
+
+ # Classifier
+ if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
+ name = "classifier/" + name
+
+ # No ALBERT model currently handles the next sentence prediction task
+ if "seq_relationship" in name:
+ name = name.replace("seq_relationship/output_", "sop_classifier/classifier/")
+ name = name.replace("weights", "weight")
+
+ name = name.split("/")
+
+        # Ignore optimizer state and bookkeeping variables created by the LAMB/Adam optimizers.
+ if (
+ "adam_m" in name
+ or "adam_v" in name
+ or "AdamWeightDecayOptimizer" in name
+ or "AdamWeightDecayOptimizer_1" in name
+ or "global_step" in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
+ try:
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+ except ValueError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ print(f"Initialize PyTorch weight {name} from {original_name}")
+ pointer.data = torch.from_numpy(array)
+
+ return model
+
+
+class AlbertEmbeddings(nn.Module):
+ """
+ Construct the embeddings from word, position and token_type embeddings.
+ """
+
+ def __init__(self, config: AlbertConfig):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+        # If token_type_ids is not provided, fall back to the all-zeros buffer registered in the constructor. The
+        # registered buffer helps users trace the model without passing token_type_ids and resolves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class AlbertAttention(nn.Module):
+ def __init__(self, config: AlbertConfig):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+                f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.hidden_size = config.hidden_size
+ self.attention_head_size = config.hidden_size // config.num_attention_heads
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.pruned_heads = set()
+
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def prune_heads(self, heads: List[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.query = prune_linear_layer(self.query, index)
+ self.key = prune_linear_layer(self.key, index)
+ self.value = prune_linear_layer(self.value, index)
+ self.dense = prune_linear_layer(self.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.num_attention_heads = self.num_attention_heads - len(heads)
+ self.all_head_size = self.attention_head_size * self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+ mixed_key_layer = self.key(hidden_states)
+ mixed_value_layer = self.value(hidden_states)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+ key_layer = self.transpose_for_scores(mixed_key_layer)
+ value_layer = self.transpose_for_scores(mixed_value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ seq_length = hidden_states.size()[1]
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.attention_dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+ context_layer = context_layer.transpose(2, 1).flatten(2)
+
+ projected_context_layer = self.dense(context_layer)
+ projected_context_layer_dropout = self.output_dropout(projected_context_layer)
+ layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
+ return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
+
+
+class AlbertLayer(nn.Module):
+ def __init__(self, config: AlbertConfig):
+ super().__init__()
+
+ self.config = config
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.attention = AlbertAttention(config)
+ self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.activation = ACT2FN[config.hidden_act]
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
+
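+        # apply_chunking_to_forward splits its inputs along the sequence dimension (self.seq_len_dim) into chunks of
+        # size self.chunk_size_feed_forward (0 disables chunking) and runs ff_chunk on each chunk, reducing peak
+        # memory during the feed-forward pass at the cost of extra overhead.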
+ ffn_output = apply_chunking_to_forward(
+ self.ff_chunk,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ attention_output[0],
+ )
+ hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
+
+ return (hidden_states,) + attention_output[1:] # add attentions if we output them
+
+ def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor:
+ ffn_output = self.ffn(attention_output)
+ ffn_output = self.activation(ffn_output)
+ ffn_output = self.ffn_output(ffn_output)
+ return ffn_output
+
+
+class AlbertLayerGroup(nn.Module):
+ def __init__(self, config: AlbertConfig):
+ super().__init__()
+
+ self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
+ layer_hidden_states = ()
+ layer_attentions = ()
+
+ for layer_index, albert_layer in enumerate(self.albert_layers):
+ layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
+ hidden_states = layer_output[0]
+
+ if output_attentions:
+ layer_attentions = layer_attentions + (layer_output[1],)
+
+ if output_hidden_states:
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
+
+ outputs = (hidden_states,)
+ if output_hidden_states:
+ outputs = outputs + (layer_hidden_states,)
+ if output_attentions:
+ outputs = outputs + (layer_attentions,)
+ return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
+
+
+class AlbertTransformer(nn.Module):
+ def __init__(self, config: AlbertConfig):
+ super().__init__()
+
+ self.config = config
+ self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
+ self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[BaseModelOutput, Tuple]:
+ hidden_states = self.embedding_hidden_mapping_in(hidden_states)
+
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask
+
+ for i in range(self.config.num_hidden_layers):
+ # Number of layers in a hidden group
+ layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
+
+ # Index of the hidden group
+ group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
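+            # e.g. with the default num_hidden_layers=12 and num_hidden_groups=1, layers_per_group is 12 and
+            # group_idx is always 0, so the single AlbertLayerGroup (and its weights) is reused on every iteration;
+            # this reuse is what implements ALBERT's cross-layer parameter sharing.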
+
+ layer_group_output = self.albert_layer_groups[group_idx](
+ hidden_states,
+ attention_mask,
+ head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
+ output_attentions,
+ output_hidden_states,
+ )
+ hidden_states = layer_group_output[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + layer_group_output[-1]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class AlbertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = AlbertConfig
+ load_tf_weights = load_tf_weights_in_albert
+ base_model_prefix = "albert"
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+class AlbertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`AlbertForPreTraining`].
+
+ Args:
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
+            Total loss as the sum of the masked language modeling loss and the sentence order prediction
+            (classification) loss.
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ sop_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+            Prediction scores of the sentence order prediction (classification) head (scores of True/False continuation
+            before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ prediction_logits: torch.FloatTensor = None
+ sop_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+ALBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Args:
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ALBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
+ ALBERT_START_DOCSTRING,
+)
+class AlbertModel(AlbertPreTrainedModel):
+ config_class = AlbertConfig
+ base_model_prefix = "albert"
+
+ def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True):
+ super().__init__(config)
+
+ self.config = config
+ self.embeddings = AlbertEmbeddings(config)
+ self.encoder = AlbertTransformer(config)
+ if add_pooling_layer:
+ self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
+ self.pooler_activation = nn.Tanh()
+ else:
+ self.pooler = None
+ self.pooler_activation = None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value: nn.Embedding) -> None:
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. ALBERT
+        has a different architecture in that its layers are shared across groups, which in turn contain inner groups.
+        If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups each, there are a total of
+        4 different layers.
+
+        These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden group,
+        while [2,3] correspond to the two inner groups of the second hidden group.
+
+        Any layer with an index other than [0,1,2,3] will result in an error. See the base class PreTrainedModel for
+        more information about head pruning.
+ """
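+        # Worked example (illustrative): with `inner_group_num == 2`, pruning heads on
+        # flattened layer index 3 gives group_idx = int(3 / 2) = 1 and
+        # inner_group_idx = 3 - 1 * 2 = 1, i.e. the second inner layer of the second
+        # layer group.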
+ for layer, heads in heads_to_prune.items():
+ group_idx = int(layer / self.config.inner_group_num)
+ inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
+ self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutputWithPooling, Tuple]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
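+        # Turn the 2D padding mask into an additive attention bias broadcastable to
+        # (batch_size, num_heads, seq_length, seq_length): 0.0 where attention is allowed
+        # and the dtype's minimum where it is masked, so the softmax drives masked
+        # positions towards zero.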
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = encoder_outputs[0]
+
+ pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
+ `sentence order prediction (classification)` head.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class AlbertForPreTraining(AlbertPreTrainedModel):
+ _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
+
+ def __init__(self, config: AlbertConfig):
+ super().__init__(config)
+
+ self.albert = AlbertModel(config)
+ self.predictions = AlbertMLMHead(config)
+ self.sop_classifier = AlbertSOPHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self) -> nn.Linear:
+ return self.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
+ self.predictions.decoder = new_embeddings
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.albert.embeddings.word_embeddings
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ sentence_order_label: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[AlbertForPreTrainingOutput, Tuple]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sentence order prediction (classification) loss. Input should be a sequence pair
+            (see `input_ids` docstring). Indices should be in `[0, 1]`. `0` indicates original order (sequence A, then
+ sequence B), `1` indicates switched order (sequence B, then sequence A).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AlbertForPreTraining
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
+ >>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")
+
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
+ >>> # Batch size 1
+ >>> outputs = model(input_ids)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> sop_logits = outputs.sop_logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.albert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output, pooled_output = outputs[:2]
+
+ prediction_scores = self.predictions(sequence_output)
+ sop_scores = self.sop_classifier(pooled_output)
+
+ total_loss = None
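+        # The pretraining loss is the unweighted sum of the masked-LM cross-entropy and the
+        # sentence-order-prediction cross-entropy; it is only computed when both label
+        # tensors are provided.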
+ if labels is not None and sentence_order_label is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+ sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
+ total_loss = masked_lm_loss + sentence_order_loss
+
+ if not return_dict:
+ output = (prediction_scores, sop_scores) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return AlbertForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ sop_logits=sop_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class AlbertMLMHead(nn.Module):
+ def __init__(self, config: AlbertConfig):
+ super().__init__()
+
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.dense = nn.Linear(config.hidden_size, config.embedding_size)
+ self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
+ self.activation = ACT2FN[config.hidden_act]
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+
+ prediction_scores = hidden_states
+
+ return prediction_scores
+
+ def _tie_weights(self) -> None:
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ self.bias = self.decoder.bias
+
+
+class AlbertSOPHead(nn.Module):
+ def __init__(self, config: AlbertConfig):
+ super().__init__()
+
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
+ dropout_pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(dropout_pooled_output)
+ return logits
+
+
+@add_start_docstrings(
+ "Albert Model with a `language modeling` head on top.",
+ ALBERT_START_DOCSTRING,
+)
+class AlbertForMaskedLM(AlbertPreTrainedModel):
+ _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.albert = AlbertModel(config, add_pooling_layer=False)
+ self.predictions = AlbertMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self) -> nn.Linear:
+ return self.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
+ self.predictions.decoder = new_embeddings
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.albert.embeddings.word_embeddings
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MaskedLMOutput, Tuple]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, AlbertForMaskedLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
+ >>> model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
+
+ >>> # add mask_token
+ >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="pt")
+ >>> with torch.no_grad():
+ ... logits = model(**inputs).logits
+
+ >>> # retrieve index of [MASK]
+ >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
+ >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
+ >>> tokenizer.decode(predicted_token_id)
+ 'france'
+ ```
+
+ ```python
+ >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
+ >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
+ >>> outputs = model(**inputs, labels=labels)
+ >>> round(outputs.loss.item(), 2)
+ 0.81
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_outputs = outputs[0]
+
+ prediction_scores = self.predictions(sequence_outputs)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class AlbertForSequenceClassification(AlbertPreTrainedModel):
+ def __init__(self, config: AlbertConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.albert = AlbertModel(config)
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="textattack/albert-base-v2-imdb",
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'LABEL_1'",
+ expected_loss=0.12,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[SequenceClassifierOutput, Tuple]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
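+        # When `config.problem_type` is unset, infer it from the labels: a single label ->
+        # regression (MSE), integer labels with several classes -> single-label
+        # classification (cross-entropy), otherwise multi-label classification
+        # (BCE with logits).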
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class AlbertForTokenClassification(AlbertPreTrainedModel):
+ def __init__(self, config: AlbertConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.albert = AlbertModel(config, add_pooling_layer=False)
+ classifier_dropout_prob = (
+ config.classifier_dropout_prob
+ if config.classifier_dropout_prob is not None
+ else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[TokenClassifierOutput, Tuple]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.albert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class AlbertForQuestionAnswering(AlbertPreTrainedModel):
+ def __init__(self, config: AlbertConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.albert = AlbertModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="twmkn9/albert-base-v2-squad2",
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ qa_target_start_index=12,
+ qa_target_end_index=13,
+ expected_output="'a nice puppet'",
+ expected_loss=7.36,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+    ) -> Union[QuestionAnsweringModelOutput, Tuple]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
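+        # `qa_outputs` maps each token to two scores; splitting the last dimension gives
+        # per-token start and end logits of shape (batch_size, seq_length).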
+ logits: torch.Tensor = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting adds an extra dimension; squeeze it away
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class AlbertForMultipleChoice(AlbertPreTrainedModel):
+ def __init__(self, config: AlbertConfig):
+ super().__init__(config)
+
+ self.albert = AlbertModel(config)
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+    ) -> Union[MultipleChoiceModelOutput, Tuple]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see
+ *input_ids* above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
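+        # Multiple-choice inputs arrive as (batch_size, num_choices, seq_length); flatten
+        # them to (batch_size * num_choices, seq_length) so the encoder scores each choice
+        # independently, then reshape the logits back to (batch_size, num_choices) below.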
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+ outputs = self.albert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits: torch.Tensor = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_flax_albert.py b/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_flax_albert.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2c01ded3619ca913033980f72979ec77c0f76e0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_flax_albert.py
@@ -0,0 +1,1121 @@
+# coding=utf-8
+# Copyright 2021 Google AI, Google Brain and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, Optional, Tuple
+
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPooling,
+ FlaxMaskedLMOutput,
+ FlaxMultipleChoiceModelOutput,
+ FlaxQuestionAnsweringModelOutput,
+ FlaxSequenceClassifierOutput,
+ FlaxTokenClassifierOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_albert import AlbertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
+_CONFIG_FOR_DOC = "AlbertConfig"
+
+
+@flax.struct.dataclass
+class FlaxAlbertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`FlaxAlbertForPreTraining`].
+
+ Args:
+ prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ sop_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
+            Prediction scores of the sentence order prediction (classification) head (scores for the original/switched
+            order classes before SoftMax).
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ prediction_logits: jnp.ndarray = None
+ sop_logits: jnp.ndarray = None
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
+ attentions: Optional[Tuple[jnp.ndarray]] = None
+
+
+ALBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+    a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+ALBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+"""
+
+
+class FlaxAlbertEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.word_embeddings = nn.Embed(
+ self.config.vocab_size,
+ self.config.embedding_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.position_embeddings = nn.Embed(
+ self.config.max_position_embeddings,
+ self.config.embedding_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.token_type_embeddings = nn.Embed(
+ self.config.type_vocab_size,
+ self.config.embedding_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, input_ids, token_type_ids, position_ids, deterministic: bool = True):
+ # Embed
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
+
+ # Sum all embeddings
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
+
+ # Layer Norm
+ hidden_states = self.LayerNorm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxAlbertSelfAttention(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
+ raise ValueError(
+                f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of "
+                f"`config.num_attention_heads`: {self.config.num_attention_heads}"
+ )
+
+ self.query = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.key = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.value = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, hidden_states, attention_mask, deterministic=True, output_attentions: bool = False):
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
+
+ query_states = self.query(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+ value_states = self.value(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+ key_states = self.key(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+
+ # Convert the boolean attention mask to an attention bias.
+ if attention_mask is not None:
+ # attention mask in the form of attention bias
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+ else:
+ attention_bias = None
+
+ dropout_rng = None
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.config.attention_probs_dropout_prob,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
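+        # Weighted sum over the key axis: contract attention weights of shape
+        # (..., num_heads, q_len, k_len) with value states of shape
+        # (..., k_len, num_heads, head_dim), then merge the heads back into the hidden size.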
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
+
+ projected_attn_output = self.dense(attn_output)
+ projected_attn_output = self.dropout(projected_attn_output, deterministic=deterministic)
+ layernormed_attn_output = self.LayerNorm(projected_attn_output + hidden_states)
+ outputs = (layernormed_attn_output, attn_weights) if output_attentions else (layernormed_attn_output,)
+ return outputs
+
+
+class FlaxAlbertLayer(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.attention = FlaxAlbertSelfAttention(self.config, dtype=self.dtype)
+ self.ffn = nn.Dense(
+ self.config.intermediate_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.activation = ACT2FN[self.config.hidden_act]
+ self.ffn_output = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.full_layer_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ ):
+ attention_outputs = self.attention(
+ hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions
+ )
+ attention_output = attention_outputs[0]
+ ffn_output = self.ffn(attention_output)
+ ffn_output = self.activation(ffn_output)
+ ffn_output = self.ffn_output(ffn_output)
+ ffn_output = self.dropout(ffn_output, deterministic=deterministic)
+ hidden_states = self.full_layer_layer_norm(ffn_output + attention_output)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attention_outputs[1],)
+ return outputs
+
+
+class FlaxAlbertLayerCollection(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxAlbertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.inner_group_num)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ ):
+ layer_hidden_states = ()
+ layer_attentions = ()
+
+ for layer_index, albert_layer in enumerate(self.layers):
+ layer_output = albert_layer(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_output[0]
+
+ if output_attentions:
+ layer_attentions = layer_attentions + (layer_output[1],)
+
+ if output_hidden_states:
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
+
+ outputs = (hidden_states,)
+ if output_hidden_states:
+ outputs = outputs + (layer_hidden_states,)
+ if output_attentions:
+ outputs = outputs + (layer_attentions,)
+ return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
+
+
+class FlaxAlbertLayerCollections(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ layer_index: Optional[str] = None
+
+ def setup(self):
+ self.albert_layers = FlaxAlbertLayerCollection(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ ):
+ outputs = self.albert_layers(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+ return outputs
+
+
+class FlaxAlbertLayerGroups(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxAlbertLayerCollections(self.config, name=str(i), layer_index=str(i), dtype=self.dtype)
+ for i in range(self.config.num_hidden_groups)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
+
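+        # ALBERT shares parameters across layers: each of the `num_hidden_layers` steps
+        # below reuses one of the `num_hidden_groups` layer groups (for albert-base-v2,
+        # all 12 steps run the single shared group).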
+ for i in range(self.config.num_hidden_layers):
+ # Index of the hidden group
+ group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
+ layer_group_output = self.layers[group_idx](
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+ hidden_states = layer_group_output[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + layer_group_output[-1]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class FlaxAlbertEncoder(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.embedding_hidden_mapping_in = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.albert_layer_groups = FlaxAlbertLayerGroups(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
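+        # ALBERT factorizes the embedding parameters: token embeddings live in the smaller
+        # `embedding_size` space and are projected up to `hidden_size` here before entering
+        # the shared transformer layers.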
+ hidden_states = self.embedding_hidden_mapping_in(hidden_states)
+ return self.albert_layer_groups(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+
+class FlaxAlbertOnlyMLMHead(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype)
+ self.activation = ACT2FN[self.config.hidden_act]
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
+
+ def __call__(self, hidden_states, shared_embedding=None):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+
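+        # When word embeddings are tied, reuse the (transposed) embedding matrix as the
+        # decoder kernel instead of a separate output projection.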
+ if shared_embedding is not None:
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ hidden_states = self.decoder(hidden_states)
+
+ hidden_states += self.bias
+ return hidden_states
+
+
+class FlaxAlbertSOPHead(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.dropout = nn.Dropout(self.config.classifier_dropout_prob)
+ self.classifier = nn.Dense(2, dtype=self.dtype)
+
+ def __call__(self, pooled_output, deterministic=True):
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+ return logits
+
+
+class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = AlbertConfig
+ base_model_prefix = "albert"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: AlbertConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ token_type_ids = jnp.zeros_like(input_ids)
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
+ attention_mask = jnp.ones_like(input_ids)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, return_dict=False
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # init input tensors if not passed
+ if token_type_ids is None:
+ token_type_ids = jnp.zeros_like(input_ids)
+
+ if position_ids is None:
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ return self.module.apply(
+ {"params": params or self.params},
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ jnp.array(token_type_ids, dtype="i4"),
+ jnp.array(position_ids, dtype="i4"),
+ not train,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ )
+
+
+class FlaxAlbertModule(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ add_pooling_layer: bool = True
+
+ def setup(self):
+ self.embeddings = FlaxAlbertEmbeddings(self.config, dtype=self.dtype)
+ self.encoder = FlaxAlbertEncoder(self.config, dtype=self.dtype)
+ if self.add_pooling_layer:
+ self.pooler = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ name="pooler",
+ )
+ self.pooler_activation = nn.tanh
+ else:
+ self.pooler = None
+ self.pooler_activation = None
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids: Optional[np.ndarray] = None,
+ position_ids: Optional[np.ndarray] = None,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # make sure `token_type_ids` is correctly initialized when not passed
+ if token_type_ids is None:
+ token_type_ids = jnp.zeros_like(input_ids)
+
+ # make sure `position_ids` is correctly initialized when not passed
+ if position_ids is None:
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ hidden_states = self.embeddings(input_ids, token_type_ids, position_ids, deterministic=deterministic)
+
+ outputs = self.encoder(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = outputs[0]
+ if self.add_pooling_layer:
+ pooled = self.pooler(hidden_states[:, 0])
+ pooled = self.pooler_activation(pooled)
+ else:
+ pooled = None
+
+ if not return_dict:
+ # if pooled is None, don't return it
+ if pooled is None:
+ return (hidden_states,) + outputs[1:]
+ return (hidden_states, pooled) + outputs[1:]
+
+ return FlaxBaseModelOutputWithPooling(
+ last_hidden_state=hidden_states,
+ pooler_output=pooled,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
+ ALBERT_START_DOCSTRING,
+)
+class FlaxAlbertModel(FlaxAlbertPreTrainedModel):
+ module_class = FlaxAlbertModule
+
+
+append_call_sample_docstring(FlaxAlbertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
+
+
+class FlaxAlbertForPreTrainingModule(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
+ self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype)
+ self.sop_classifier = FlaxAlbertSOPHead(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.albert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ else:
+ shared_embedding = None
+
+ hidden_states = outputs[0]
+ pooled_output = outputs[1]
+
+ prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
+ sop_scores = self.sop_classifier(pooled_output, deterministic=deterministic)
+
+ if not return_dict:
+ return (prediction_scores, sop_scores) + outputs[2:]
+
+ return FlaxAlbertForPreTrainingOutput(
+ prediction_logits=prediction_scores,
+ sop_logits=sop_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
+ `sentence order prediction (classification)` head.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel):
+ module_class = FlaxAlbertForPreTrainingModule
+
+
+FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxAlbertForPreTraining
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
+ >>> model = FlaxAlbertForPreTraining.from_pretrained("albert/albert-base-v2")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> seq_relationship_logits = outputs.sop_logits
+ ```
+"""
+
+overwrite_call_docstring(
+ FlaxAlbertForPreTraining,
+ ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING,
+)
+append_replace_return_docstrings(
+ FlaxAlbertForPreTraining, output_type=FlaxAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
+)
+
+
+class FlaxAlbertForMaskedLMModule(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.albert = FlaxAlbertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype)
+ self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.albert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ else:
+ shared_embedding = None
+
+ # Compute the prediction scores
+ logits = self.predictions(hidden_states, shared_embedding=shared_embedding)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxMaskedLMOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
+class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel):
+ module_class = FlaxAlbertForMaskedLMModule
+
+
+append_call_sample_docstring(
+ FlaxAlbertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC, revision="refs/pr/11"
+)
+
+
+class FlaxAlbertForSequenceClassificationModule(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
+ classifier_dropout = (
+ self.config.classifier_dropout_prob
+ if self.config.classifier_dropout_prob is not None
+ else self.config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(rate=classifier_dropout)
+ self.classifier = nn.Dense(
+ self.config.num_labels,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.albert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ if not return_dict:
+ return (logits,) + outputs[2:]
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel):
+ module_class = FlaxAlbertForSequenceClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxAlbertForSequenceClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSequenceClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxAlbertForMultipleChoiceModule(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+ self.classifier = nn.Dense(1, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
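+        # Flatten the (batch_size, num_choices, seq_len) inputs to (batch_size * num_choices, seq_len) so that every
+        # choice is encoded independently; the logits are reshaped back to one row per example below.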
+ num_choices = input_ids.shape[1]
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
+
+ # Model
+ outputs = self.albert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ reshaped_logits = logits.reshape(-1, num_choices)
+
+ if not return_dict:
+ return (reshaped_logits,) + outputs[2:]
+
+ return FlaxMultipleChoiceModelOutput(
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel):
+ module_class = FlaxAlbertForMultipleChoiceModule
+
+
+overwrite_call_docstring(
+ FlaxAlbertForMultipleChoice, ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+)
+append_call_sample_docstring(
+ FlaxAlbertForMultipleChoice,
+ _CHECKPOINT_FOR_DOC,
+ FlaxMultipleChoiceModelOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxAlbertForTokenClassificationModule(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
+ classifier_dropout = (
+ self.config.classifier_dropout_prob
+ if self.config.classifier_dropout_prob is not None
+ else self.config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(rate=classifier_dropout)
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.albert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.classifier(hidden_states)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxTokenClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel):
+ module_class = FlaxAlbertForTokenClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxAlbertForTokenClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxTokenClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxAlbertForQuestionAnsweringModule(nn.Module):
+ config: AlbertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.albert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
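+        # qa_outputs projects each token to two scores; splitting along the last axis yields the span start/end logits.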
+ logits = self.qa_outputs(hidden_states)
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ if not return_dict:
+ return (start_logits, end_logits) + outputs[1:]
+
+ return FlaxQuestionAnsweringModelOutput(
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel):
+ module_class = FlaxAlbertForQuestionAnsweringModule
+
+
+append_call_sample_docstring(
+ FlaxAlbertForQuestionAnswering,
+ _CHECKPOINT_FOR_DOC,
+ FlaxQuestionAnsweringModelOutput,
+ _CONFIG_FOR_DOC,
+)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_tf_albert.py b/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_tf_albert.py
new file mode 100644
index 0000000000000000000000000000000000000000..5aa521bb73dea7681670416e1705497b1531700c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/modeling_tf_albert.py
@@ -0,0 +1,1564 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 ALBERT model."""
+
+
+from __future__ import annotations
+
+import math
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPooling,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_albert import AlbertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
+_CONFIG_FOR_DOC = "AlbertConfig"
+
+
+from ..deprecated._archive_maps import TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFAlbertPreTrainingLoss:
+ """
+    Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP +
+    MLM.
+
+    .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
+ """
+
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
+ if self.config.tf_legacy_loss:
+ # make sure only labels that are not equal to -100
+ # are taken into account as loss
+ masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100)
+ masked_lm_reduced_logits = tf.boolean_mask(
+ tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])),
+ mask=masked_lm_active_loss,
+ )
+ masked_lm_labels = tf.boolean_mask(
+ tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss
+ )
+ sentence_order_active_loss = tf.not_equal(
+ tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100
+ )
+ sentence_order_reduced_logits = tf.boolean_mask(
+ tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss
+ )
+ sentence_order_label = tf.boolean_mask(
+ tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss
+ )
+ masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits)
+ sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits)
+ masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0]))
+ masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0)
+
+ return masked_lm_loss + sentence_order_loss
+
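+        # Illustrative sketch of the masked-mean reduction computed below (tensor names here are for exposition
+        # only): for labels [[5, -100, 7]] the mask is [[1., 0., 1.]], so
+        #     reduced_loss = tf.reduce_sum(per_token_loss * mask) / tf.reduce_sum(mask)
+        # averages the per-token losses over the positions whose label is not -100.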
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
+ # make sure only labels that are not equal to -100
+ # are taken into account for the loss computation
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
+
+ sop_logits = tf.reshape(logits[1], (-1, 2))
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
+ unmasked_sop_loss = loss_fn(y_true=tf.nn.relu(labels["sentence_order_label"]), y_pred=sop_logits)
+ sop_loss_mask = tf.cast(labels["sentence_order_label"] != -100, dtype=unmasked_sop_loss.dtype)
+
+ masked_sop_loss = unmasked_sop_loss * sop_loss_mask
+ reduced_masked_sop_loss = tf.reduce_sum(masked_sop_loss) / tf.reduce_sum(sop_loss_mask)
+
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_sop_loss, (1,))
+
+
+class TFAlbertEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config: AlbertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embedding_size = config.embedding_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.embedding_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.embedding_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.embedding_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.embedding_size])
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
+ def call(
+ self,
+ input_ids: tf.Tensor = None,
+ position_ids: tf.Tensor = None,
+ token_type_ids: tf.Tensor = None,
+ inputs_embeds: tf.Tensor = None,
+ past_key_values_length=0,
+ training: bool = False,
+ ) -> tf.Tensor:
+ """
+        Applies embedding based on the input tensors.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ if input_ids is None and inputs_embeds is None:
+            raise ValueError("Need to provide either `input_ids` or `inputs_embeds`.")
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
+ )
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+class TFAlbertAttention(keras.layers.Layer):
+ """Contains the complete attention sublayer, including both dropouts and layer norm."""
+
+ def __init__(self, config: AlbertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
+ f"of attention heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+ self.output_attentions = config.output_attentions
+
+ self.query = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+ )
+ self.key = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+ )
+ self.value = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+ )
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ # Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993
+ self.attention_dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
+ self.output_dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ input_tensor: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ batch_size = shape_list(input_tensor)[0]
+ mixed_query_layer = self.query(inputs=input_tensor)
+ mixed_key_layer = self.key(inputs=input_tensor)
+ mixed_value_layer = self.value(inputs=input_tensor)
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ # (batch size, num_heads, seq_len_q, seq_len_k)
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+ attention_scores = tf.divide(attention_scores, dk)
+
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the TFAlbertModel call() function)
+ attention_scores = tf.add(attention_scores, attention_mask)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.attention_dropout(inputs=attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = tf.multiply(attention_probs, head_mask)
+
+ context_layer = tf.matmul(attention_probs, value_layer)
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
+
+ # (batch_size, seq_len_q, all_head_size)
+ context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size))
+ self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ hidden_states = self_outputs[0]
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.output_dropout(inputs=hidden_states, training=training)
+ attention_output = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ # add attentions if we output them
+ outputs = (attention_output,) + self_outputs[1:]
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFAlbertLayer(keras.layers.Layer):
+ def __init__(self, config: AlbertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFAlbertAttention(config, name="attention")
+ self.ffn = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.activation = get_tf_activation(config.hidden_act)
+ else:
+ self.activation = config.hidden_act
+
+ self.ffn_output = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output"
+ )
+ self.full_layer_layer_norm = keras.layers.LayerNormalization(
+ epsilon=config.layer_norm_eps, name="full_layer_layer_norm"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ attention_outputs = self.attention(
+ input_tensor=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ ffn_output = self.ffn(inputs=attention_outputs[0])
+ ffn_output = self.activation(ffn_output)
+ ffn_output = self.ffn_output(inputs=ffn_output)
+ ffn_output = self.dropout(inputs=ffn_output, training=training)
+ hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0])
+
+ # add attentions if we output them
+ outputs = (hidden_states,) + attention_outputs[1:]
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "ffn", None) is not None:
+ with tf.name_scope(self.ffn.name):
+ self.ffn.build([None, None, self.config.hidden_size])
+ if getattr(self, "ffn_output", None) is not None:
+ with tf.name_scope(self.ffn_output.name):
+ self.ffn_output.build([None, None, self.config.intermediate_size])
+ if getattr(self, "full_layer_layer_norm", None) is not None:
+ with tf.name_scope(self.full_layer_layer_norm.name):
+ self.full_layer_layer_norm.build([None, None, self.config.hidden_size])
+
+
+class TFAlbertLayerGroup(keras.layers.Layer):
+ def __init__(self, config: AlbertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.albert_layers = [
+ TFAlbertLayer(config, name=f"albert_layers_._{i}") for i in range(config.inner_group_num)
+ ]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ layer_hidden_states = () if output_hidden_states else None
+ layer_attentions = () if output_attentions else None
+
+ for layer_index, albert_layer in enumerate(self.albert_layers):
+ if output_hidden_states:
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
+
+ layer_output = albert_layer(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask[layer_index],
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_output[0]
+
+ if output_attentions:
+ layer_attentions = layer_attentions + (layer_output[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
+
+ return tuple(v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert_layers", None) is not None:
+ for layer in self.albert_layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFAlbertTransformer(keras.layers.Layer):
+ def __init__(self, config: AlbertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.num_hidden_layers = config.num_hidden_layers
+ self.num_hidden_groups = config.num_hidden_groups
+ # Number of layers in a hidden group
+ self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups)
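+        # For example, with the typical ALBERT setup (e.g. the albert-base-v2 defaults of num_hidden_layers=12 and
+        # num_hidden_groups=1), the single parameter group is applied 12 times, which is how ALBERT shares layer
+        # parameters across depth.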
+ self.embedding_hidden_mapping_in = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="embedding_hidden_mapping_in",
+ )
+ self.albert_layer_groups = [
+ TFAlbertLayerGroup(config, name=f"albert_layer_groups_._{i}") for i in range(config.num_hidden_groups)
+ ]
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states)
+ all_attentions = () if output_attentions else None
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
+
+ for i in range(self.num_hidden_layers):
+ # Index of the hidden group
+ group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups))
+ layer_group_output = self.albert_layer_groups[group_idx](
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group],
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ training=training,
+ )
+ hidden_states = layer_group_output[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + layer_group_output[-1]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embedding_hidden_mapping_in", None) is not None:
+ with tf.name_scope(self.embedding_hidden_mapping_in.name):
+ self.embedding_hidden_mapping_in.build([None, None, self.config.embedding_size])
+ if getattr(self, "albert_layer_groups", None) is not None:
+ for layer in self.albert_layer_groups:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFAlbertPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = AlbertConfig
+ base_model_prefix = "albert"
+
+
+class TFAlbertMLMHead(keras.layers.Layer):
+ def __init__(self, config: AlbertConfig, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embedding_size = config.embedding_size
+ self.dense = keras.layers.Dense(
+ config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ if isinstance(config.hidden_act, str):
+ self.activation = get_tf_activation(config.hidden_act)
+ else:
+ self.activation = config.hidden_act
+
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+ self.decoder_bias = self.add_weight(
+ shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.embedding_size])
+
+ def get_output_embeddings(self) -> keras.layers.Layer:
+ return self.decoder
+
+ def set_output_embeddings(self, value: tf.Variable):
+ self.decoder.weight = value
+ self.decoder.vocab_size = shape_list(value)[0]
+
+ def get_bias(self) -> Dict[str, tf.Variable]:
+ return {"bias": self.bias, "decoder_bias": self.decoder_bias}
+
+ def set_bias(self, value: tf.Variable):
+ self.bias = value["bias"]
+ self.decoder_bias = value["decoder_bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.LayerNorm(inputs=hidden_states)
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
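+        # Project back to the vocabulary by reusing the (tied) input embedding matrix, self.decoder.weight, as the
+        # output projection.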
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias)
+
+ return hidden_states
+
+
+@keras_serializable
+class TFAlbertMainLayer(keras.layers.Layer):
+ config_class = AlbertConfig
+
+ def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+
+ self.embeddings = TFAlbertEmbeddings(config, name="embeddings")
+ self.encoder = TFAlbertTransformer(config, name="encoder")
+ self.pooler = (
+ keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="pooler",
+ )
+ if add_pooling_layer
+ else None
+ )
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.embeddings
+
+ def set_input_embeddings(self, value: tf.Variable):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class `PreTrainedModel`.
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ training=training,
+ )
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+        # this attention mask is simpler than the triangular masking of causal attention
+        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
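+        # e.g. an attention_mask row of [1, 1, 0] becomes additive scores [0.0, 0.0, -10000.0] for that sequence.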
+
+ # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.config.num_hidden_layers
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
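+        # The pooled output is a tanh-activated dense projection of the first ([CLS]) token's final hidden state.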
+ pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build([None, None, self.config.hidden_size])
+
+
+@dataclass
+class TFAlbertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`TFAlbertForPreTraining`].
+
+ Args:
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ sop_logits (`tf.Tensor` of shape `(batch_size, 2)`):
+            Prediction scores of the sentence order prediction (classification) head (scores of original/swapped order
+            before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor = None
+ prediction_logits: tf.Tensor = None
+ sop_logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+ALBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
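+
+    For example (an illustrative sketch; `input_ids` and `attention_mask` stand for tensors you have already prepared):
+
+    ```python
+    outputs = model(input_ids)  # a single tensor
+    outputs = model([input_ids, attention_mask])  # a list, in the documented order
+    outputs = model({"input_ids": input_ids, "attention_mask": attention_mask})  # a dict keyed by input name
+    ```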
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Args:
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ALBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
+ ALBERT_START_DOCSTRING,
+)
+class TFAlbertModel(TFAlbertPreTrainedModel):
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.albert = TFAlbertMainLayer(config, name="albert")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert", None) is not None:
+ with tf.name_scope(self.albert.name):
+ self.albert.build(None)
+
+
+@add_start_docstrings(
+ """
+ Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order
+ prediction` (classification) head.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"]
+
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.albert = TFAlbertMainLayer(config, name="albert")
+ self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
+ self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier")
+
+ def get_lm_head(self) -> keras.layers.Layer:
+ return self.predictions
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ sentence_order_label: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]:
+ r"""
+ Return:
+
+ Example:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, TFAlbertForPreTraining
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
+ >>> model = TFAlbertForPreTraining.from_pretrained("albert/albert-base-v2")
+
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]
+ >>> # Batch size 1
+ >>> outputs = model(input_ids)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> sop_logits = outputs.sop_logits
+ ```"""
+
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output, pooled_output = outputs[:2]
+ prediction_scores = self.predictions(hidden_states=sequence_output)
+ sop_scores = self.sop_classifier(pooled_output=pooled_output, training=training)
+ total_loss = None
+
+ if labels is not None and sentence_order_label is not None:
+ d_labels = {"labels": labels}
+ d_labels["sentence_order_label"] = sentence_order_label
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores))
+
+ if not return_dict:
+ output = (prediction_scores, sop_scores) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return TFAlbertForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ sop_logits=sop_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert", None) is not None:
+ with tf.name_scope(self.albert.name):
+ self.albert.build(None)
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+ if getattr(self, "sop_classifier", None) is not None:
+ with tf.name_scope(self.sop_classifier.name):
+ self.sop_classifier.build(None)
+
+
+class TFAlbertSOPHead(keras.layers.Layer):
+ def __init__(self, config: AlbertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="classifier",
+ )
+ self.config = config
+
+ def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor:
+ dropout_pooled_output = self.dropout(inputs=pooled_output, training=training)
+ logits = self.classifier(inputs=dropout_pooled_output)
+
+ return logits
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
+class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"]
+
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
+ self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
+
+ def get_lm_head(self) -> keras.layers.Layer:
+ return self.predictions
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, TFAlbertForMaskedLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
+ >>> model = TFAlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
+
+ >>> # add mask_token
+        >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="tf")
+ >>> logits = model(**inputs).logits
+
+ >>> # retrieve index of [MASK]
+ >>> mask_token_index = tf.where(inputs.input_ids == tokenizer.mask_token_id)[0][1]
+ >>> predicted_token_id = tf.math.argmax(logits[0, mask_token_index], axis=-1)
+ >>> tokenizer.decode(predicted_token_id)
+ 'france'
+ ```
+
+ ```python
+ >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
+ >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
+ >>> outputs = model(**inputs, labels=labels)
+ >>> round(float(outputs.loss), 2)
+ 0.81
+ ```
+ """
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.predictions(hidden_states=sequence_output, training=training)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert", None) is not None:
+ with tf.name_scope(self.albert.name):
+ self.albert.build(None)
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+
+
+@add_start_docstrings(
+ """
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"predictions"]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.albert = TFAlbertMainLayer(config, name="albert")
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="vumichien/albert-base-v2-imdb",
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'LABEL_1'",
+ expected_loss=0.12,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
+ logits = self.classifier(inputs=pooled_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert", None) is not None:
+ with tf.name_scope(self.albert.name):
+ self.albert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
+ classifier_dropout_prob = (
+ config.classifier_dropout_prob
+ if config.classifier_dropout_prob is not None
+ else config.hidden_dropout_prob
+ )
+ self.dropout = keras.layers.Dropout(rate=classifier_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
+ logits = self.classifier(inputs=sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert", None) is not None:
+ with tf.name_scope(self.albert.name):
+ self.albert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
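A short sketch (not in the diff) of per-token prediction with the token-classification head above. The base checkpoint name and `num_labels` value are assumptions, and the classification layer would be randomly initialized until fine-tuned, so the printed labels are illustrative only.

```python
import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertForTokenClassification

tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2")  # assumed checkpoint
model = TFAlbertForTokenClassification.from_pretrained("albert/albert-base-v2", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits  # shape (batch, sequence_length, num_labels)

predicted_ids = tf.math.argmax(logits, axis=-1)[0].numpy().tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
for token, label_id in zip(tokens, predicted_ids):
    print(token, model.config.id2label[label_id])  # untrained head: labels are arbitrary
```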
+@add_start_docstrings(
+ """
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss):
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
+
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
+ self.qa_outputs = keras.layers.Dense(
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="vumichien/albert-base-v2-squad2",
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ qa_target_start_index=12,
+ qa_target_end_index=13,
+ expected_output="'a nice puppet'",
+ expected_loss=7.36,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ outputs = self.albert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.qa_outputs(inputs=sequence_output)
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
+ loss = None
+
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert", None) is not None:
+ with tf.name_scope(self.albert.name):
+ self.albert.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
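A hedged span-extraction sketch for the question-answering head above; the SQuAD2 checkpoint name mirrors the doc sample in the decorator and is assumed to be available.

```python
import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertForQuestionAnswering

tokenizer = AlbertTokenizer.from_pretrained("vumichien/albert-base-v2-squad2")
model = TFAlbertForQuestionAnswering.from_pretrained("vumichien/albert-base-v2-squad2")

question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# Take the most likely start/end positions and decode the span between them.
start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
answer_ids = inputs["input_ids"][0, start : end + 1]
print(tokenizer.decode(answer_ids))  # ideally something like 'a nice puppet'
```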
+@add_start_docstrings(
+ """
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ ALBERT_START_DOCSTRING,
+)
+class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.albert = TFAlbertMainLayer(config, name="albert")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = (
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
+ )
+ flat_token_type_ids = (
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
+ )
+ flat_position_ids = (
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
+ )
+ flat_inputs_embeds = (
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+ outputs = self.albert(
+ input_ids=flat_input_ids,
+ attention_mask=flat_attention_mask,
+ token_type_ids=flat_token_type_ids,
+ position_ids=flat_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
+ logits = self.classifier(inputs=pooled_output)
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "albert", None) is not None:
+ with tf.name_scope(self.albert.name):
+ self.albert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
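A hedged multiple-choice sketch to close out the modeling file: inputs carry an explicit `(batch, num_choices, seq_len)` shape, which the head above flattens before scoring each choice with a single logit. The base checkpoint name is an assumption.

```python
import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertForMultipleChoice

tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2")  # assumed checkpoint
model = TFAlbertForMultipleChoice.from_pretrained("albert/albert-base-v2")

prompt = "The cat sat on the"
choices = ["mat.", "spaceship."]
encoding = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)

inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}  # add the batch axis
logits = model(**inputs).logits  # shape (1, num_choices)
print(int(tf.math.argmax(logits, axis=-1)[0]))  # index of the highest-scoring choice
```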
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert.py b/venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert.py
new file mode 100644
index 0000000000000000000000000000000000000000..786f9eeafc513c3d09cfe3300d7ac8c3911caf4a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert.py
@@ -0,0 +1,346 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization classes for ALBERT model."""
+
+
+import os
+import unicodedata
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+
+SPIECE_UNDERLINE = "▁"
+
+
+class AlbertTokenizer(PreTrainedTokenizer):
+ """
+ Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `True`):
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `False`):
+ Whether or not to keep accents when tokenizing.
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ remove_space=True,
+ keep_accents=False,
+ bos_token="[CLS]",
+ eos_token="[SEP]",
+ unk_token="",
+ sep_token="[SEP]",
+ pad_token="",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ # The mask token behaves like a normal word, i.e. it includes the space before it and
+ # is included in the raw text; there should be a match in a non-normalized sentence.
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
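A hedged illustration of the `sp_model_kwargs` hook described in the class docstring and consumed in `__init__` above. The checkpoint name and the sampling values are assumptions, not library defaults.

```python
# With sampling enabled, SentencePiece may return a different segmentation on each
# call, which is sometimes used as subword-regularization data augmentation.
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained(
    "albert/albert-base-v2",  # assumed checkpoint providing a spiece.model
    sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
)
print(tok.tokenize("unaffable"))  # may differ from run to run
```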
+ @property
+ def vocab_size(self) -> int:
+ return len(self.sp_model)
+
+ def get_vocab(self) -> Dict[str, int]:
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def preprocess_text(self, inputs):
+ if self.remove_space:
+ outputs = " ".join(inputs.strip().split())
+ else:
+ outputs = inputs
+ outputs = outputs.replace("``", '"').replace("''", '"')
+
+ if not self.keep_accents:
+ outputs = unicodedata.normalize("NFKD", outputs)
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
+ if self.do_lower_case:
+ outputs = outputs.lower()
+
+ return outputs
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Tokenize a string."""
+ text = self.preprocess_text(text)
+ pieces = self.sp_model.encode(text, out_type=str)
+ new_pieces = []
+ for piece in pieces:
+ if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
+ # Logic to handle special cases see https://github.com/google-research/bert/blob/master/README.md#tokenization
+ # `9,9` -> ['▁9', ',', '9'] instead of [`_9,`, '9']
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
+ if len(cur_pieces[0]) == 1:
+ cur_pieces = cur_pieces[1:]
+ else:
+ cur_pieces[0] = cur_pieces[0][1:]
+ cur_pieces.append(piece[-1])
+ new_pieces.extend(cur_pieces)
+ else:
+ new_pieces.append(piece)
+
+ return new_pieces
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.PieceToId(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.sp_model.IdToPiece(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string.strip()
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An ALBERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return cls + token_ids_0 + sep
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
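A sketch, not part of the file, of what the three pair-handling helpers above return; the checkpoint name is an assumption.

```python
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert/albert-base-v2")  # assumed checkpoint
ids_a = tok.convert_tokens_to_ids(tok.tokenize("how are you"))
ids_b = tok.convert_tokens_to_ids(tok.tokenize("fine thanks"))

pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
print(tok.convert_ids_to_tokens(pair))                         # [CLS] A [SEP] B [SEP]
print(tok.get_special_tokens_mask(ids_a, ids_b))               # 1 marks the [CLS]/[SEP] positions
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))  # 0s for segment A, 1s for segment B
```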
diff --git a/venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert_fast.py b/venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0b09a73560ac19bc9cb71510b4e24d4e77cf8be
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert_fast.py
@@ -0,0 +1,210 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization classes for ALBERT model."""
+
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+
+
+if is_sentencepiece_available():
+ from .tokenization_albert import AlbertTokenizer
+else:
+ AlbertTokenizer = None
+
+logger = logging.get_logger(__name__)
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
+
+
+SPIECE_UNDERLINE = "▁"
+
+
+class AlbertTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
+ tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `True`):
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `False`):
+ Whether or not to keep accents when tokenizing.
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token. When building a sequence using special tokens, this is not the token
+ that is used for the end of sequence. The token used is the `sep_token`.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = AlbertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ remove_space=True,
+ keep_accents=False,
+ bos_token="[CLS]",
+ eos_token="[SEP]",
+ unk_token="",
+ sep_token="[SEP]",
+ pad_token="",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ **kwargs,
+ ):
+ # The mask token behaves like a normal word, i.e. it includes the space before it and
+ # is included in the raw text; there should be a match in a non-normalized sentence.
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ **kwargs,
+ )
+
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An ALBERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return cls + token_ids_0 + sep
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
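A brief sketch contrasting the fast tokenizer with the slow one above. The checkpoint name is assumed, and `save_vocabulary` only succeeds when the original `spiece.model` is available, i.e. when `can_save_slow_tokenizer` is `True`.

```python
from transformers import AlbertTokenizerFast

fast = AlbertTokenizerFast.from_pretrained("albert/albert-base-v2")  # assumed checkpoint
enc = fast("how are you", "fine thanks")
print(enc["input_ids"])       # ids for [CLS] A [SEP] B [SEP]
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second

if fast.can_save_slow_tokenizer:  # requires the original spiece.model file
    fast.save_vocabulary(".")
```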
diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..232f08fa3da286ac59b6f9bdf141c58e4c5edfc3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bafcf54df83f0fa2eb3fc13687f41be0aaa262de
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..481fdc42fb041ad2e5e74da3036480fac38efd39
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8fdc6b953e9f9519f102828b06028e773d1283a7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1eef2738387021cdf99fb37824da1f9bae89c1a6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4731e69dacb57bab833d23ac1c88550ed6ba59a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9dd012f7a04a557f426ada6557aee95a9afb95e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5abc37d3b8046030543c040a330e60a01317493
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbf34012924b901f3a074d36ed9be7b1fc32913b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,46 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+
+import torch
+
+from transformers.utils import WEIGHTS_NAME
+
+
+DIALOGPT_MODELS = ["small", "medium", "large"]
+
+OLD_KEY = "lm_head.decoder.weight"
+NEW_KEY = "lm_head.weight"
+
+
+def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
+ d = torch.load(checkpoint_path)
+ d[NEW_KEY] = d.pop(OLD_KEY)
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--dialogpt_path", default=".", type=str)
+ args = parser.parse_args()
+ for MODEL in DIALOGPT_MODELS:
+ checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
+ pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
+ convert_dialogpt_checkpoint(
+ checkpoint_path,
+ pytorch_dump_folder_path,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/donut/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c548a181a3bf3023fd64defca5a3748624db6b7c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/donut/__init__.py
@@ -0,0 +1,74 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"],
+ "processing_donut": ["DonutProcessor"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_donut_swin"] = [
+ "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DonutSwinModel",
+ "DonutSwinPreTrainedModel",
+ ]
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"]
+ _import_structure["image_processing_donut"] = ["DonutImageProcessor"]
+
+
+if TYPE_CHECKING:
+ from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig
+ from .processing_donut import DonutProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_donut_swin import (
+ DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DonutSwinModel,
+ DonutSwinPreTrainedModel,
+ )
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_donut import DonutFeatureExtractor
+ from .image_processing_donut import DonutImageProcessor
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
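A minimal sketch (not in the file) of the lazy-import behavior set up above: importing the package alone does not pull in the torch- or vision-dependent Donut modules; they are only materialized on first attribute access.

```python
import transformers  # cheap: donut submodules are registered but not yet imported

processor_cls = transformers.DonutProcessor  # first access triggers the real import
config = transformers.DonutSwinConfig()      # defaults come from configuration_donut_swin.py
```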
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7e38a32c998511f4fa3bada76857c13e423a5f3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86f0d25132a805c18bc7df4c5bbc9a1733e426a7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/convert_donut_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/convert_donut_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24f754303d12c338a5c00b66536216a9bc7020de
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/convert_donut_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/feature_extraction_donut.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/feature_extraction_donut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7cd7f981eb76d20764fa3d355eefc00636b54e4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/feature_extraction_donut.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/image_processing_donut.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/image_processing_donut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b9dca2fe57b5ebd20e606b374168d15801b0836
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/image_processing_donut.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/modeling_donut_swin.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/modeling_donut_swin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ff2097e01a436f245bc58597d6e714768d64550
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/modeling_donut_swin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/processing_donut.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/processing_donut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..847a238812fe126811cdff3d9bd34132ccab0c02
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/donut/__pycache__/processing_donut.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/configuration_donut_swin.py b/venv/lib/python3.10/site-packages/transformers/models/donut/configuration_donut_swin.py
new file mode 100644
index 0000000000000000000000000000000000000000..e57ddb255a71185cec8567db362def58e16e5fc1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/donut/configuration_donut_swin.py
@@ -0,0 +1,135 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Donut Swin Transformer model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class DonutSwinConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a
+ Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Donut
+ [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 4):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ embed_dim (`int`, *optional*, defaults to 96):
+ Dimensionality of patch embedding.
+ depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
+ Depth of each layer in the Transformer encoder.
+ num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
+ Number of attention heads in each layer of the Transformer encoder.
+ window_size (`int`, *optional*, defaults to 7):
+ Size of windows.
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
+ Ratio of MLP hidden dimensionality to embedding dimensionality.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether or not a learnable bias should be added to the queries, keys and values.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings and encoder.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
+ Stochastic depth rate.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
+ `"selu"` and `"gelu_new"` are supported.
+ use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether or not to add absolute position embeddings to the patch embeddings.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+
+ Example:
+
+ ```python
+ >>> from transformers import DonutSwinConfig, DonutSwinModel
+
+ >>> # Initializing a Donut naver-clova-ix/donut-base style configuration
+ >>> configuration = DonutSwinConfig()
+
+ >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration
+ >>> model = DonutSwinModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "donut-swin"
+
+ attribute_map = {
+ "num_attention_heads": "num_heads",
+ "num_hidden_layers": "num_layers",
+ }
+
+ def __init__(
+ self,
+ image_size=224,
+ patch_size=4,
+ num_channels=3,
+ embed_dim=96,
+ depths=[2, 2, 6, 2],
+ num_heads=[3, 6, 12, 24],
+ window_size=7,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ drop_path_rate=0.1,
+ hidden_act="gelu",
+ use_absolute_embeddings=False,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.embed_dim = embed_dim
+ self.depths = depths
+ self.num_layers = len(depths)
+ self.num_heads = num_heads
+ self.window_size = window_size
+ self.mlp_ratio = mlp_ratio
+ self.qkv_bias = qkv_bias
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.drop_path_rate = drop_path_rate
+ self.hidden_act = hidden_act
+ self.use_absolute_embeddings = use_absolute_embeddings
+ self.layer_norm_eps = layer_norm_eps
+ self.initializer_range = initializer_range
+ # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
+ # this indicates the channel dimension after the last stage of the model
+ self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
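A quick sanity check (not in the file) of the derived `hidden_size` computed in the last line of `__init__`: with the defaults `embed_dim=96` and four stages, the channel dimension after the final stage works out to 96 * 2**3 = 768.

```python
from transformers import DonutSwinConfig

config = DonutSwinConfig()                      # embed_dim=96, depths=[2, 2, 6, 2]
assert config.hidden_size == 96 * 2 ** (4 - 1)  # 768, the channel dim after the last stage
```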
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/convert_donut_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/donut/convert_donut_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..13f669ad97fdcc5bbfcbb2a92536fcca491253a5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/donut/convert_donut_to_pytorch.py
@@ -0,0 +1,234 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""
+
+import argparse
+
+import torch
+from datasets import load_dataset
+from donut import DonutModel
+
+from transformers import (
+ DonutImageProcessor,
+ DonutProcessor,
+ DonutSwinConfig,
+ DonutSwinModel,
+ MBartConfig,
+ MBartForCausalLM,
+ VisionEncoderDecoderModel,
+ XLMRobertaTokenizerFast,
+)
+
+
+def get_configs(model):
+ original_config = model.config
+
+ encoder_config = DonutSwinConfig(
+ image_size=original_config.input_size,
+ patch_size=4,
+ depths=original_config.encoder_layer,
+ num_heads=[4, 8, 16, 32],
+ window_size=original_config.window_size,
+ embed_dim=128,
+ )
+ decoder_config = MBartConfig(
+ is_decoder=True,
+ is_encoder_decoder=False,
+ add_cross_attention=True,
+ decoder_layers=original_config.decoder_layer,
+ max_position_embeddings=original_config.max_position_embeddings,
+ vocab_size=len(
+ model.decoder.tokenizer
+ ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json)
+ scale_embedding=True,
+ add_final_layer_norm=True,
+ )
+
+ return encoder_config, decoder_config
+
+
+def rename_key(name):
+ if "encoder.model" in name:
+ name = name.replace("encoder.model", "encoder")
+ if "decoder.model" in name:
+ name = name.replace("decoder.model", "decoder")
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
+ if "patch_embed.norm" in name:
+ name = name.replace("patch_embed.norm", "embeddings.norm")
+ if name.startswith("encoder"):
+ if "layers" in name:
+ name = "encoder." + name
+ if "attn.proj" in name:
+ name = name.replace("attn.proj", "attention.output.dense")
+ if "attn" in name and "mask" not in name:
+ name = name.replace("attn", "attention.self")
+ if "norm1" in name:
+ name = name.replace("norm1", "layernorm_before")
+ if "norm2" in name:
+ name = name.replace("norm2", "layernorm_after")
+ if "mlp.fc1" in name:
+ name = name.replace("mlp.fc1", "intermediate.dense")
+ if "mlp.fc2" in name:
+ name = name.replace("mlp.fc2", "output.dense")
+
+ if name == "encoder.norm.weight":
+ name = "encoder.layernorm.weight"
+ if name == "encoder.norm.bias":
+ name = "encoder.layernorm.bias"
+
+ return name
+
+
+def convert_state_dict(orig_state_dict, model):
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if "qkv" in key:
+ key_split = key.split(".")
+ layer_num = int(key_split[3])
+ block_num = int(key_split[5])
+ dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
+
+ if "weight" in key:
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
+ ] = val[:dim, :]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
+ ] = val[dim : dim * 2, :]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
+ ] = val[-dim:, :]
+ else:
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
+ ] = val[:dim]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
+ ] = val[dim : dim * 2]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
+ ] = val[-dim:]
+ elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
+ # HuggingFace implementation doesn't use attn_mask buffer
+ # and model doesn't use final LayerNorms for the encoder
+ pass
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ return orig_state_dict
+
+
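An illustrative toy version (not part of the script) of the fused-QKV split that `convert_state_dict` performs above: the original checkpoint stacks query, key and value weights along the first axis, and each third becomes a separate parameter. The sizes here are made up purely for illustration.

```python
import torch

all_head_size = 4  # illustrative size
fused = torch.randn(3 * all_head_size, all_head_size)  # stands in for an original "qkv" weight

query = fused[:all_head_size, :]
key = fused[all_head_size : 2 * all_head_size, :]
value = fused[-all_head_size:, :]

# The three slices recombine exactly into the fused tensor.
assert torch.equal(torch.cat([query, key, value], dim=0), fused)
```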
+def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
+ # load original model
+ original_model = DonutModel.from_pretrained(model_name).eval()
+
+ # load HuggingFace model
+ encoder_config, decoder_config = get_configs(original_model)
+ encoder = DonutSwinModel(encoder_config)
+ decoder = MBartForCausalLM(decoder_config)
+ model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
+ model.eval()
+
+ state_dict = original_model.state_dict()
+ new_state_dict = convert_state_dict(state_dict, model)
+ model.load_state_dict(new_state_dict)
+
+ # verify results on scanned document
+ dataset = load_dataset("hf-internal-testing/example-documents")
+ image = dataset["test"][0]["image"].convert("RGB")
+
+ tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
+ image_processor = DonutImageProcessor(
+ do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
+ )
+ processor = DonutProcessor(image_processor, tokenizer)
+ pixel_values = processor(image, return_tensors="pt").pixel_values
+
+ if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
+ task_prompt = "{user_input}"
+ question = "When is the coffee break?"
+ task_prompt = task_prompt.replace("{user_input}", question)
+ elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
+ task_prompt = ""
+ elif model_name in [
+ "naver-clova-ix/donut-base-finetuned-cord-v1",
+ "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
+ ]:
+ task_prompt = ""
+ elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
+ task_prompt = "s_cord-v2>"
+ elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
+ task_prompt = ""
+ elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
+ # use a random prompt
+ task_prompt = "hello world"
+ else:
+ raise ValueError("Model name not supported")
+ prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
+ "input_ids"
+ ]
+
+ original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
+ patch_embeddings, _ = model.encoder.embeddings(pixel_values)
+ assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
+
+ # verify encoder hidden states
+ original_last_hidden_state = original_model.encoder(pixel_values)
+ last_hidden_state = model.encoder(pixel_values).last_hidden_state
+ assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
+
+ # verify decoder hidden states
+ original_logits = original_model(pixel_values, prompt_tensors, None).logits
+ logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
+ assert torch.allclose(original_logits, logits, atol=1e-3)
+ print("Looks ok!")
+
+ if pytorch_dump_folder_path is not None:
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
+ processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_name",
+ default="naver-clova-ix/donut-base-finetuned-docvqa",
+ required=False,
+ type=str,
+ help="Name of the original model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ required=False,
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model and processor to the 🤗 hub.",
+ )
+
+ args = parser.parse_args()
+ convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
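+
+# Example invocation (a sketch; the script filename and output path below are assumptions):
+#   python convert_donut_to_pytorch.py \
+#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
+#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa-converted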
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/feature_extraction_donut.py b/venv/lib/python3.10/site-packages/transformers/models/donut/feature_extraction_donut.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6ca078c0e8ac4939514dcb297f5d2c63de032f7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/donut/feature_extraction_donut.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for Donut."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_donut import DonutImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class DonutFeatureExtractor(DonutImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+ " use DonutImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/image_processing_donut.py b/venv/lib/python3.10/site-packages/transformers/models/donut/image_processing_donut.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c6e4723139046ae4c479690c5242e35ef5e604d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/donut/image_processing_donut.py
@@ -0,0 +1,480 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Donut."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ pad,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+from ...utils.import_utils import is_vision_available
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_vision_available():
+ import PIL
+
+
+class DonutImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Donut image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+ `do_resize` in the `preprocess` method.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 2560, "width": 1920}`):
+ Size of the image after resizing. The shortest edge of the image is resized to `min(size["height"],
+ size["width"])`, with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in
+ the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+ do_thumbnail (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image using the thumbnail method.
+ do_align_long_axis (`bool`, *optional*, defaults to `False`):
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the image. If `random_padding` is set to `True` in `preprocess`, each image is padded with a
+ random amount of padding on each side, up to the specified `size`. Otherwise, the images are center-padded
+ to the specified `size`.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+ the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+ method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Image standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_thumbnail: bool = True,
+ do_align_long_axis: bool = False,
+ do_pad: bool = True,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+
+ size = size if size is not None else {"height": 2560, "width": 1920}
+ if isinstance(size, (tuple, list)):
+ # The previous feature extractor size parameter was in (width, height) format
+ size = size[::-1]
+ size = get_size_dict(size)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_thumbnail = do_thumbnail
+ self.do_align_long_axis = do_align_long_axis
+ self.do_pad = do_pad
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_thumbnail",
+ "do_align_long_axis",
+ "do_pad",
+ "random_padding",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def align_long_axis(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Align the long axis of the image to the longest axis of the specified size.
+
+ Args:
+ image (`np.ndarray`):
+ The image to be aligned.
+ size (`Dict[str, int]`):
+ The size `{"height": h, "width": w}` to align the long axis to.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The data format of the output image. If unset, the same format as the input image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+
+ Returns:
+ `np.ndarray`: The aligned image.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = size["height"], size["width"]
+
+ if (output_width < output_height and input_width > input_height) or (
+ output_width > output_height and input_width < input_height
+ ):
+ image = np.rot90(image, 3)
+
+ if data_format is not None:
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+
+ return image
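+
+ # Worked example (a sketch): with the default size {"height": 2560, "width": 1920} (portrait)
+ # and a landscape input (width > height), the condition above holds and the image is rotated
+ # by 90 degrees via np.rot90(image, 3) so that its long axis matches the target's.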
+
+ def pad_image(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ random_padding: bool = False,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Pad the image to the specified size.
+
+ Args:
+ image (`np.ndarray`):
+ The image to be padded.
+ size (`Dict[str, int]`):
+ The size `{"height": h, "width": w}` to pad the image to.
+ random_padding (`bool`, *optional*, defaults to `False`):
+ Whether to use random padding or not.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The data format of the output image. If unset, the same format as the input image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ output_height, output_width = size["height"], size["width"]
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+
+ delta_width = output_width - input_width
+ delta_height = output_height - input_height
+
+ if random_padding:
+ pad_top = np.random.randint(low=0, high=delta_height + 1)
+ pad_left = np.random.randint(low=0, high=delta_width + 1)
+ else:
+ pad_top = delta_height // 2
+ pad_left = delta_width // 2
+
+ pad_bottom = delta_height - pad_top
+ pad_right = delta_width - pad_left
+
+ padding = ((pad_top, pad_bottom), (pad_left, pad_right))
+ return pad(image, padding, data_format=data_format, input_data_format=input_data_format)
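+
+ # Worked example (a sketch): padding an 800x600 (height x width) image to
+ # {"height": 2560, "width": 1920} with random_padding=False gives delta_height = 1760,
+ # delta_width = 1320, so pad_top = pad_bottom = 880 and pad_left = pad_right = 660.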
+
+ def pad(self, *args, **kwargs):
+ logger.info("pad is deprecated and will be removed in version 4.27. Please use pad_image instead.")
+ return self.pad_image(*args, **kwargs)
+
+ def thumbnail(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
+ corresponding dimension of the specified size.
+
+ Args:
+ image (`np.ndarray`):
+ The image to be resized.
+ size (`Dict[str, int]`):
+ The size `{"height": h, "width": w}` to resize the image to.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ The resampling filter to use.
+ data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
+ The data format of the output image. If unset, the same format as the input image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = size["height"], size["width"]
+
+ # We always resize to the smallest of either the input or output size.
+ height = min(input_height, output_height)
+ width = min(input_width, output_width)
+
+ if height == input_height and width == input_width:
+ return image
+
+ if input_height > input_width:
+ width = int(input_width * height / input_height)
+ elif input_width > input_height:
+ height = int(input_height * width / input_width)
+
+ return resize(
+ image,
+ size=(height, width),
+ resample=resample,
+ reducing_gap=2.0,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
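+
+ # Worked example (a sketch): a 3000x2000 (height x width) input with size
+ # {"height": 2560, "width": 1920} gives height = 2560, width = 1920; since input_height >
+ # input_width, width becomes int(2000 * 2560 / 3000) = 1706, preserving the aspect ratio.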
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resizes `image` to `(height, width)` specified by `size` using the PIL library.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size)
+ shortest_edge = min(size["height"], size["width"])
+ output_size = get_resize_output_image_size(
+ image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
+ )
+ resized_image = resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ return resized_image
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_thumbnail: bool = None,
+ do_align_long_axis: bool = None,
+ do_pad: bool = None,
+ random_padding: bool = False,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
+ size["width"]) with the longest edge resized to keep the input aspect ratio.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
+ Whether to resize the image using the thumbnail method.
+ do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random
+ amount of padding on each side, up to the specified `size`. Otherwise, the images are center-padded to the
+ specified `size`.
+ random_padding (`bool`, *optional*, defaults to `False`):
+ Whether to use random padding when padding the image. If `True`, each image in the batch will be padded
+ with a random amount of padding on each side, up to the specified `size`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image pixel values.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use for normalization.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use for normalization.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: defaults to the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ if isinstance(size, (tuple, list)):
+ # Previous feature extractor had size in (width, height) format
+ size = size[::-1]
+ size = get_size_dict(size)
+ resample = resample if resample is not None else self.resample
+ do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
+ do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
+ do_pad = do_pad if do_pad is not None else self.do_pad
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_align_long_axis:
+ images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_thumbnail:
+ images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]
+
+ if do_pad:
+ images = [
+ self.pad_image(
+ image=image, size=size, random_padding=random_padding, input_data_format=input_data_format
+ )
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
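+
+# Minimal usage sketch (assumes `image` is an RGB PIL image; with the default settings the
+# output has shape (1, 3, 2560, 1920)):
+#   from transformers import DonutImageProcessor
+#   image_processor = DonutImageProcessor()
+#   pixel_values = image_processor(image, return_tensors="pt").pixel_values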
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/modeling_donut_swin.py b/venv/lib/python3.10/site-packages/transformers/models/donut/modeling_donut_swin.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2aa8d61b1d8d102fa4ddbc5171c77069c265b4b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/donut/modeling_donut_swin.py
@@ -0,0 +1,955 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Donut Swin Transformer model.
+
+This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden
+states."""
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_donut_swin import DonutSwinConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "DonutSwinConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base"
+_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
+
+
+from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+# Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->DonutSwin
+class DonutSwinEncoderOutput(ModelOutput):
+ """
+ DonutSwin encoder's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+# Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->DonutSwin
+class DonutSwinModelOutput(ModelOutput):
+ """
+ DonutSwin model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
+ Average pooling of the last layer hidden-state.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ pooler_output: Optional[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+# Copied from transformers.models.swin.modeling_swin.window_partition
+def window_partition(input_feature, window_size):
+ """
+ Partitions the given input into windows.
+ """
+ batch_size, height, width, num_channels = input_feature.shape
+ input_feature = input_feature.view(
+ batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
+ )
+ windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
+ return windows
+
+
+# Copied from transformers.models.swin.modeling_swin.window_reverse
+def window_reverse(windows, window_size, height, width):
+ """
+ Merges windows to produce higher resolution features.
+ """
+ num_channels = windows.shape[-1]
+ windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
+ windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
+ return windows
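+
+# Shape sketch: with window_size = 7, window_partition maps (batch_size, 56, 56, num_channels)
+# to (batch_size * 64, 7, 7, num_channels); window_reverse(windows, 7, 56, 56) inverts it.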
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->DonutSwin
+class DonutSwinEmbeddings(nn.Module):
+ """
+ Construct the patch and position embeddings. Optionally, also the mask token.
+ """
+
+ def __init__(self, config, use_mask_token=False):
+ super().__init__()
+
+ self.patch_embeddings = DonutSwinPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.patch_grid = self.patch_embeddings.grid_size
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
+
+ if config.use_absolute_embeddings:
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
+ else:
+ self.position_embeddings = None
+
+ self.norm = nn.LayerNorm(config.embed_dim)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(
+ self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
+ ) -> Tuple[torch.Tensor]:
+ embeddings, output_dimensions = self.patch_embeddings(pixel_values)
+ embeddings = self.norm(embeddings)
+ batch_size, seq_len, _ = embeddings.size()
+
+ if bool_masked_pos is not None:
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
+ # replace the masked visual tokens by mask_tokens
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
+
+ if self.position_embeddings is not None:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings, output_dimensions
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings
+class DonutSwinPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.embed_dim
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+ self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def maybe_pad(self, pixel_values, height, width):
+ if width % self.patch_size[1] != 0:
+ pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
+ if height % self.patch_size[0] != 0:
+ pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
+ return pixel_values
+
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
+ _, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ # pad the input to be divisible by self.patch_size, if needed
+ pixel_values = self.maybe_pad(pixel_values, height, width)
+ embeddings = self.projection(pixel_values)
+ _, _, height, width = embeddings.shape
+ output_dimensions = (height, width)
+ embeddings = embeddings.flatten(2).transpose(1, 2)
+
+ return embeddings, output_dimensions
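+
+ # Shape sketch: a (batch_size, 3, 2560, 1920) input with patch_size = 4 yields embeddings of
+ # shape (batch_size, 640 * 480, embed_dim) and output_dimensions = (640, 480).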
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging
+class DonutSwinPatchMerging(nn.Module):
+ """
+ Patch Merging Layer.
+
+ Args:
+ input_resolution (`Tuple[int]`):
+ Resolution of input feature.
+ dim (`int`):
+ Number of input channels.
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
+ Normalization layer class.
+ """
+
+ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
+ super().__init__()
+ self.input_resolution = input_resolution
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+ self.norm = norm_layer(4 * dim)
+
+ def maybe_pad(self, input_feature, height, width):
+ should_pad = (height % 2 == 1) or (width % 2 == 1)
+ if should_pad:
+ pad_values = (0, 0, 0, width % 2, 0, height % 2)
+ input_feature = nn.functional.pad(input_feature, pad_values)
+
+ return input_feature
+
+ def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
+ height, width = input_dimensions
+ # `dim` is height * width
+ batch_size, dim, num_channels = input_feature.shape
+
+ input_feature = input_feature.view(batch_size, height, width, num_channels)
+ # pad input so that height and width are divisible by 2, if needed
+ input_feature = self.maybe_pad(input_feature, height, width)
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_0 = input_feature[:, 0::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_1 = input_feature[:, 1::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_2 = input_feature[:, 0::2, 1::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_3 = input_feature[:, 1::2, 1::2, :]
+ # batch_size height/2 width/2 4*num_channels
+ input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
+ input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
+
+ input_feature = self.norm(input_feature)
+ input_feature = self.reduction(input_feature)
+
+ return input_feature
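+
+ # Shape sketch: patch merging maps (batch_size, height * width, num_channels) to
+ # (batch_size, (height/2) * (width/2), 2 * num_channels) by concatenating each 2x2
+ # neighborhood (4 * num_channels) and projecting it with `reduction`.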
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
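+
+# Behavior sketch: with drop_prob = 0.1 during training, each sample's residual branch is
+# zeroed with probability 0.1 and scaled by 1 / 0.9 otherwise, keeping the expected value
+# unchanged.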
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinDropPath
+class DonutSwinDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->DonutSwin
+class DonutSwinSelfAttention(nn.Module):
+ def __init__(self, config, dim, num_heads, window_size):
+ super().__init__()
+ if dim % num_heads != 0:
+ raise ValueError(
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
+ )
+
+ self.num_attention_heads = num_heads
+ self.attention_head_size = int(dim / num_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.window_size = (
+ window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
+ )
+
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
+ )
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(self.window_size[0])
+ coords_w = torch.arange(self.window_size[1])
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
+ coords_flatten = torch.flatten(coords, 1)
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
+ relative_coords[:, :, 0] += self.window_size[0] - 1
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ relative_position_index = relative_coords.sum(-1)
+ self.register_buffer("relative_position_index", relative_position_index)
+
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ batch_size, dim, num_channels = hidden_states.shape
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
+ relative_position_bias = relative_position_bias.view(
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
+ )
+
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
+ attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in DonutSwinModel forward() function)
+ mask_shape = attention_mask.shape[0]
+ attention_scores = attention_scores.view(
+ batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
+ )
+ attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
+ attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput
+class DonutSwinSelfOutput(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, dim)
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->DonutSwin
+class DonutSwinAttention(nn.Module):
+ def __init__(self, config, dim, num_heads, window_size):
+ super().__init__()
+ self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size)
+ self.output = DonutSwinSelfOutput(config, dim)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinIntermediate
+class DonutSwinIntermediate(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinOutput
+class DonutSwinOutput(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->DonutSwin
+class DonutSwinLayer(nn.Module):
+ def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.shift_size = shift_size
+ self.window_size = config.window_size
+ self.input_resolution = input_resolution
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size)
+ self.drop_path = DonutSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.intermediate = DonutSwinIntermediate(config, dim)
+ self.output = DonutSwinOutput(config, dim)
+
+ def set_shift_and_window_size(self, input_resolution):
+ if min(input_resolution) <= self.window_size:
+ # if window size is larger than input resolution, we don't partition windows
+ self.shift_size = 0
+ self.window_size = min(input_resolution)
+
+ def get_attn_mask(self, height, width, dtype):
+ if self.shift_size > 0:
+ # calculate attention mask for SW-MSA
+ img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
+ height_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ width_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ count = 0
+ for height_slice in height_slices:
+ for width_slice in width_slices:
+ img_mask[:, height_slice, width_slice, :] = count
+ count += 1
+
+ mask_windows = window_partition(img_mask, self.window_size)
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+ else:
+ attn_mask = None
+ return attn_mask
+
+ def maybe_pad(self, hidden_states, height, width):
+ pad_right = (self.window_size - width % self.window_size) % self.window_size
+ pad_bottom = (self.window_size - height % self.window_size) % self.window_size
+ pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
+ return hidden_states, pad_values
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ if not always_partition:
+ self.set_shift_and_window_size(input_dimensions)
+ else:
+ pass
+ height, width = input_dimensions
+ batch_size, _, channels = hidden_states.size()
+ shortcut = hidden_states
+
+ hidden_states = self.layernorm_before(hidden_states)
+
+ hidden_states = hidden_states.view(batch_size, height, width, channels)
+
+ # pad hidden_states to multiples of window size
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
+
+ _, height_pad, width_pad, _ = hidden_states.shape
+ # cyclic shift
+ if self.shift_size > 0:
+ shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+ else:
+ shifted_hidden_states = hidden_states
+
+ # partition windows
+ hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
+ hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
+ attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
+ if attn_mask is not None:
+ attn_mask = attn_mask.to(hidden_states_windows.device)
+
+ attention_outputs = self.attention(
+ hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
+ )
+
+ attention_output = attention_outputs[0]
+
+ attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
+ shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
+
+ # reverse cyclic shift
+ if self.shift_size > 0:
+ attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+ else:
+ attention_windows = shifted_windows
+
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
+ if was_padded:
+ attention_windows = attention_windows[:, :height, :width, :].contiguous()
+
+ attention_windows = attention_windows.view(batch_size, height * width, channels)
+
+ hidden_states = shortcut + self.drop_path(attention_windows)
+
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+ layer_output = hidden_states + self.output(layer_output)
+
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
+ return layer_outputs
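+
+ # Flow sketch of the block above: layernorm_before -> (optional cyclic shift) -> window
+ # partition -> windowed self-attention -> window reverse -> (reverse shift) -> residual add,
+ # then layernorm_after -> intermediate/output MLP -> second residual add.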
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->DonutSwin
+class DonutSwinStage(nn.Module):
+ def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
+ super().__init__()
+ self.config = config
+ self.dim = dim
+ self.blocks = nn.ModuleList(
+ [
+ DonutSwinLayer(
+ config=config,
+ dim=dim,
+ input_resolution=input_resolution,
+ num_heads=num_heads,
+ shift_size=0 if (i % 2 == 0) else config.window_size // 2,
+ )
+ for i in range(depth)
+ ]
+ )
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
+ else:
+ self.downsample = None
+
+ self.pointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ height, width = input_dimensions
+ for i, layer_module in enumerate(self.blocks):
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
+ )
+
+ hidden_states = layer_outputs[0]
+
+ hidden_states_before_downsampling = hidden_states
+ if self.downsample is not None:
+ height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
+ output_dimensions = (height, width, height_downsampled, width_downsampled)
+ hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
+ else:
+ output_dimensions = (height, width, height, width)
+
+ stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
+
+ if output_attentions:
+ stage_outputs += layer_outputs[1:]
+ return stage_outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->DonutSwin
+class DonutSwinEncoder(nn.Module):
+ def __init__(self, config, grid_size):
+ super().__init__()
+ self.num_layers = len(config.depths)
+ self.config = config
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
+ self.layers = nn.ModuleList(
+ [
+ DonutSwinStage(
+ config=config,
+ dim=int(config.embed_dim * 2**i_layer),
+ input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
+ depth=config.depths[i_layer],
+ num_heads=config.num_heads[i_layer],
+ drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
+ downsample=DonutSwinPatchMerging if (i_layer < self.num_layers - 1) else None,
+ )
+ for i_layer in range(self.num_layers)
+ ]
+ )
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ output_hidden_states_before_downsampling: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, DonutSwinEncoderOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_reshaped_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if output_hidden_states:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange b (h w) c -> b c h w
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ for i, layer_module in enumerate(self.layers):
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ input_dimensions,
+ layer_head_mask,
+ output_attentions,
+ always_partition,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
+ )
+
+ hidden_states = layer_outputs[0]
+ hidden_states_before_downsampling = layer_outputs[1]
+ output_dimensions = layer_outputs[2]
+
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
+
+ if output_hidden_states and output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
+ # rearrange b (h w) c -> b c h w
+ # here we use the original (not downsampled) height and width
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
+ )
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states_before_downsampling,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange b (h w) c -> b c h w
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ if output_attentions:
+ all_self_attentions += layer_outputs[3:]
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+
+ return DonutSwinEncoderOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ reshaped_hidden_states=all_reshaped_hidden_states,
+ )
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->DonutSwin
+class DonutSwinPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DonutSwinConfig
+ base_model_prefix = "swin"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+SWIN_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`DonutSwinConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SWIN_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`DonutImageProcessor.__call__`] for details.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.",
+ SWIN_START_DOCSTRING,
+)
+class DonutSwinModel(DonutSwinPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
+ super().__init__(config)
+ self.config = config
+ self.num_layers = len(config.depths)
+ self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
+
+ self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token)
+ self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid)
+
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=DonutSwinModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, DonutSwinModelOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, len(self.config.depths))
+
+ embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ input_dimensions,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = encoder_outputs[0]
+
+ pooled_output = None
+ if self.pooler is not None:
+ pooled_output = self.pooler(sequence_output.transpose(1, 2))
+ pooled_output = torch.flatten(pooled_output, 1)
+
+ if not return_dict:
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return output
+
+ return DonutSwinModelOutput(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
+ )
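+
+
+ # --- Illustrative usage sketch (comment only, not part of the original file) ---
+ # A minimal example of running the bare encoder on dummy pixel values; the default
+ # config below is an assumption chosen just to show the expected output shapes.
+ #
+ # import torch
+ # from transformers import DonutSwinConfig, DonutSwinModel
+ #
+ # config = DonutSwinConfig()
+ # model = DonutSwinModel(config)  # randomly initialized weights
+ # pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
+ # outputs = model(pixel_values)
+ # # outputs.last_hidden_state: (batch_size, sequence_length, hidden_size)
+ # # outputs.pooler_output: (batch_size, hidden_size)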
diff --git a/venv/lib/python3.10/site-packages/transformers/models/donut/processing_donut.py b/venv/lib/python3.10/site-packages/transformers/models/donut/processing_donut.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f03fd6306fc0a9940fe1bf2b497d705fc4e22ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/donut/processing_donut.py
@@ -0,0 +1,196 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for Donut.
+"""
+import re
+import warnings
+from contextlib import contextmanager
+
+from ...processing_utils import ProcessorMixin
+
+
+class DonutProcessor(ProcessorMixin):
+ r"""
+ Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single
+ processor.
+
+ [`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and
+ [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and
+ [`~DonutProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`DonutImageProcessor`], *optional*):
+ An instance of [`DonutImageProcessor`]. The image processor is a required input.
+ tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
+ An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "AutoImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ feature_extractor = None
+ if "feature_extractor" in kwargs:
+ warnings.warn(
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+ " instead.",
+ FutureWarning,
+ )
+ feature_extractor = kwargs.pop("feature_extractor")
+
+ image_processor = image_processor if image_processor is not None else feature_extractor
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+ self._in_target_context_manager = False
+
+ def __call__(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
+ [`~AutoImageProcessor.__call__`] and returns its output. If used in the context
+ [`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's
+ [`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor(*args, **kwargs)
+
+ images = kwargs.pop("images", None)
+ text = kwargs.pop("text", None)
+ if len(args) > 0:
+ images = args[0]
+ args = args[1:]
+
+ if images is None and text is None:
+ raise ValueError("You need to specify either an `images` or `text` input to process.")
+
+ if images is not None:
+ inputs = self.image_processor(images, *args, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif images is None:
+ return encodings
+ else:
+ inputs["labels"] = encodings["input_ids"]
+ return inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
+ docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @contextmanager
+ def as_target_processor(self):
+ """
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Donut.
+ """
+ warnings.warn(
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
+ "your images inputs, or in a separate call."
+ )
+ self._in_target_context_manager = True
+ self.current_processor = self.tokenizer
+ yield
+ self.current_processor = self.image_processor
+ self._in_target_context_manager = False
+
+ def token2json(self, tokens, is_inner_value=False, added_vocab=None):
+ """
+ Convert a (generated) token sequence into an ordered JSON format.
+ """
+ if added_vocab is None:
+ added_vocab = self.tokenizer.get_added_vocab()
+
+ output = {}
+
+ while tokens:
+ start_token = re.search(r"", tokens, re.IGNORECASE)
+ if start_token is None:
+ break
+ key = start_token.group(1)
+ key_escaped = re.escape(key)
+
+ end_token = re.search(rf"", tokens, re.IGNORECASE)
+ start_token = start_token.group()
+ if end_token is None:
+ tokens = tokens.replace(start_token, "")
+ else:
+ end_token = end_token.group()
+ start_token_escaped = re.escape(start_token)
+ end_token_escaped = re.escape(end_token)
+ content = re.search(
+ f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE | re.DOTALL
+ )
+ if content is not None:
+ content = content.group(1).strip()
+ if r"<s_" in content and r"</s_" in content: # non-leaf node
+ value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
+ if value:
+ if len(value) == 1:
+ value = value[0]
+ output[key] = value
+ else: # leaf nodes
+ output[key] = []
+ for leaf in content.split(r"<sep/>"):
+ leaf = leaf.strip()
+ if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
+ leaf = leaf[1:-2] # for categorical special tokens
+ output[key].append(leaf)
+ if len(output[key]) == 1:
+ output[key] = output[key][0]
+
+ tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
+ if tokens[:6] == r"<sep/>": # non-leaf nodes
+ return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
+
+ if len(output):
+ return [output] if is_inner_value else output
+ else:
+ return [] if is_inner_value else {"text_sequence": tokens}
+
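+ # Illustrative example of the method above (comment only, not part of the original file),
+ # assuming the generated sequence uses the usual <s_{key}>...</s_{key}> markup:
+ #
+ # processor.token2json("<s_menu><s_name>Latte</s_name><s_price>$5</s_price></s_menu>")
+ # # -> {"menu": {"name": "Latte", "price": "$5"}}
+ #
+ # Leaf values separated by <sep/> come back as a list:
+ # processor.token2json("<s_items>a<sep/>b</s_items>")
+ # # -> {"items": ["a", "b"]}
+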
+ @property
+ def feature_extractor_class(self):
+ warnings.warn(
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+ FutureWarning,
+ )
+ return self.image_processor_class
+
+ @property
+ def feature_extractor(self):
+ warnings.warn(
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+ FutureWarning,
+ )
+ return self.image_processor
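+
+
+ # --- Illustrative usage sketch (comment only, not part of the original file) ---
+ # `images` is forwarded to the image processor and `text` to the tokenizer; the
+ # checkpoint name and file path below are placeholders, not prescriptions.
+ #
+ # from PIL import Image
+ # from transformers import DonutProcessor
+ #
+ # processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
+ # image = Image.open("document.png").convert("RGB")
+ # inputs = processor(images=image, text="hello world", return_tensors="pt")
+ # # inputs["pixel_values"] comes from the image processor,
+ # # inputs["labels"] holds the tokenized text (input_ids)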
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b043a006f9376609c774e84f5376323f48f2cae7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__init__.py
@@ -0,0 +1,59 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_focalnet"] = [
+ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "FocalNetForImageClassification",
+ "FocalNetForMaskedImageModeling",
+ "FocalNetBackbone",
+ "FocalNetModel",
+ "FocalNetPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_focalnet import (
+ FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ FocalNetBackbone,
+ FocalNetForImageClassification,
+ FocalNetForMaskedImageModeling,
+ FocalNetModel,
+ FocalNetPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c4066b0c8294f5953889c9380499f0d80aef27b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0818e94f5eaadbe35bc6798184a6540ea3a9d80e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/convert_focalnet_to_hf_format.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/convert_focalnet_to_hf_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..245f382bb9b9317ce3706a0031a38e21117fa9de
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/convert_focalnet_to_hf_format.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c29b7a9c299819bdf6b3bf54d751241fee6b94a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/configuration_focalnet.py b/venv/lib/python3.10/site-packages/transformers/models/focalnet/configuration_focalnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f590b9c2c00a40dafaf54f6ae2131316b6674bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/focalnet/configuration_focalnet.py
@@ -0,0 +1,164 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" FocalNet model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a
+ FocalNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the FocalNet
+ [microsoft/focalnet-tiny](https://huggingface.co/microsoft/focalnet-tiny) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 4):
+ The size (resolution) of each patch in the embeddings layer.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ embed_dim (`int`, *optional*, defaults to 96):
+ Dimensionality of patch embedding.
+ use_conv_embed (`bool`, *optional*, defaults to `False`):
+ Whether to use convolutional embedding. The authors noted that using convolutional embedding usually
+ improves performance, but it is not used by default.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[192, 384, 768, 768]`):
+ Dimensionality (hidden size) at each stage.
+ depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
+ Depth (number of layers) of each stage in the encoder.
+ focal_levels (`list(int)`, *optional*, defaults to `[2, 2, 2, 2]`):
+ Number of focal levels in each layer of the respective stages in the encoder.
+ focal_windows (`list(int)`, *optional*, defaults to `[3, 3, 3, 3]`):
+ Focal window size in each layer of the respective stages in the encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
+ `"selu"` and `"gelu_new"` are supported.
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
+ Ratio of MLP hidden dimensionality to embedding dimensionality.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings and encoder.
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
+ Stochastic depth rate.
+ use_layerscale (`bool`, *optional*, defaults to `False`):
+ Whether to use layer scale in the encoder.
+ layerscale_value (`float`, *optional*, defaults to 0.0001):
+ The initial value of the layer scale.
+ use_post_layernorm (`bool`, *optional*, defaults to `False`):
+ Whether to use post layer normalization in the encoder.
+ use_post_layernorm_in_modulation (`bool`, *optional*, defaults to `False`):
+ Whether to use post layer normalization in the modulation layer.
+ normalize_modulator (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the modulator.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ encoder_stride (`int`, *optional*, defaults to 32):
+ Factor to increase the spatial resolution by in the decoder head for masked image modeling.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+
+ Example:
+
+ ```python
+ >>> from transformers import FocalNetConfig, FocalNetModel
+
+ >>> # Initializing a FocalNet microsoft/focalnet-tiny style configuration
+ >>> configuration = FocalNetConfig()
+
+ >>> # Initializing a model (with random weights) from the microsoft/focalnet-tiny style configuration
+ >>> model = FocalNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "focalnet"
+
+ def __init__(
+ self,
+ image_size=224,
+ patch_size=4,
+ num_channels=3,
+ embed_dim=96,
+ use_conv_embed=False,
+ hidden_sizes=[192, 384, 768, 768],
+ depths=[2, 2, 6, 2],
+ focal_levels=[2, 2, 2, 2],
+ focal_windows=[3, 3, 3, 3],
+ hidden_act="gelu",
+ mlp_ratio=4.0,
+ hidden_dropout_prob=0.0,
+ drop_path_rate=0.1,
+ use_layerscale=False,
+ layerscale_value=1e-4,
+ use_post_layernorm=False,
+ use_post_layernorm_in_modulation=False,
+ normalize_modulator=False,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ encoder_stride=32,
+ out_features=None,
+ out_indices=None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.embed_dim = embed_dim
+ self.use_conv_embed = use_conv_embed
+ self.hidden_sizes = hidden_sizes
+ self.depths = depths
+ self.focal_levels = focal_levels
+ self.focal_windows = focal_windows
+ self.hidden_act = hidden_act
+ self.mlp_ratio = mlp_ratio
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.drop_path_rate = drop_path_rate
+ self.use_layerscale = use_layerscale
+ self.layerscale_value = layerscale_value
+ self.use_post_layernorm = use_post_layernorm
+ self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
+ self.normalize_modulator = normalize_modulator
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.encoder_stride = encoder_stride
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
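+
+
+ # --- Illustrative note (comment only, not part of the original file) ---
+ # `out_features`/`out_indices` are aligned against `stage_names`
+ # (["stem", "stage1", "stage2", "stage3", "stage4"] for the default 4-stage config):
+ #
+ # config = FocalNetConfig(out_indices=[2, 4])
+ # # config.out_features -> ["stage2", "stage4"]
+ # # config.out_indices  -> [2, 4]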
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/convert_focalnet_to_hf_format.py b/venv/lib/python3.10/site-packages/transformers/models/focalnet/convert_focalnet_to_hf_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aed15928062976c5f9589e2e6896e4e028b4eea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/focalnet/convert_focalnet_to_hf_format.py
@@ -0,0 +1,237 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert FocalNet checkpoints from the original repository. URL: https://github.com/microsoft/FocalNet/tree/main"""
+
+import argparse
+import json
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from torchvision import transforms
+
+from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
+from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
+
+
+def get_focalnet_config(model_name):
+ depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
+ use_conv_embed = True if "large" in model_name or "huge" in model_name else False
+ use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
+ use_layerscale = True if "large" in model_name or "huge" in model_name else False
+
+ if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
+ if "fl3" in model_name:
+ focal_levels = [3, 3, 3, 3]
+ focal_windows = [5, 5, 5, 5]
+ elif "fl4" in model_name:
+ focal_levels = [4, 4, 4, 4]
+ focal_windows = [3, 3, 3, 3]
+
+ if "tiny" in model_name or "small" in model_name or "base" in model_name:
+ focal_windows = [3, 3, 3, 3]
+ if "lrf" in model_name:
+ focal_levels = [3, 3, 3, 3]
+ else:
+ focal_levels = [2, 2, 2, 2]
+
+ if "tiny" in model_name:
+ embed_dim = 96
+ elif "small" in model_name:
+ embed_dim = 96
+ elif "base" in model_name:
+ embed_dim = 128
+ elif "large" in model_name:
+ embed_dim = 192
+ elif "xlarge" in model_name:
+ embed_dim = 256
+ elif "huge" in model_name:
+ embed_dim = 352
+
+ # set label information
+ repo_id = "huggingface/label-files"
+ if "large" in model_name or "huge" in model_name:
+ filename = "imagenet-22k-id2label.json"
+ else:
+ filename = "imagenet-1k-id2label.json"
+
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ label2id = {v: k for k, v in id2label.items()}
+
+ config = FocalNetConfig(
+ embed_dim=embed_dim,
+ depths=depths,
+ focal_levels=focal_levels,
+ focal_windows=focal_windows,
+ use_conv_embed=use_conv_embed,
+ id2label=id2label,
+ label2id=label2id,
+ use_post_layernorm=use_post_layernorm,
+ use_layerscale=use_layerscale,
+ )
+
+ return config
+
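+ # Example of the mapping above (comment only, not part of the original file):
+ # "focalnet-base-lrf" -> depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3],
+ # focal_windows=[3, 3, 3, 3], ImageNet-1k labels, no convolutional embedding.
+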
+
+def rename_key(name):
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
+ if "patch_embed.norm" in name:
+ name = name.replace("patch_embed.norm", "embeddings.norm")
+ if "layers" in name:
+ name = "encoder." + name
+ if "encoder.layers" in name:
+ name = name.replace("encoder.layers", "encoder.stages")
+ if "downsample.proj" in name:
+ name = name.replace("downsample.proj", "downsample.projection")
+ if "blocks" in name:
+ name = name.replace("blocks", "layers")
+ if "modulation.f.weight" in name or "modulation.f.bias" in name:
+ name = name.replace("modulation.f", "modulation.projection_in")
+ if "modulation.h.weight" in name or "modulation.h.bias" in name:
+ name = name.replace("modulation.h", "modulation.projection_context")
+ if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
+ name = name.replace("modulation.proj", "modulation.projection_out")
+
+ if name == "norm.weight":
+ name = "layernorm.weight"
+ if name == "norm.bias":
+ name = "layernorm.bias"
+
+ if "head" in name:
+ name = name.replace("head", "classifier")
+ else:
+ name = "focalnet." + name
+
+ return name
+
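+ # Example of the renaming above (comment only, not part of the original file):
+ # "layers.0.blocks.1.modulation.f.weight"
+ # -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"
+ # "head.weight" -> "classifier.weight"
+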
+
+def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
+ # fmt: off
+ model_name_to_url = {
+ "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
+ "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
+ "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
+ "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
+ "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
+ "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
+ "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
+ "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
+ "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
+ "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
+ }
+ # fmt: on
+
+ checkpoint_url = model_name_to_url[model_name]
+ print("Checkpoint URL: ", checkpoint_url)
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
+
+ # rename keys
+ for key in state_dict.copy().keys():
+ val = state_dict.pop(key)
+ state_dict[rename_key(key)] = val
+
+ config = get_focalnet_config(model_name)
+ model = FocalNetForImageClassification(config)
+ model.eval()
+
+ # load state dict
+ model.load_state_dict(state_dict)
+
+ # verify conversion
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+
+ processor = BitImageProcessor(
+ do_resize=True,
+ size={"shortest_edge": 256},
+ resample=PILImageResampling.BILINEAR,
+ do_center_crop=True,
+ crop_size=224,
+ do_normalize=True,
+ image_mean=IMAGENET_DEFAULT_MEAN,
+ image_std=IMAGENET_DEFAULT_STD,
+ )
+ image = Image.open(requests.get(url, stream=True).raw)
+ inputs = processor(images=image, return_tensors="pt")
+
+ image_transforms = transforms.Compose(
+ [
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+ ]
+ )
+
+ original_pixel_values = image_transforms(image).unsqueeze(0)
+
+ # verify pixel_values
+ assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
+
+ outputs = model(**inputs)
+
+ predicted_class_idx = outputs.logits.argmax(-1).item()
+ print("Predicted class:", model.config.id2label[predicted_class_idx])
+
+ print("First values of logits:", outputs.logits[0, :3])
+
+ if model_name == "focalnet-tiny":
+ expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
+ elif model_name == "focalnet-tiny-lrf":
+ expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
+ elif model_name == "focalnet-small":
+ expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
+ elif model_name == "focalnet-small-lrf":
+ expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
+ elif model_name == "focalnet-base":
+ expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
+ elif model_name == "focalnet-base-lrf":
+ expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
+ assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
+ print("Looks ok!")
+
+ if pytorch_dump_folder_path is not None:
+ print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ print(f"Pushing model and processor of {model_name} to the hub...")
+ model.push_to_hub(f"{model_name}")
+ processor.push_to_hub(f"{model_name}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_name",
+ default="focalnet-tiny",
+ type=str,
+ help="Name of the FocalNet model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Whether to push the model and processor to the hub.",
+ )
+
+ args = parser.parse_args()
+ convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
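+
+ # Example invocation (comment only, not part of the original file); paths are placeholders:
+ #
+ # python convert_focalnet_to_hf_format.py \
+ #     --model_name focalnet-tiny \
+ #     --pytorch_dump_folder_path ./focalnet-tiny-converted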
diff --git a/venv/lib/python3.10/site-packages/transformers/models/focalnet/modeling_focalnet.py b/venv/lib/python3.10/site-packages/transformers/models/focalnet/modeling_focalnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..a452f4171d1b6a65671a2592a76f74c6c8d07ebe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/focalnet/modeling_focalnet.py
@@ -0,0 +1,1032 @@
+# coding=utf-8
+# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch FocalNet model."""
+
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BackboneOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_focalnet import FocalNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "FocalNetConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "microsoft/focalnet-tiny"
+_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "microsoft/focalnet-tiny"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class FocalNetEncoderOutput(ModelOutput):
+ """
+ FocalNet encoder's outputs, with potential hidden states.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class FocalNetModelOutput(ModelOutput):
+ """
+ FocalNet model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
+ Average pooling of the last layer hidden-state.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ pooler_output: Optional[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class FocalNetMaskedImageModelingOutput(ModelOutput):
+ """
+ FocalNet masked image model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
+ Masked image modeling (MIM) loss.
+ reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Reconstructed pixel values.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ reconstruction: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class FocalNetImageClassifierOutput(ModelOutput):
+ """
+ FocalNet outputs for image classification.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class FocalNetEmbeddings(nn.Module):
+ """
+ Construct the patch embeddings and layernorm. Optionally, also the mask token.
+ """
+
+ def __init__(self, config, use_mask_token=False):
+ super().__init__()
+
+ self.patch_embeddings = FocalNetPatchEmbeddings(
+ config=config,
+ image_size=config.image_size,
+ patch_size=config.patch_size,
+ num_channels=config.num_channels,
+ embed_dim=config.embed_dim,
+ use_conv_embed=config.use_conv_embed,
+ is_stem=True,
+ )
+ self.patch_grid = self.patch_embeddings.grid_size
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
+
+ self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(
+ self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
+ ) -> Tuple[torch.Tensor]:
+ embeddings, output_dimensions = self.patch_embeddings(pixel_values)
+ embeddings = self.norm(embeddings)
+ batch_size, seq_len, _ = embeddings.size()
+
+ if bool_masked_pos is not None:
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
+ # replace the masked visual tokens by mask_tokens
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
+
+ embeddings = self.dropout(embeddings)
+ return embeddings, output_dimensions
+
+
+class FocalNetPatchEmbeddings(nn.Module):
+ def __init__(
+ self,
+ config,
+ image_size,
+ patch_size,
+ num_channels,
+ embed_dim,
+ add_norm=False,
+ use_conv_embed=False,
+ is_stem=False,
+ ):
+ super().__init__()
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+ self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
+
+ if use_conv_embed:
+ # if we choose to use conv embedding, then we treat the stem and non-stem differently
+ if is_stem:
+ kernel_size = 7
+ padding = 2
+ stride = 4
+ else:
+ kernel_size = 3
+ padding = 1
+ stride = 2
+ self.projection = nn.Conv2d(
+ num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
+ )
+ else:
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+ if add_norm:
+ self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+ else:
+ self.norm = None
+
+ def maybe_pad(self, pixel_values, height, width):
+ if width % self.patch_size[1] != 0:
+ pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
+ if height % self.patch_size[0] != 0:
+ pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
+ return pixel_values
+
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
+ _, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ # pad the input to be divisible by self.patch_size, if needed
+ pixel_values = self.maybe_pad(pixel_values, height, width)
+ embeddings = self.projection(pixel_values)
+ _, _, height, width = embeddings.shape
+ output_dimensions = (height, width)
+ embeddings = embeddings.flatten(2).transpose(1, 2)
+
+ if self.norm is not None:
+ embeddings = self.norm(embeddings)
+
+ return embeddings, output_dimensions
+
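+ # Example of maybe_pad above (comment only, not part of the original file): with
+ # patch_size=(4, 4), a 225x223 input is right-padded by 1 and bottom-padded by 3 so that
+ # both spatial dimensions become divisible by 4 before the patch projection.
+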
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
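+ # Worked example (comment only, not part of the original file): with drop_prob=0.1 during
+ # training, keep_prob is 0.9 and each sample draws a value in [0.9, 1.9), which floors to 0
+ # with probability 0.1 and to 1 otherwise; kept samples are scaled by 1/0.9 so the expected
+ # output equals the input.
+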
+
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->FocalNet
+class FocalNetDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class FocalNetModulation(nn.Module):
+ def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0):
+ super().__init__()
+
+ self.dim = dim
+ self.focal_window = config.focal_windows[index]
+ self.focal_level = config.focal_levels[index]
+ self.focal_factor = focal_factor
+ self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation
+ self.normalize_modulator = config.normalize_modulator
+
+ self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias)
+ self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias)
+
+ self.activation = nn.GELU()
+ self.projection_out = nn.Linear(dim, dim)
+ self.projection_dropout = nn.Dropout(projection_dropout)
+ self.focal_layers = nn.ModuleList()
+
+ self.kernel_sizes = []
+ for k in range(self.focal_level):
+ kernel_size = self.focal_factor * k + self.focal_window
+ self.focal_layers.append(
+ nn.Sequential(
+ nn.Conv2d(
+ dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False
+ ),
+ nn.GELU(),
+ )
+ )
+ self.kernel_sizes.append(kernel_size)
+ if self.use_post_layernorm_in_modulation:
+ self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_state):
+ """
+ Args:
+ hidden_state:
+ Input features with shape of (batch_size, height, width, num_channels)
+ """
+ num_channels = hidden_state.shape[-1]
+
+ # pre linear projection
+ x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
+ q, ctx, self.gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)
+
+ # context aggregation
+ ctx_all = 0
+ for level in range(self.focal_level):
+ ctx = self.focal_layers[level](ctx)
+ ctx_all = ctx_all + ctx * self.gates[:, level : level + 1]
+ ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
+ ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level :]
+
+ # normalize context
+ if self.normalize_modulator:
+ ctx_all = ctx_all / (self.focal_level + 1)
+
+ # focal modulation
+ self.modulator = self.projection_context(ctx_all)
+ x_out = q * self.modulator
+ x_out = x_out.permute(0, 2, 3, 1).contiguous()
+ if self.use_post_layernorm_in_modulation:
+ x_out = self.layernorm(x_out)
+
+ # post linear projection
+ x_out = self.projection_out(x_out)
+ x_out = self.projection_dropout(x_out)
+ return x_out
+
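+ # Shape walkthrough of the modulation above (comment only, not part of the original file),
+ # for input (batch, height, width, dim) and focal_level L:
+ # projection_in then permute -> (batch, 2*dim + L + 1, height, width), split into a query
+ # (dim), a context (dim) and L + 1 gates; the L depthwise convolutions with growing kernels
+ # plus a gated global-average term aggregate the context, projection_context (a 1x1 conv)
+ # turns it into the modulator that multiplies the query, and projection_out maps the result
+ # back to (batch, height, width, dim).
+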
+
+class FocalNetMlp(nn.Module):
+ def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.activation = ACT2FN[config.hidden_act]
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, hidden_state):
+ hidden_state = self.fc1(hidden_state)
+ hidden_state = self.activation(hidden_state)
+ hidden_state = self.drop(hidden_state)
+ hidden_state = self.fc2(hidden_state)
+ hidden_state = self.drop(hidden_state)
+ return hidden_state
+
+
+class FocalNetLayer(nn.Module):
+ r"""Focal Modulation Network layer (block).
+
+ Args:
+ config (`FocalNetConfig`):
+ Model config.
+ index (`int`):
+ Layer index.
+ dim (`int`):
+ Number of input channels.
+ input_resolution (`Tuple[int]`):
+ Input resolution.
+ drop_path (`float`, *optional*, defaults to 0.0):
+ Stochastic depth rate.
+ """
+
+ def __init__(self, config, index, dim, input_resolution, drop_path=0.0):
+ super().__init__()
+
+ self.config = config
+
+ # layer-specific attributes
+ self.dim = dim
+ self.input_resolution = input_resolution
+
+ # general attributes
+ self.drop = config.hidden_dropout_prob
+ self.use_post_layernorm = config.use_post_layernorm
+
+ self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.modulation = FocalNetModulation(
+ config=config,
+ index=index,
+ dim=dim,
+ projection_dropout=self.drop,
+ )
+
+ self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ mlp_hidden_dim = int(dim * config.mlp_ratio)
+ self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop)
+
+ self.gamma_1 = 1.0
+ self.gamma_2 = 1.0
+ if config.use_layerscale:
+ self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)
+ self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)
+
+ def forward(self, hidden_state, input_dimensions):
+ height, width = input_dimensions
+ batch_size, _, num_channels = hidden_state.shape
+ shortcut = hidden_state
+
+ # Focal Modulation
+ hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state)
+ hidden_state = hidden_state.view(batch_size, height, width, num_channels)
+ hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels)
+ hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state)
+
+ # FFN
+ hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state)
+ hidden_state = hidden_state + self.drop_path(
+ self.gamma_2
+ * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state)))
+ )
+
+ return hidden_state
+
+
+class FocalNetStage(nn.Module):
+ def __init__(self, config, index, input_resolution):
+ super().__init__()
+
+ self.config = config
+ self.num_stages = len(config.depths)
+
+ embed_dim = [config.embed_dim * (2**i) for i in range(self.num_stages)]
+ dim = embed_dim[index]
+ out_dim = embed_dim[index + 1] if (index < self.num_stages - 1) else None
+ downsample = FocalNetPatchEmbeddings if (index < self.num_stages - 1) else None
+
+ # stochastic depth decay rule
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
+ drop_path = dpr[sum(config.depths[:index]) : sum(config.depths[: index + 1])]
+
+ self.layers = nn.ModuleList(
+ [
+ FocalNetLayer(
+ config=config,
+ index=index,
+ dim=dim,
+ input_resolution=input_resolution,
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+ )
+ for i in range(config.depths[index])
+ ]
+ )
+
+ if downsample is not None:
+ self.downsample = downsample(
+ config=config,
+ image_size=input_resolution,
+ patch_size=2,
+ num_channels=dim,
+ embed_dim=out_dim,
+ add_norm=True,
+ use_conv_embed=config.use_conv_embed,
+ is_stem=False,
+ )
+ else:
+ self.downsample = None
+
+ self.pointing = False
+
+ def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int]) -> Tuple[torch.Tensor]:
+ height, width = input_dimensions
+ for layer_module in self.layers:
+ hidden_states = layer_module(hidden_states, input_dimensions)
+
+ hidden_states_before_downsampling = hidden_states
+ if self.downsample is not None:
+ height, width = input_dimensions
+ hidden_states = hidden_states.transpose(1, 2).reshape(
+ hidden_states_before_downsampling.shape[0], -1, height, width
+ )
+ hidden_states, output_dimensions = self.downsample(hidden_states)
+
+ else:
+ output_dimensions = (height, width, height, width)
+
+ stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
+
+ return stage_outputs
+
+
+class FocalNetEncoder(nn.Module):
+ def __init__(self, config, grid_size):
+ super().__init__()
+ self.num_stages = len(config.depths)
+ self.config = config
+
+ self.stages = nn.ModuleList(
+ [
+ FocalNetStage(
+ config=config,
+ index=i_layer,
+ input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
+ )
+ for i_layer in range(self.num_stages)
+ ]
+ )
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ output_hidden_states: Optional[bool] = False,
+ output_hidden_states_before_downsampling: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, FocalNetEncoderOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_reshaped_hidden_states = () if output_hidden_states else None
+
+ if output_hidden_states:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange b (h w) c -> b c h w
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ for i, stage_module in enumerate(self.stages):
+ if self.gradient_checkpointing and self.training:
+ stage_outputs = self._gradient_checkpointing_func(
+ stage_module.__call__,
+ hidden_states,
+ input_dimensions,
+ )
+ else:
+ stage_outputs = stage_module(hidden_states, input_dimensions)
+
+ hidden_states = stage_outputs[0]
+ hidden_states_before_downsampling = stage_outputs[1]
+ output_dimensions = stage_outputs[2]
+
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
+
+ if output_hidden_states and output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
+ # rearrange b (h w) c -> b c h w
+ # here we use the original (not downsampled) height and width
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
+ )
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states_before_downsampling,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange b (h w) c -> b c h w
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
+
+ return FocalNetEncoderOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ reshaped_hidden_states=all_reshaped_hidden_states,
+ )
+
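+# The encoder repeatedly switches between the flattened `(batch, height*width, channels)` layout used by
+# the stages and the channel-first `(batch, channels, height, width)` layout. A minimal sketch of that
+# rearrange on a small hypothetical tensor (not part of the model code):
+#
+#     import torch
+#
+#     batch_size, height, width, channels = 2, 4, 4, 96
+#     hidden_states = torch.randn(batch_size, height * width, channels)
+#     reshaped = hidden_states.view(batch_size, height, width, channels).permute(0, 3, 1, 2)
+#     print(reshaped.shape)  # torch.Size([2, 96, 4, 4])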
+
+# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->FocalNet,swin->focalnet
+class FocalNetPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = FocalNetConfig
+ base_model_prefix = "focalnet"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+FOCALNET_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`FocalNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+FOCALNET_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`AutoImageProcessor.__call__`] for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare FocalNet Model outputting raw hidden-states without any specific head on top.",
+ FOCALNET_START_DOCSTRING,
+)
+class FocalNetModel(FocalNetPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
+ super().__init__(config)
+ self.config = config
+ self.num_stages = len(config.depths)
+ self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
+
+ self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token)
+ self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid)
+
+ self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=FocalNetModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, FocalNetModelOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+ """
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ input_dimensions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+
+ pooled_output = None
+ if self.pooler is not None:
+ pooled_output = self.pooler(sequence_output.transpose(1, 2))
+ pooled_output = torch.flatten(pooled_output, 1)
+
+ if not return_dict:
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return output
+
+ return FocalNetModelOutput(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
+ )
+
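+# The pooling step above collapses the `(batch, sequence_length, num_features)` hidden states into a single
+# `(batch, num_features)` vector with adaptive average pooling. A minimal sketch of the same operation on a
+# hypothetical tensor (not part of the model code):
+#
+#     import torch
+#     from torch import nn
+#
+#     sequence_output = torch.randn(2, 49, 768)  # (batch_size, sequence_length, num_features)
+#     pooler = nn.AdaptiveAvgPool1d(1)
+#     pooled_output = torch.flatten(pooler(sequence_output.transpose(1, 2)), 1)
+#     print(pooled_output.shape)  # torch.Size([2, 768])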
+
+@add_start_docstrings(
+ """FocalNet Model with a decoder on top for masked image modeling.
+
+ This follows the same implementation as in [SimMIM](https://arxiv.org/abs/2111.09886).
+
+
+
+ Note that we provide a script to pre-train this model on custom data in our [examples
+ directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
+
+
+ """,
+ FOCALNET_START_DOCSTRING,
+)
+class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True)
+
+ self.num_stages = len(config.depths)
+ num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
+ self.decoder = nn.Sequential(
+ nn.Conv2d(
+ in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
+ ),
+ nn.PixelShuffle(config.encoder_stride),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FocalNetMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, FocalNetMaskedImageModelingOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
+ >>> config = FocalNetConfig()
+ >>> model = FocalNetForMaskedImageModeling(config)
+
+ >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
+ >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
+ >>> # create random boolean mask of shape (batch_size, num_patches)
+ >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
+
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+ >>> loss, reconstructed_pixel_values = outputs.loss, outputs.logits
+ >>> list(reconstructed_pixel_values.shape)
+ [1, 3, 192, 192]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.focalnet(
+ pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ # Reshape to (batch_size, num_channels, height, width)
+ sequence_output = sequence_output.transpose(1, 2)
+ batch_size, num_channels, sequence_length = sequence_output.shape
+ height = width = math.floor(sequence_length**0.5)
+ sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
+
+ # Reconstruct pixel values
+ reconstructed_pixel_values = self.decoder(sequence_output)
+
+ masked_im_loss = None
+ if bool_masked_pos is not None:
+ size = self.config.image_size // self.config.patch_size
+ bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
+ mask = (
+ bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
+ .repeat_interleave(self.config.patch_size, 2)
+ .unsqueeze(1)
+ .contiguous()
+ )
+ reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
+ masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
+
+ if not return_dict:
+ output = (reconstructed_pixel_values,) + outputs[2:]
+ return ((masked_im_loss,) + output) if masked_im_loss is not None else output
+
+ return FocalNetMaskedImageModelingOutput(
+ loss=masked_im_loss,
+ reconstruction=reconstructed_pixel_values,
+ hidden_states=outputs.hidden_states,
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
+ )
+
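+# The masked-image-modeling loss above upsamples the patch-level boolean mask to pixel resolution with
+# `repeat_interleave` before weighting the per-pixel L1 loss. A minimal sketch of that mask expansion with
+# small hypothetical sizes (a 4x4 patch grid of 2x2-pixel patches) instead of real config values:
+#
+#     import torch
+#
+#     patch_size, size = 2, 4  # size == image_size // patch_size
+#     bool_masked_pos = torch.randint(low=0, high=2, size=(1, size * size)).bool()
+#     mask = (
+#         bool_masked_pos.reshape(-1, size, size)
+#         .repeat_interleave(patch_size, 1)
+#         .repeat_interleave(patch_size, 2)
+#         .unsqueeze(1)
+#     )
+#     print(mask.shape)  # torch.Size([1, 1, 8, 8])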
+
+@add_start_docstrings(
+ """
+ FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. for
+ ImageNet.
+ """,
+ FOCALNET_START_DOCSTRING,
+)
+class FocalNetForImageClassification(FocalNetPreTrainedModel):
+ # Copied from transformers.models.swin.modeling_swin.SwinForImageClassification.__init__ with Swin->FocalNet, swin->focalnet
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.focalnet = FocalNetModel(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=FocalNetImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, FocalNetImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.focalnet(
+ pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return FocalNetImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
+ )
+
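+# The classification head above chooses its loss from `config.problem_type`: mean-squared error for
+# regression, cross-entropy for single-label classification, and BCE-with-logits for multi-label
+# classification. A minimal sketch of the single-label case with hypothetical logits and labels:
+#
+#     import torch
+#     from torch.nn import CrossEntropyLoss
+#
+#     num_labels = 3
+#     logits = torch.randn(2, num_labels)  # (batch_size, num_labels)
+#     labels = torch.tensor([0, 2])  # (batch_size,)
+#     loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))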
+
+@add_start_docstrings(
+ """
+ FocalNet backbone, to be used with frameworks like X-Decoder.
+ """,
+ FOCALNET_START_DOCSTRING,
+)
+class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
+ def __init__(self, config: FocalNetConfig):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.num_features = [config.embed_dim] + config.hidden_sizes
+ self.focalnet = FocalNetModel(config)
+
+ # initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
+ >>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")
+
+ >>> inputs = processor(image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True)
+
+ hidden_states = outputs.reshaped_hidden_states
+
+ feature_maps = ()
+ for idx, stage in enumerate(self.stage_names):
+ if stage in self.out_features:
+ feature_maps += (hidden_states[idx],)
+
+ if not return_dict:
+ output = (feature_maps,)
+ if output_hidden_states:
+ output += (outputs.hidden_states,)
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=None,
+ )
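+
+
+# The backbone forward above keeps only the reshaped hidden states whose stage names appear in
+# `config.out_features`. A minimal sketch of that selection with hypothetical stage names and dummy
+# feature maps (not the real FocalNet stages):
+#
+#     import torch
+#
+#     stage_names = ["stem", "stage1", "stage2"]
+#     out_features = ["stage1", "stage2"]
+#     hidden_states = [torch.randn(1, 96, 56, 56), torch.randn(1, 192, 28, 28), torch.randn(1, 384, 14, 14)]
+#     feature_maps = tuple(h for stage, h in zip(stage_names, hidden_states) if stage in out_features)
+#     print(len(feature_maps))  # 2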
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..86c0f7c1c0b99d1bfaff6d2b644d7b7c7b67441a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__init__.py
@@ -0,0 +1,88 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all.
+
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {
+ "configuration_tvlt": ["TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP", "TvltConfig"],
+ "feature_extraction_tvlt": ["TvltFeatureExtractor"],
+ "processing_tvlt": ["TvltProcessor"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tvlt"] = [
+ "TVLT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TvltModel",
+ "TvltForPreTraining",
+ "TvltForAudioVisualClassification",
+ "TvltPreTrainedModel",
+ ]
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_tvlt"] = ["TvltImageProcessor"]
+
+
+if TYPE_CHECKING:
+ from .configuration_tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig
+ from .processing_tvlt import TvltProcessor
+ from .feature_extraction_tvlt import TvltFeatureExtractor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tvlt import (
+ TVLT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TvltForAudioVisualClassification,
+ TvltForPreTraining,
+ TvltModel,
+ TvltPreTrainedModel,
+ )
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_tvlt import TvltImageProcessor
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
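+
+# Because of the `_LazyModule` indirection above, importing this package does not pull in the torch- or
+# vision-gated submodules until one of their symbols is actually accessed, e.g. (illustrative only):
+#
+#     from transformers.models.tvlt import TvltConfig  # config only, no torch required
+#     from transformers.models.tvlt import TvltModel  # triggers the torch-gated modeling import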
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..46de8a5e36436ee6912c027e2fbad29be5393bd9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/configuration_tvlt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/configuration_tvlt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fd4e4d3ab2c529fd3574ceaf65d3bf7fec5ae70
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/configuration_tvlt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/feature_extraction_tvlt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/feature_extraction_tvlt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad9fdce8c53cb951ddbcffe25c730911f80d3a0f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/feature_extraction_tvlt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/image_processing_tvlt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/image_processing_tvlt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb1bebacc5499710f9ab92a5e937e80c4a114afb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/image_processing_tvlt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/modeling_tvlt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/modeling_tvlt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00401a8df7dd20250b76f38822b59eccb4890424
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/modeling_tvlt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/processing_tvlt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/processing_tvlt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b27b2952c1dc1d17d5489f760e2e5bf1a02e9e2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvlt/__pycache__/processing_tvlt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/configuration_tvlt.py b/venv/lib/python3.10/site-packages/transformers/models/tvlt/configuration_tvlt.py
new file mode 100644
index 0000000000000000000000000000000000000000..063befc9d77f92d97c034928a85cd638b7c1d5bc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tvlt/configuration_tvlt.py
@@ -0,0 +1,187 @@
+# coding=utf-8
+# Copyright 2023 MURGe-Lab and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TVLT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TvltConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TvltModel`]. It is used to instantiate a TVLT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the TVLT
+ [ZinengTang/tvlt-base](https://huggingface.co/ZinengTang/tvlt-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ spectrogram_length (`int`, *optional*, defaults to 2048):
+ The time length of each audio spectrogram.
+ frequency_length (`int`, *optional*, defaults to 128):
+ The frequency length of audio spectrogram.
+ image_patch_size (`List[int]`, *optional*, defaults to `[16, 16]`):
+ The size (resolution) of each image patch.
+ audio_patch_size (`List[int]`, *optional*, defaults to `[16, 16]`):
+ The size (resolution) of each audio patch.
+ num_image_channels (`int`, *optional*, defaults to 3):
+ The number of input image channels.
+ num_audio_channels (`int`, *optional*, defaults to 1):
+ The number of input audio channels.
+ num_frames (`int`, *optional*, defaults to 8):
+ The maximum number of frames for an input video.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ use_mean_pooling (`bool`, *optional*, defaults to `False`):
+ Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token.
+ decoder_num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the decoder.
+ decoder_hidden_size (`int`, *optional*, defaults to 512):
+ Dimensionality of the decoder.
+ decoder_num_hidden_layers (`int`, *optional*, defaults to 8):
+ Number of hidden layers in the decoder.
+ decoder_intermediate_size (`int`, *optional*, defaults to 2048):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
+ pixel_mask_ratio (`float`, *optional*, defaults to 0.75):
+ Image patch masking ratio.
+ audio_mask_ratio (`float`, *optional*, defaults to 0.15):
+ Audio patch masking ratio.
+        audio_mask_type (`str`, *optional*, defaults to `"frame-level"`):
+            Audio patch masking type, chosen between `"frame-level"` and `"patch-level"`.
+        task_matching (`bool`, *optional*, defaults to `True`):
+            Whether to use the vision-audio matching task in pretraining.
+        task_mae (`bool`, *optional*, defaults to `True`):
+            Whether to use the masked auto-encoder (MAE) task in pretraining.
+        loss_type (`str`, *optional*, defaults to `"classification"`):
+            Type of loss to compute, either `"regression"` or `"classification"`.
+
+ Example:
+
+ ```python
+ >>> from transformers import TvltConfig, TvltModel
+
+    >>> # Initializing a TVLT ZinengTang/tvlt-base style configuration
+ >>> configuration = TvltConfig()
+
+    >>> # Initializing a model (with random weights) from the ZinengTang/tvlt-base style configuration
+ >>> model = TvltModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "tvlt"
+
+ def __init__(
+ self,
+ image_size=224,
+ spectrogram_length=2048,
+ frequency_length=128,
+ image_patch_size=[16, 16],
+ audio_patch_size=[16, 16],
+ num_image_channels=3,
+ num_audio_channels=1,
+ num_frames=8,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-6,
+ qkv_bias=True,
+ use_mean_pooling=False,
+ decoder_num_attention_heads=16,
+ decoder_hidden_size=512,
+ decoder_num_hidden_layers=8,
+ decoder_intermediate_size=2048,
+ pixel_mask_ratio=0.75,
+ audio_mask_ratio=0.15,
+ audio_mask_type="frame-level",
+ task_matching=True,
+ task_mae=True,
+ loss_type="classification",
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+        if audio_mask_type not in ("frame-level", "patch-level"):
+            raise ValueError(
+                "audio_mask_type must be one of two acceptable strategies - {'frame-level', 'patch-level'}, "
+                f"got {audio_mask_type}"
+            )
+
+ self.image_size = image_size
+ self.spectrogram_length = spectrogram_length
+ self.frequency_length = frequency_length
+ self.image_patch_size = image_patch_size
+ self.audio_patch_size = audio_patch_size
+ self.num_image_channels = num_image_channels
+ self.num_audio_channels = num_audio_channels
+ self.num_frames = num_frames
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.qkv_bias = qkv_bias
+ self.use_mean_pooling = use_mean_pooling
+
+ self.decoder_num_attention_heads = decoder_num_attention_heads
+ self.decoder_hidden_size = decoder_hidden_size
+ self.decoder_num_hidden_layers = decoder_num_hidden_layers
+ self.decoder_intermediate_size = decoder_intermediate_size
+ self.pixel_mask_ratio = pixel_mask_ratio
+ self.audio_mask_ratio = audio_mask_ratio
+ self.audio_mask_type = audio_mask_type
+
+ self.task_matching = task_matching
+ self.task_mae = task_mae
+ self.loss_type = loss_type
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/feature_extraction_tvlt.py b/venv/lib/python3.10/site-packages/transformers/models/tvlt/feature_extraction_tvlt.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dc5e0463138c526b3d2d1ab1d922315d7d4c792
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tvlt/feature_extraction_tvlt.py
@@ -0,0 +1,230 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for TVLT."""
+
+from math import ceil
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...audio_utils import mel_filter_bank, spectrogram, window_function
+from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class TvltFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a TVLT audio feature extractor. This feature extractor can be used to prepare audios for the model.
+
+ This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users
+ should refer to this superclass for more information regarding those methods.
+
+ Args:
+        spectrogram_length (`int`, *optional*, defaults to 2048):
+            The time length of each audio spectrogram.
+ num_channels (`int` *optional*, defaults to 1):
+ Number of audio channels.
+ patch_size (`List[int]` *optional*, defaults to `[16, 16]`):
+ The patch size of audio patch embedding.
+ feature_size (`int`, *optional*, defaults to 128):
+ The frequency length of audio spectrogram.
+ sampling_rate (`int`, *optional*, defaults to 44100):
+            The sampling rate at which the audio files should be digitized, expressed in Hertz (Hz).
+        hop_length_to_sampling_rate (`int`, *optional*, defaults to 86):
+            Ratio of the sampling rate to the hop length, where the hop length is the stride of the overlapping
+            windows for the STFT used to obtain the Mel Frequency coefficients. For example, with a sampling rate
+            of 44100 and `hop_length_to_sampling_rate=86`, the hop length is 44100 // 86 = 512.
+ n_fft (`int`, *optional*, defaults to 2048):
+ Size of the Fourier transform.
+ padding_value (`float`, *optional*, defaults to 0.0):
+ Padding value used to pad the audio. Should correspond to silences.
+ """
+
+ model_input_names = ["audio_values", "audio_mask"]
+
+ def __init__(
+ self,
+ spectrogram_length=2048,
+ num_channels=1,
+ patch_size=[16, 16],
+ feature_size=128,
+ sampling_rate=44100,
+ hop_length_to_sampling_rate=86,
+ n_fft=2048,
+ padding_value=0.0,
+ **kwargs,
+ ):
+ super().__init__(
+ feature_size=feature_size,
+ sampling_rate=sampling_rate,
+ padding_value=padding_value,
+ **kwargs,
+ )
+
+ self.spectrogram_length = spectrogram_length
+ self.num_channels = num_channels
+ self.patch_size = patch_size
+ self.freq_len = feature_size // self.patch_size[1]
+ self.n_fft = n_fft
+ self.hop_length = sampling_rate // hop_length_to_sampling_rate
+ self.sampling_rate = sampling_rate
+ self.padding_value = padding_value
+ self.mel_filters = mel_filter_bank(
+ num_frequency_bins=1 + n_fft // 2,
+ num_mel_filters=feature_size,
+ min_frequency=0.0,
+ max_frequency=22050.0,
+ sampling_rate=sampling_rate,
+ norm="slaney",
+ mel_scale="slaney",
+ ).T
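+        # With the defaults above, the derived values work out to (a worked example, not new behavior):
+        #   hop_length = 44100 // 86 = 512  (roughly 86 spectrogram frames per second of audio)
+        #   freq_len   = 128 // 16  = 8     (number of patches along the frequency axis)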
+
+ def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
+ """
+        Compute the log-mel spectrogram of the provided audio. This gives results similar to Whisper's original
+        torch implementation, up to a tolerance of 1e-5.
+ """
+ log_spec = spectrogram(
+ waveform,
+ window_function(self.n_fft, "hann"),
+ frame_length=self.n_fft,
+ hop_length=self.hop_length,
+ power=2.0,
+ mel_filters=self.mel_filters.T,
+ log_mel="dB",
+ db_range=80.0,
+ )
+ log_spec = log_spec[:, :-1]
+ log_spec = log_spec - 20.0
+ log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
+ return log_spec
+
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_attention_mask: Optional[bool] = True,
+ sampling_rate: Optional[int] = None,
+ resample: bool = False,
+ mask_audio: bool = False,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Main method to prepare one or several audio(s) for the model.
+
+ Args:
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
+ stereo, i.e. single float per timestep.
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+            return_attention_mask (`bool`, *optional*, defaults to `True`):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask)
+
+
+
+            For TvltTransformer models, `attention_mask` should always be passed for batched inference, to avoid
+            subtle bugs.
+
+
+
+ sampling_rate (`int`, *optional*):
+                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+                `sampling_rate` at the forward call to prevent silent errors and to allow automatic speech
+                recognition pipeline support. The current model supports sampling rates of 16000 and 44100.
+ resample (`bool`, *optional*, defaults to `False`):
+ If the sampling rate is not matched, resample the input audio to match.
+ mask_audio (`bool`, *optional*, defaults to `False`):
+ Whether or not to mask input audio for MAE task.
+
+ Returns:
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+ - **audio_values** -- Audio values to be fed to a model, of shape (batch_size, num_channels, height,
+ width).
+
+ - **audio_mask** -- Audio masks to be fed to a model, of shape (batch_size, num_audio_patches).
+ """
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ "This feature extractor is set to support sampling rate"
+ f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
+ f" with {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+ if is_batched:
+ raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float32)
+ # always return batch
+ if not is_batched:
+ raw_speech = [np.asarray([raw_speech]).T]
+
+ # Convert audio signals to log mel spectrograms, truncate by time axis
+ audio_features = [
+ self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
+ ]
+ if isinstance(audio_features[0], List):
+ audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
+
+ # Create audio attention mask
+ max_patch_len = max(
+ [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
+ ) # The maximum number of audio patches in a batch
+ if return_attention_mask:
+ audio_mask = [
+ (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
+ + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
+ for feature in audio_features
+ ]
+ audio_mask = np.array(audio_mask).astype(np.float32)
+
+ # convert into correct format for padding
+ max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
+ padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
+ padded_audio_features = padded_audio_features * self.padding_value
+ for i in range(len(audio_features)):
+ feature = audio_features[i]
+ padded_audio_features[i, :, : feature.shape[0], :] = feature
+
+ # return as BatchFeature
+ if return_attention_mask:
+ data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
+ else:
+ data = {"audio_values": padded_audio_features}
+
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
+ return encoded_inputs
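+
+
+# A minimal usage sketch for this feature extractor, assuming a randomly generated mono waveform instead of
+# real audio (shapes depend on the default settings above):
+#
+#     import numpy as np
+#
+#     feature_extractor = TvltFeatureExtractor()
+#     waveform = np.random.randn(44100).astype(np.float32)  # 1 second of audio at 44.1 kHz
+#     inputs = feature_extractor(waveform, sampling_rate=44100, return_tensors="np")
+#     print(inputs["audio_values"].shape)  # (1, 1, padded_time_length, 128)
+#     print(inputs["audio_mask"].shape)  # (1, padded_time_length // 16 * 8)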
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/image_processing_tvlt.py b/venv/lib/python3.10/site-packages/transformers/models/tvlt/image_processing_tvlt.py
new file mode 100644
index 0000000000000000000000000000000000000000..f13101c15a96152c6dc35f179009e760437300e0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tvlt/image_processing_tvlt.py
@@ -0,0 +1,434 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for TVLT."""
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ is_valid_image,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+def make_batched(videos) -> List[List[ImageInput]]:
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
+ return videos
+
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
+ videos_dim = np.array(videos[0]).ndim
+ if videos_dim == 3:
+ return [videos]
+ elif videos_dim == 4:
+ return videos
+
+ elif is_valid_image(videos):
+ videos_dim = np.array(videos).ndim
+ if videos_dim == 3:
+ return [[videos]]
+ elif videos_dim == 4:
+ return [videos]
+ elif videos_dim == 5:
+ return videos
+
+ raise ValueError(f"Could not make batched video from {videos}")
+
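+# `make_batched` above normalizes the accepted input layouts into a list of videos, where each video is a
+# list of frames. A rough summary of the dimensionality handling (illustrative, not exhaustive):
+#
+#     single image (3D array)            -> [[image]]   one video with a single frame
+#     single video (4D array)            -> [video]     a batch with a single video
+#     batch of videos (5D array)         -> returned as-is
+#     list of 3D frames                  -> [frames]    treated as one video
+#     list of videos (nested lists/4D)   -> returned as-is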
+
+class TvltImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a TVLT image processor.
+
+ This processor can be used to prepare either videos or images for the model by converting images to 1-frame videos.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
+ Size of the output image after resizing. The shortest edge of the image will be resized to
+            `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
+ `size` in the `preprocess` method.
+ patch_size (`List[int]` *optional*, defaults to [16,16]):
+ The patch size of image patch embedding.
+ num_frames (`int` *optional*, defaults to 8):
+ The maximum number of video frames.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
+ parameter in the `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to 1/255):
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
+ in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = [
+ "pixel_values",
+ "pixel_mask",
+ "pixel_values_mixed",
+ "pixel_mask_mixed",
+ ]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ patch_size: List[int] = [16, 16],
+ num_frames: int = 8,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = IMAGENET_STANDARD_MEAN,
+ image_std: Optional[Union[float, List[float]]] = IMAGENET_STANDARD_STD,
+ init_mask_generator=False,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 224}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.size = size
+ self.patch_size = patch_size
+ self.num_frames = num_frames
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean
+ self.image_std = image_std
+ self._valid_processor_keys = [
+ "videos",
+ "do_resize",
+ "size",
+ "patch_size",
+ "num_frames",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "is_mixed",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
+ have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its
+ shortest edge of length `s` while keeping the aspect ratio of the original image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+                Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ if "shortest_edge" in size:
+ output_size = get_resize_output_image_size(
+ image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+ )
+ elif "height" in size and "width" in size:
+ output_size = (size["height"], size["width"])
+ else:
+ raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single image."""
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+
+ if is_scaled_image(image) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ return image
+
+ def preprocess(
+ self,
+ videos: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ patch_size: List[int] = None,
+ num_frames: int = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ is_mixed: bool = False,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+        Preprocess a video or an image, or a batch of videos or images.
+
+ Args:
+ videos (`ImageInput`):
+ Images or videos to preprocess. Expects a single or batch of frames with pixel values ranging from 0 to
+ 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after applying resize.
+ patch_size (`List[int]` *optional*, defaults to self.patch_size):
+ The patch size of image patch embedding.
+ num_frames (`int` *optional*, defaults to self.num_frames):
+ The maximum number of video frames.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+                has an effect if `do_resize` is set to `True`.
+            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                Whether to center crop the image.
+            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                Size of the image after applying the center crop.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values between [0 - 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+            is_mixed (`bool`, *optional*):
+                Whether the input video contains negative samples.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the inferred channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+ - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
+ width).
+
+ - **pixel_mask** -- Pixel masks to be fed to a model, of shape (batch_size, num_pixel_patches).
+
+            - **pixel_values_mixed** -- Pixel values with both positive and negative samples to be fed to a model, of
+              shape (batch_size, num_channels, height, width).
+
+            - **pixel_mask_mixed** -- Pixel masks with both positive and negative samples to be fed to a model, of
+              shape (batch_size, num_pixel_patches).
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+ patch_size = patch_size if patch_size is not None else self.patch_size
+        num_frames = num_frames if num_frames is not None else self.num_frames
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(videos):
+ raise ValueError(
+ "Invalid image or video type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ videos = make_batched(videos)
+
+        # Check that the number of frames does not exceed the maximum number of frames of the model
+ for video in videos:
+ if len(video) > self.num_frames:
+ raise ValueError(
+ f"number of frames must not be greater than the maximum frames of the model {self.num_frames}."
+ )
+
+ max_num_frames = max([len(video) for video in videos])
+ num_patches_per_image = (size["shortest_edge"] // patch_size[0]) ** 2
+ video_masks = np.array(
+ [
+ len(video) * num_patches_per_image * [1] + (max_num_frames - len(video)) * num_patches_per_image * [0]
+ for video in videos
+ ]
+ )
+
+ videos = [
+ [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in video
+ ]
+ for video in videos
+ ]
+
+ # If videos contain both positive/negative, use mixed key for video-audio matching task
+ if is_mixed:
+ data = {"pixel_values_mixed": videos, "pixel_mask_mixed": video_masks}
+ else:
+ data = {"pixel_values": videos, "pixel_mask": video_masks}
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
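+
+
+# A minimal usage sketch for this image processor, assuming a randomly generated 4-frame RGB video instead
+# of real data (shapes correspond to the default 224-pixel crop and 16x16 patches):
+#
+#     import numpy as np
+#
+#     image_processor = TvltImageProcessor()
+#     video = list(np.random.randint(0, 256, (4, 240, 320, 3), dtype=np.uint8))
+#     inputs = image_processor(video, return_tensors="np")
+#     print(inputs["pixel_values"].shape)  # (1, 4, 3, 224, 224)
+#     print(inputs["pixel_mask"].shape)  # (1, 784) == (1, num_frames * (224 // 16) ** 2)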
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/modeling_tvlt.py b/venv/lib/python3.10/site-packages/transformers/models/tvlt/modeling_tvlt.py
new file mode 100644
index 0000000000000000000000000000000000000000..f841c47ea4bc568fc00464a5cdd36720ae1fd8d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tvlt/modeling_tvlt.py
@@ -0,0 +1,1299 @@
+# coding=utf-8
+# Copyright 2023 MURGe-Lab and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch TVLT model."""
+
+
+import collections.abc
+import math
+from copy import deepcopy
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, SequenceClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_tvlt import TvltConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "TvltConfig"
+_CHECKPOINT_FOR_DOC = "ZinengTang/tvlt-base"
+
+
+from ..deprecated._archive_maps import TVLT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class TvltModelOutput(ModelOutput):
+ """
+ Class for TvltModel's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ last_pixel_hidden_state (`torch.FloatTensor` of shape `(batch_size, pixel_sequence_length, hidden_size)`):
+ Pixel sequence of hidden-states at the output of the last layer of the model.
+ last_audio_hidden_state (`torch.FloatTensor` of shape `(batch_size, audio_sequence_length, hidden_size)`):
+ Audio sequence of hidden-states at the output of the last layer of the model.
+ pixel_label_masks (`torch.FloatTensor` of shape `(batch_size, pixel_patch_length)`):
+ Tensor indicating which pixel patches are masked (1) and which are not (0).
+ audio_label_masks (`torch.FloatTensor` of shape `(batch_size, audio_patch_length)`):
+ Tensor indicating which audio patches are masked (1) and which are not (0).
+ pixel_ids_restore (`torch.LongTensor` of shape `(batch_size, pixel_patch_length)`):
+ Tensor containing the ids permutation of pixel masking.
+ audio_ids_restore (`torch.LongTensor` of shape `(batch_size, audio_patch_length)`):
+ Tensor containing the ids permutation of audio masking.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ last_pixel_hidden_state: torch.FloatTensor = None
+ last_audio_hidden_state: torch.FloatTensor = None
+ pixel_label_masks: torch.LongTensor = None
+ audio_label_masks: torch.LongTensor = None
+ pixel_ids_restore: torch.LongTensor = None
+ audio_ids_restore: torch.LongTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class TvltDecoderOutput(ModelOutput):
+ """
+ Class for TvltDecoder's outputs, with potential hidden states and attentions.
+
+ Args:
+        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, decoder_hidden_size)`):
+            Decoder hidden states, fed to the pixel or audio reconstruction head.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class TvltForPreTrainingOutput(ModelOutput):
+ """
+ Class for TvltForPreTraining's outputs, with potential hidden states and attentions.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`):
+ Pixel reconstruction loss.
+ matching_logits (`torch.FloatTensor` of shape `(batch_size, 1)`):
+ Matching objective logits.
+        pixel_logits (`torch.FloatTensor` of shape
+            `(batch_size, pixel_patch_length, image_patch_size[0] ** 2 * num_image_channels)`): Pixel reconstruction
+            logits.
+        audio_logits (`torch.FloatTensor` of shape
+            `(batch_size, audio_patch_length, audio_patch_size[0] * audio_patch_size[1] * num_audio_channels)`): Audio
+            reconstruction logits.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ matching_logits: torch.FloatTensor = None
+ pixel_logits: torch.FloatTensor = None
+ audio_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+def generate_pixel_mask_noise(pixel_values, pixel_mask=None, mask_ratio=0.75):
+    """Generate noise for pixel masking."""
+
+ batch_size, seq_len = pixel_values.shape[:2]
+ noise = torch.rand((batch_size, seq_len), device=pixel_values.device) # noise in [0, 1]
+ len_keep = int(seq_len * (1 - mask_ratio))
+ return noise, len_keep
+
+
+def generate_audio_mask_noise(audio_values, audio_mask=None, mask_ratio=0.75, mask_type="patch-level", freq_len=8):
+ """Generate noise for audio masking."""
+
+ batch_size, seq_len = audio_values.shape[:2]
+ if mask_type == "frame-level":
+ num_time_patches = seq_len // freq_len
+ noise = (
+ torch.rand(batch_size, num_time_patches, device=audio_values.device)
+ .unsqueeze(-1)
+ .repeat(1, 1, freq_len)
+ .view(batch_size, seq_len)
+ ) # noise in [0, 1]
+ elif mask_type == "patch-level":
+ noise = torch.rand(batch_size, seq_len, device=audio_values.device) # noise in [0, 1]
+ len_keep = int(seq_len * (1 - mask_ratio))
+ return noise, len_keep
+
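+# Illustrative sketch (not part of the model): with `mask_type="frame-level"` the same noise value is repeated
+# across all frequency patches of a time frame, so whole frames are kept or dropped together, while
+# `mask_type="patch-level"` samples noise independently per patch. For example, with freq_len=2 and seq_len=4:
+#
+#     noise = torch.rand(1, 2).unsqueeze(-1).repeat(1, 1, 2).view(1, 4)
+#     # e.g. tensor([[0.31, 0.31, 0.87, 0.87]]) -- both frequency patches of a frame share one score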
+
+def random_masking(sequence, noise, len_keep, attention_masks=None):
+ """
+    Perform random masking by per-sample shuffling along the sequence dimension. Per-sample shuffling is done by
+    argsorting random noise. `sequence` has shape `[batch_size, seq_len, hidden_dim]`.
+ """
+
+ batch_size, seq_len, hidden_dim = sequence.shape
+
+ # sort noise for each sample
+ ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
+ ids_restore = torch.argsort(ids_shuffle, dim=1)
+
+ # keep the first subset
+ ids_keep = ids_shuffle[:, :len_keep]
+ sequence_masked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, hidden_dim))
+
+ # generate the binary mask: 0 is keep, 1 is remove
+ label_masks = torch.ones([batch_size, seq_len], device=sequence.device)
+ label_masks[:, :len_keep] = 0
+ # unshuffle to get the binary mask
+ label_masks = torch.gather(label_masks, dim=1, index=ids_restore)
+
+ if attention_masks is not None:
+ label_masks *= attention_masks
+ attention_masks = torch.gather(attention_masks, dim=1, index=ids_keep)
+
+ return sequence_masked, attention_masks, label_masks, ids_restore
+
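+# Illustrative sketch (not part of the model): the argsort trick above keeps the `len_keep` positions with the
+# smallest noise and records `ids_restore` so masked tokens can later be scattered back into their original order.
+# For example, with len_keep=2:
+#
+#     noise = torch.tensor([[0.9, 0.1, 0.5, 0.3]])
+#     ids_shuffle = torch.argsort(noise, dim=1)        # tensor([[1, 3, 2, 0]])
+#     ids_restore = torch.argsort(ids_shuffle, dim=1)  # tensor([[3, 0, 2, 1]])
+#     ids_keep = ids_shuffle[:, :2]                    # positions 1 and 3 (lowest noise) are kept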
+
+class TvltPixelEmbeddings(nn.Module):
+ """Construct the patch and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.patch_embeddings = TvltPixelPatchEmbeddings(config)
+ self.num_patches_per_image = self.patch_embeddings.num_patches_per_image
+
+ self.type_embed_v = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.temporal_embed = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size))
+ self.pos_embed_v = nn.Parameter(torch.zeros(1, self.num_patches_per_image, config.hidden_size))
+
+ self.config = config
+
+ def forward(self, pixel_values, attention_masks=None):
+ # create patch embeddings
+ batch_size, num_frames, num_channels, height, width = pixel_values.shape
+
+ embeddings = self.patch_embeddings(pixel_values)
+ embeddings += self.pos_embed_v.repeat(1, num_frames, 1)
+ embeddings += torch.repeat_interleave(self.temporal_embed[:, :num_frames], self.num_patches_per_image, dim=1)
+ embeddings += self.type_embed_v
+
+ return embeddings, attention_masks
+
+
+class TvltAudioEmbeddings(nn.Module):
+ """Construct the patch and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.patch_embeddings = TvltAudioPatchEmbeddings(config)
+ self.num_patches = self.patch_embeddings.num_patches
+
+ self.type_embed_a = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.num_freq_patches = config.frequency_length // config.audio_patch_size[1]
+ self.pos_embed_a = nn.Parameter(torch.zeros(1, self.num_patches // self.num_freq_patches, config.hidden_size))
+ self.freq_embed = nn.Parameter(torch.zeros(1, self.num_freq_patches, config.hidden_size))
+
+ self.num_freq_patches = config.frequency_length // config.audio_patch_size[1]
+ self.config = config
+
+ def forward(self, audio_values, attention_masks=None):
+ # create patch embeddings
+ embeddings = self.patch_embeddings(audio_values)
+
+ num_time_patches = embeddings.size(1) // self.num_freq_patches
+ embeddings += self.freq_embed.repeat(1, num_time_patches, 1)
+ embeddings += torch.repeat_interleave(self.pos_embed_a[:, :num_time_patches], self.num_freq_patches, dim=1)
+ embeddings += self.type_embed_a
+
+ return embeddings, attention_masks
+
+
+class TvltPixelPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.image_patch_size
+ num_channels, hidden_size = config.num_image_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches_per_image = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches_per_image = num_patches_per_image
+ self.hidden_size = hidden_size
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ batch_size, num_frames, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+                "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
+ )
+ if height != self.image_size[0] or width != self.image_size[1]:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
+ )
+
+ pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width)
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ embeddings = embeddings.reshape(batch_size, num_frames * self.num_patches_per_image, self.hidden_size)
+
+ return embeddings
+
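+# Illustrative sketch (not part of the model), assuming 224x224 frames and a 16x16 image patch size as in the
+# docstring examples below: each frame yields (224 // 16) ** 2 = 196 patches, so 8 frames become a sequence of
+# 8 * 196 = 1568 patch embeddings of dimension `hidden_size`:
+#
+#     pixel_values = torch.randn(2, 8, 3, 224, 224)  # (batch_size, num_frames, num_channels, height, width)
+#     embeddings = TvltPixelPatchEmbeddings(config)(pixel_values)
+#     # embeddings.shape == (2, 1568, config.hidden_size)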
+
+class TvltAudioPatchEmbeddings(nn.Module):
+ """
+ This class turns `audio_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ spectrogram_length, frequency_length, patch_size = (
+ config.spectrogram_length,
+ config.frequency_length,
+ config.audio_patch_size,
+ )
+ num_channels, hidden_size = config.num_audio_channels, config.hidden_size
+
+ spectrogram_size = (spectrogram_length, frequency_length)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (spectrogram_size[1] // patch_size[1]) * (spectrogram_size[0] // patch_size[0])
+ patch_shape = (spectrogram_size[0] // patch_size[0], spectrogram_size[1] // patch_size[1])
+ self.spectrogram_size = spectrogram_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+ self.patch_shape = patch_shape
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, audio_values: torch.Tensor) -> torch.Tensor:
+ batch_size, num_channels, height, width = audio_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+                "Make sure that the channel dimension of the audio values matches the one set in the configuration."
+ )
+ if height > self.spectrogram_size[0] or width != self.spectrogram_size[1]:
+ raise ValueError(
+ f"Input audio size ({height}*{width}) doesn't match model"
+ f" ({self.spectrogram_size[0]}*{self.spectrogram_size[1]})."
+ )
+ embeddings = self.projection(audio_values).flatten(2).transpose(1, 2)
+
+ return embeddings
+
+
+# Copied from transformers.models.vilt.modeling_vilt.ViltSelfAttention with Vilt->Tvlt
+class TvltSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.vilt.modeling_vilt.ViltSelfOutput with Vilt->Tvlt
+class TvltSelfOutput(nn.Module):
+ """
+ The residual connection is defined in TvltLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: TvltConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vilt.modeling_vilt.ViltAttention with Vilt->Tvlt
+class TvltAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = TvltSelfAttention(config)
+ self.output = TvltSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
+ self_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.vilt.modeling_vilt.ViltIntermediate with Vilt->Tvlt
+class TvltIntermediate(nn.Module):
+ def __init__(self, config: TvltConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vilt.modeling_vilt.ViltOutput with Vilt->Tvlt
+class TvltOutput(nn.Module):
+ def __init__(self, config: TvltConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+
+# Copied from transformers.models.vilt.modeling_vilt.ViltLayer with Vilt->Tvlt
+class TvltLayer(nn.Module):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = TvltAttention(config)
+ self.intermediate = TvltIntermediate(config)
+ self.output = TvltOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in ViLT, layernorm is applied before self-attention
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states.to(attention_output.device)
+
+ # in ViLT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+
+ # second residual connection is done here
+ layer_output = self.output(layer_output, hidden_states)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+# Copied from transformers.models.vilt.modeling_vilt.ViltEncoder with Vilt->Tvlt
+class TvltEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([TvltLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class TvltPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TvltConfig
+ base_model_prefix = "tvlt"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+TVLT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`TvltConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TVLT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for
+ details.
+
+ audio_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Audio values. Audio values can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for
+ details.
+
+ pixel_mask (`torch.FloatTensor` of shape `(batch_size, num_pixel_patches)`):
+ Pixel masks. Pixel masks can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for
+ details.
+
+ audio_mask (`torch.FloatTensor` of shape `(batch_size, num_audio_patches)`):
+ Audio masks. Audio masks can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for
+ details.
+
+ pixel_values_mixed (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+ Pixel values that mix positive and negative samples in Tvlt vision-audio matching. Pixel values mixed can
+ be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details.
+
+        pixel_mask_mixed (`torch.FloatTensor` of shape `(batch_size, num_pixel_patches)`):
+ Pixel masks of pixel_values_mixed. Pixel masks mixed can be obtained using [`TvltProcessor`]. See
+ [`TvltProcessor.__call__`] for details.
+
+ mask_pixel (`bool`, *optional*):
+ Whether to mask pixel for MAE tasks. Only set to True in TvltForPreTraining.
+
+ mask_audio (`bool`, *optional*):
+ Whether to mask audio for MAE tasks. Only set to True in TvltForPreTraining.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare TVLT Model transformer outputting raw hidden-states without any specific head on top.",
+ TVLT_START_DOCSTRING,
+)
+class TvltModel(TvltPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.pixel_embeddings = TvltPixelEmbeddings(config)
+ self.audio_embeddings = TvltAudioEmbeddings(config)
+ self.encoder = TvltEncoder(config)
+
+ self.cls_embedding = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+
+ if config.use_mean_pooling:
+ self.layernorm = None
+ else:
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.pixel_embeddings.patch_embeddings, self.audio_embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TvltModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ audio_values: torch.FloatTensor,
+ pixel_mask: Optional[torch.FloatTensor] = None,
+ audio_mask: Optional[torch.FloatTensor] = None,
+ mask_pixel: bool = False,
+ mask_audio: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], TvltModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import TvltProcessor, TvltModel
+ >>> import numpy as np
+ >>> import torch
+
+ >>> num_frames = 8
+ >>> images = list(np.random.randn(num_frames, 3, 224, 224))
+ >>> audio = list(np.random.randn(10000))
+
+ >>> processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
+ >>> model = TvltModel.from_pretrained("ZinengTang/tvlt-base")
+
+ >>> input_dict = processor(images, audio, sampling_rate=44100, return_tensors="pt")
+
+ >>> outputs = model(**input_dict)
+        >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ pixel_embedding_output, pixel_mask = self.pixel_embeddings(pixel_values, pixel_mask)
+
+ audio_embedding_output, audio_mask = self.audio_embeddings(audio_values, audio_mask)
+
+ # Mask pixel if mask_pixel is True
+ pixel_label_masks = None
+ pixel_ids_restore = None
+ if mask_pixel:
+ pixel_mask_noise, pixel_len_keep = generate_pixel_mask_noise(
+ pixel_embedding_output, pixel_mask=pixel_mask, mask_ratio=self.config.pixel_mask_ratio
+ )
+ pixel_embedding_output, pixel_mask, pixel_label_masks, pixel_ids_restore = random_masking(
+ pixel_embedding_output,
+ pixel_mask_noise,
+ pixel_len_keep,
+ attention_masks=pixel_mask,
+ )
+
+ # Mask audio if mask_audio is True
+ audio_label_masks = None
+ audio_ids_restore = None
+ if mask_audio:
+ num_freq_patches = self.config.frequency_length // self.config.audio_patch_size[1]
+ audio_mask_noise, audio_len_keep = generate_audio_mask_noise(
+ audio_embedding_output,
+ audio_mask=audio_mask,
+ mask_ratio=self.config.audio_mask_ratio,
+ mask_type=self.config.audio_mask_type,
+ freq_len=num_freq_patches,
+ )
+ audio_embedding_output, audio_mask, audio_label_masks, audio_ids_restore = random_masking(
+ audio_embedding_output,
+ audio_mask_noise,
+ audio_len_keep,
+ attention_masks=audio_mask,
+ )
+
+ # Prepare for encoder inputs and attention masks
+ batch_size = pixel_values.size(0)
+ embedding_output = torch.cat(
+ [self.cls_embedding.repeat(batch_size, 1, 1), pixel_embedding_output, audio_embedding_output], 1
+ )
+ masked_pixel_len = pixel_embedding_output.size(1)
+
+ attention_mask = None
+ if pixel_mask is not None and audio_mask is not None:
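+            # The first column of pixel_mask doubles as the attention mask for the prepended cls embedding.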
+ attention_mask = torch.cat([pixel_mask[:, :1], pixel_mask, audio_mask], 1)
+
+ input_shape = embedding_output.size()
+ extended_attention_mask = None
+ if attention_mask is not None:
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ if self.layernorm is not None:
+ sequence_output = self.layernorm(sequence_output)
+
+ pixel_sequence_output = sequence_output[:, 1 : 1 + masked_pixel_len]
+ audio_sequence_output = sequence_output[:, 1 + masked_pixel_len :]
+ if not return_dict:
+ return (
+ sequence_output,
+ pixel_sequence_output,
+ audio_sequence_output,
+ pixel_label_masks,
+ audio_label_masks,
+ pixel_ids_restore,
+ audio_ids_restore,
+ ) + encoder_outputs[1:]
+
+ return TvltModelOutput(
+ last_hidden_state=sequence_output,
+ last_pixel_hidden_state=pixel_sequence_output,
+ last_audio_hidden_state=audio_sequence_output,
+ pixel_label_masks=pixel_label_masks,
+ audio_label_masks=audio_label_masks,
+ pixel_ids_restore=pixel_ids_restore,
+ audio_ids_restore=audio_ids_restore,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class TvltDecoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ decoder_config = deepcopy(config)
+ decoder_config.hidden_size = config.decoder_hidden_size
+ decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
+ decoder_config.num_attention_heads = config.decoder_num_attention_heads
+ decoder_config.intermediate_size = config.decoder_intermediate_size
+ self.decoder_layers = nn.ModuleList(
+ [TvltLayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
+ )
+
+ self.layernorm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps)
+
+ self.gradient_checkpointing = False
+ self.config = config
+
+ def forward(
+ self,
+ hidden_states,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ # apply Transformer layers (blocks)
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ for i, layer_module in enumerate(self.decoder_layers):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ None,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, output_attentions=output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # predictor projection
+ logits = self.layernorm(hidden_states)
+
+ if not return_dict:
+ return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)
+ return TvltDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions)
+
+
+@add_start_docstrings(
+ "The TVLT Model transformer with the decoder on top for self-supervised pre-training.",
+ TVLT_START_DOCSTRING,
+)
+class TvltForPreTraining(TvltPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.task_matching = config.task_matching
+ self.task_mae = config.task_mae
+ if not (self.task_matching or self.task_mae):
+ raise ValueError("Must set at least one of matching task and MAE task to true")
+
+ self.tvlt = TvltModel(config)
+
+ if self.task_matching:
+ self.matching_head = TvltMatchingHead(config)
+
+ if self.task_mae:
+ self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=True)
+
+ self.pixel_mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
+ self.audio_mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
+
+ self.decoder = TvltDecoder(config)
+
+ decoder_hidden_size = config.decoder_hidden_size
+
+ num_frames = config.num_frames
+ num_patches_per_image = self.tvlt.pixel_embeddings.num_patches_per_image
+ self.decoder_pixel_pos_embed = nn.Parameter(torch.zeros(1, num_patches_per_image, decoder_hidden_size))
+ self.decoder_temporal_embed = nn.Parameter(torch.zeros(1, config.num_frames, decoder_hidden_size))
+ self.decoder_pixel_type_embed = nn.Parameter(torch.zeros(1, 1, decoder_hidden_size))
+
+ num_audio_patches = self.tvlt.audio_embeddings.num_patches
+ num_freq_patches = config.frequency_length // config.audio_patch_size[1]
+ self.decoder_audio_pos_embed = nn.Parameter(
+ torch.zeros(1, num_audio_patches // num_freq_patches, decoder_hidden_size)
+ )
+ self.decoder_freq_embed = nn.Parameter(torch.zeros(1, num_freq_patches, decoder_hidden_size))
+ self.decoder_audio_type_embed = nn.Parameter(torch.zeros(1, 1, decoder_hidden_size))
+
+ pixel_mae_output_dim = self.config.image_patch_size[0] ** 2 * self.config.num_image_channels
+ self.pixel_mae_head = TvltMAEHead(config, pixel_mae_output_dim)
+ audio_mae_output_dim = (
+ self.config.audio_patch_size[0] * self.config.audio_patch_size[1] * self.config.num_audio_channels
+ )
+ self.audio_mae_head = TvltMAEHead(config, audio_mae_output_dim)
+
+ self.num_frames = num_frames
+ self.num_patches_per_image = num_patches_per_image
+ self.num_freq_patches = num_freq_patches
+ self.image_patch_size = config.image_patch_size
+ self.audio_patch_size = config.audio_patch_size
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def patchify_pixel(self, pixel_values):
+ """
+ pixel_values: [batch_size, num_frames, 3, height, width]
+ """
+ batch_size, num_frames, num_channels, height, width = pixel_values.shape
+ num_patches_height = pixel_values.shape[3] // self.image_patch_size[0]
+ num_patches_width = pixel_values.shape[4] // self.image_patch_size[1]
+ patchified_pixel_values = pixel_values.reshape(
+ shape=(
+ batch_size,
+ num_frames,
+ num_channels,
+ num_patches_height,
+ self.image_patch_size[0],
+ num_patches_width,
+ self.image_patch_size[1],
+ )
+ )
+ patchified_pixel_values = torch.einsum("ntchpwq->nthwpqc", patchified_pixel_values)
+ patchified_pixel_values = patchified_pixel_values.reshape(
+ shape=(
+ batch_size,
+ num_patches_height * num_patches_width * num_frames,
+ self.image_patch_size[0] * self.image_patch_size[1] * num_channels,
+ )
+ )
+ return patchified_pixel_values
+
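+    # Illustrative sketch (not part of the model): `patchify_pixel` rearranges raw pixels into per-patch vectors so
+    # they can be compared directly with the decoder's reconstruction. Assuming 224x224 frames and a 16x16 patch:
+    #
+    #     pixel_values = torch.randn(2, 8, 3, 224, 224)
+    #     targets = model.patchify_pixel(pixel_values)  # `model` is a TvltForPreTraining instance
+    #     # targets.shape == (2, 8 * 14 * 14, 16 * 16 * 3)
+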
+ def patchify_audio(self, audio_values):
+ """
+ audio_values: [batch_size, 1, height, width]
+ """
+ batch_size, num_channels, height, width = audio_values.shape
+ num_patches_height = height // self.audio_patch_size[0]
+ num_patches_width = width // self.audio_patch_size[1]
+ patchified_audio_values = audio_values.reshape(
+ shape=(
+ batch_size,
+ num_channels,
+ num_patches_height,
+ self.audio_patch_size[0],
+ num_patches_width,
+ self.audio_patch_size[1],
+ )
+ )
+ patchified_audio_values = torch.einsum("nchpwq->nhwpqc", patchified_audio_values)
+ patchified_audio_values = patchified_audio_values.reshape(
+ shape=(
+ batch_size,
+ num_patches_height * num_patches_width,
+ self.audio_patch_size[0] * self.audio_patch_size[1] * num_channels,
+ )
+ )
+ return patchified_audio_values
+
+ def pixel_mae_loss(self, pixel_values, pixel_predictions, mask):
+ patchified_pixel_values = self.patchify_pixel(pixel_values)
+ loss = (pixel_predictions - patchified_pixel_values) ** 2
+        loss = loss.mean(dim=-1)  # [batch_size, pixel_patch_length], mean loss per patch
+ loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches
+ return loss
+
+ def audio_mae_loss(self, audio_values, audio_predictions, mask):
+ patchified_audio_values = self.patchify_audio(audio_values)
+ loss = (audio_predictions - patchified_audio_values) ** 2
+        loss = loss.mean(dim=-1)  # [batch_size, audio_patch_length], mean loss per patch
+ loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches
+ return loss
+
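+    # Illustrative sketch (not part of the model): both MAE losses are mean squared errors averaged only over the
+    # masked patches, i.e. `(per_patch_loss * mask).sum() / mask.sum()`, so visible patches do not contribute:
+    #
+    #     per_patch_loss = torch.tensor([[1.0, 4.0, 9.0]])
+    #     mask = torch.tensor([[0.0, 1.0, 1.0]])  # only the last two patches were masked out
+    #     (per_patch_loss * mask).sum() / mask.sum()  # tensor(6.5000)
+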
+ def concatenate_mask(self, mask_token, sequence, ids_restore):
+ batch_size, seq_length, dim = sequence.shape
+ mask_tokens = mask_token.repeat(batch_size, ids_restore.shape[1] - seq_length, 1)
+ padded_sequence = torch.cat([sequence, mask_tokens], dim=1)
+ padded_sequence = torch.gather(
+ padded_sequence, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, dim)
+ ) # unshuffle
+ return padded_sequence
+
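+    # Note: `concatenate_mask` appends one learnable mask token per removed patch and then uses `ids_restore`
+    # (produced by `random_masking`) to unshuffle the sequence back into its original patch order, so the decoder
+    # positional embeddings can be added as if no patch had been dropped.
+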
+ @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TvltForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ audio_values: torch.FloatTensor,
+ pixel_mask: Optional[torch.FloatTensor] = None,
+ audio_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ pixel_values_mixed: Optional[torch.FloatTensor] = None,
+ pixel_mask_mixed: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], TvltForPreTrainingOutput]:
+ r"""
+        pixel_values_mixed (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+            Pixel values that mix positive and negative samples in Tvlt vision-audio matching. Pixel values mixed can
+            be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details.
+
+        pixel_mask_mixed (`torch.FloatTensor` of shape `(batch_size, num_pixel_patches)`):
+            Pixel masks of pixel_values_mixed. Pixel masks mixed can be obtained using [`TvltProcessor`]. See
+            [`TvltProcessor.__call__`] for details.
+
+ labels (`torch.LongTensor` of shape `(batch_size, num_labels)`, *optional*):
+ Labels for computing the vision audio matching loss. Indices should be in `[0, 1]`. num_labels has to be 1.
+
+ Return:
+
+ Examples:
+
+ ```python
+ >>> from transformers import TvltProcessor, TvltForPreTraining
+ >>> import numpy as np
+ >>> import torch
+
+ >>> num_frames = 8
+ >>> images = list(np.random.randn(num_frames, 3, 224, 224))
+ >>> images_mixed = list(np.random.randn(num_frames, 3, 224, 224))
+ >>> audio = list(np.random.randn(10000))
+ >>> processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
+ >>> model = TvltForPreTraining.from_pretrained("ZinengTang/tvlt-base")
+ >>> input_dict = processor(
+ ... images, audio, images_mixed, sampling_rate=44100, mask_pixel=True, mask_audio=True, return_tensors="pt"
+ ... )
+
+ >>> outputs = model(**input_dict)
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ total_loss = 0.0
+
+ if self.task_matching:
+ if labels is None:
+ raise ValueError("Matching task requires labels")
+ if pixel_values_mixed is None:
+ raise ValueError("Matching task requires pixel_values_mixed")
+
+ outputs = self.tvlt(
+ pixel_values_mixed,
+ audio_values,
+ pixel_mask=pixel_mask_mixed,
+ audio_mask=audio_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ matching_logits = self.matching_head(sequence_output)
+
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(matching_logits.view(-1), labels.view(-1))
+ total_loss += loss
+
+ pixel_logits = None
+ audio_logits = None
+ if self.task_mae and self.training:
+ outputs = self.tvlt(
+ pixel_values,
+ audio_values,
+ pixel_mask=pixel_mask,
+ audio_mask=audio_mask,
+ mask_pixel=True,
+ mask_audio=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pixel_sequence_output = outputs.last_pixel_hidden_state if return_dict else outputs[1]
+ audio_sequence_output = outputs.last_audio_hidden_state if return_dict else outputs[2]
+ pixel_label_masks = outputs.pixel_label_masks if return_dict else outputs[3]
+ audio_label_masks = outputs.audio_label_masks if return_dict else outputs[4]
+ pixel_ids_restore = outputs.pixel_ids_restore if return_dict else outputs[5]
+ audio_ids_restore = outputs.audio_ids_restore if return_dict else outputs[6]
+
+ pixel_decoder_input = self.encoder_to_decoder(
+ pixel_sequence_output
+ ) # [batch_size, num_masked_pixel_patches, decoder_hidden_size]
+ audio_decoder_input = self.encoder_to_decoder(
+ audio_sequence_output
+ ) # [batch_size, num_masked_audio_patches, decoder_hidden_size]
+ num_frames = pixel_values.size(1)
+ pixel_decoder_input = self.concatenate_mask(self.pixel_mask_token, pixel_decoder_input, pixel_ids_restore)
+ pixel_decoder_input = pixel_decoder_input + self.decoder_pixel_pos_embed.repeat(1, num_frames, 1)
+ pixel_decoder_input = pixel_decoder_input + torch.repeat_interleave(
+ self.decoder_temporal_embed[:, :num_frames], self.num_patches_per_image, dim=1
+ )
+ pixel_decoder_input = pixel_decoder_input + self.decoder_pixel_type_embed
+ pixel_decoder_outputs = self.decoder(pixel_decoder_input)
+ pixel_logits = self.pixel_mae_head(pixel_decoder_outputs.logits)
+
+ audio_decoder_input = self.concatenate_mask(self.audio_mask_token, audio_decoder_input, audio_ids_restore)
+ num_time_patches = audio_decoder_input.size(1) // self.num_freq_patches
+ audio_decoder_input = audio_decoder_input + self.decoder_freq_embed.repeat(1, num_time_patches, 1)
+ audio_decoder_input = audio_decoder_input + torch.repeat_interleave(
+ self.decoder_audio_pos_embed[:, :num_time_patches], self.num_freq_patches, dim=1
+ )
+ audio_decoder_input = audio_decoder_input + self.decoder_audio_type_embed
+ audio_decoder_outputs = self.decoder(audio_decoder_input)
+ audio_logits = self.audio_mae_head(audio_decoder_outputs.logits)
+
+ loss = self.pixel_mae_loss(pixel_values, pixel_logits, pixel_label_masks) + self.audio_mae_loss(
+ audio_values, audio_logits, audio_label_masks
+ )
+ total_loss += loss
+
+ if not return_dict:
+ output = (matching_logits, pixel_logits, audio_logits) + outputs[7:]
+ return ((total_loss,) + output) if loss is not None else output
+
+ return TvltForPreTrainingOutput(
+ loss=total_loss,
+ matching_logits=matching_logits,
+ pixel_logits=pixel_logits,
+ audio_logits=audio_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class TvltPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states):
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class TvltMatchingHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.pooler = TvltPooler(config)
+ self.fc = nn.Linear(config.hidden_size, 1)
+
+ def forward(self, hidden_states):
+ hidden_states = self.fc(self.pooler(hidden_states))
+ return hidden_states
+
+
+class TvltMAEHead(nn.Module):
+ def __init__(self, config, output_dim=None):
+ super().__init__()
+ self.config = config
+ self.decoder = nn.Linear(config.decoder_hidden_size, output_dim)
+
+ def forward(self, hidden_states):
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+@add_start_docstrings(
+ """
+ Tvlt Model transformer with a classifier head on top (an MLP on top of the final hidden state of the [CLS] token)
+ for audiovisual classification tasks, e.g. CMU-MOSEI Sentiment Analysis and Audio to Video Retrieval.
+ """,
+ TVLT_START_DOCSTRING,
+)
+class TvltForAudioVisualClassification(TvltPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.tvlt = TvltModel(config)
+
+ # Classifier head
+ self.classifier = nn.Sequential(
+ nn.Linear(config.hidden_size, config.hidden_size * 2),
+ nn.LayerNorm(config.hidden_size * 2, eps=config.layer_norm_eps),
+ nn.GELU(),
+ nn.Linear(config.hidden_size * 2, config.num_labels),
+ )
+ self.config = config
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ audio_values: torch.FloatTensor,
+ pixel_mask: Optional[torch.FloatTensor] = None,
+ audio_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, num_labels)`, *optional*):
+ Labels for computing the audiovisual loss. Indices should be in `[0, ..., num_classes-1]` where num_classes
+ refers to the number of classes in audiovisual tasks.
+
+ Return:
+
+ Examples:
+ ```python
+ >>> from transformers import TvltProcessor, TvltForAudioVisualClassification
+ >>> import numpy as np
+ >>> import torch
+
+ >>> num_frames = 8
+ >>> images = list(np.random.randn(num_frames, 3, 224, 224))
+ >>> audio = list(np.random.randn(10000))
+ >>> processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
+ >>> model = TvltForAudioVisualClassification.from_pretrained("ZinengTang/tvlt-base")
+ >>> input_dict = processor(images, audio, sampling_rate=44100, return_tensors="pt")
+
+ >>> outputs = model(**input_dict)
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.tvlt(
+ pixel_values,
+ audio_values,
+ pixel_mask=pixel_mask,
+ audio_mask=audio_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0][:, 0]
+        logits = self.classifier(sequence_output)  # (batch_size, num_labels)
+
+ loss = None
+ if labels is not None:
+ if self.config.loss_type == "regression":
+ loss_fct = MSELoss()
+ loss = loss_fct(logits, labels)
+ elif self.config.loss_type == "classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[4:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvlt/processing_tvlt.py b/venv/lib/python3.10/site-packages/transformers/models/tvlt/processing_tvlt.py
new file mode 100644
index 0000000000000000000000000000000000000000..c67a3a8c6d6df01080479a44a2de343695c0f42a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tvlt/processing_tvlt.py
@@ -0,0 +1,89 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for TVLT.
+"""
+
+from ...processing_utils import ProcessorMixin
+
+
+class TvltProcessor(ProcessorMixin):
+ r"""
+ Constructs a TVLT processor which wraps a TVLT image processor and TVLT feature extractor into a single processor.
+
+ [`TvltProcessor`] offers all the functionalities of [`TvltImageProcessor`] and [`TvltFeatureExtractor`]. See the
+ docstring of [`~TvltProcessor.__call__`] for more information.
+
+ Args:
+ image_processor (`TvltImageProcessor`):
+ An instance of [`TvltImageProcessor`]. The image processor is a required input.
+ feature_extractor (`TvltFeatureExtractor`):
+ An instance of [`TvltFeatureExtractor`]. The feature extractor is a required input.
+ """
+
+ attributes = ["image_processor", "feature_extractor"]
+ image_processor_class = "TvltImageProcessor"
+ feature_extractor_class = "TvltFeatureExtractor"
+
+ def __init__(self, image_processor, feature_extractor):
+ super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
+
+ self.image_processor = image_processor
+ self.feature_extractor = feature_extractor
+
+ def __call__(
+ self,
+ images=None,
+ audio=None,
+ images_mixed=None,
+ sampling_rate=None,
+ mask_audio=False,
+ mask_pixel=False,
+ *args,
+ **kwargs,
+ ):
+ """
+ Forwards the `images` argument to TvltImageProcessor's [`~TvltImageProcessor.preprocess`] and the `audio`
+ argument to TvltFeatureExtractor's [`~TvltFeatureExtractor.__call__`]. Please refer to the docstring of the
+ above two methods for more information.
+ """
+
+ if images is None and audio is None:
+ raise ValueError("You need to specify either an `images` or `audio` input to process.")
+
+ images_mixed_dict = None
+ if images is not None:
+ images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
+ if images_mixed is not None:
+ images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
+ if audio is not None:
+ audio_dict = self.feature_extractor(
+ audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
+ )
+
+ output_dict = {}
+ if audio is not None:
+ output_dict.update(audio_dict)
+ if images is not None:
+ output_dict.update(images_dict)
+ if images_mixed_dict is not None:
+ output_dict.update(images_mixed_dict)
+ return output_dict
+
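+    # Illustrative usage sketch (mirroring the model docstrings; the exact key names depend on the underlying
+    # image processor and feature extractor):
+    #
+    #     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
+    #     inputs = processor(images, audio, sampling_rate=44100, return_tensors="pt")
+    #     # typically yields "pixel_values"/"pixel_mask" from the image processor and
+    #     # "audio_values"/"audio_mask" from the feature extractor
+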
+ @property
+ def model_input_names(self):
+ image_processor_input_names = self.image_processor.model_input_names
+ feature_extractor_input_names = self.feature_extractor.model_input_names
+ return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))