diff --git a/ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0be9803980f023d8dfeccca640fc13da157f0c0b
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b06a5e68fdff959c564fe2e0059f1ed8e30f41ec8853f4ddd1dab57c0969668
+size 16778411
diff --git a/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8f87d117b438210e19381c1c0acdd110f957f885
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6978ce95f91738af50ffee8d0361627c695bf9f70b281e6efb4db8fb02a2d34
+size 50332843
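The two checkpoint entries above are Git LFS pointers rather than the optimizer tensors themselves; each pointer records only the spec version, the SHA-256 oid, and the byte size of the blob that LFS resolves at checkout. A tiny, purely illustrative sketch of reading such a pointer (the helper name is made up; the format is the one shown above):

```python
# Purely illustrative: parse a Git LFS pointer file like the ones added above,
# which consist of whitespace-separated "key value" lines (version, oid, size).
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    # e.g. {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:...", "size": "16778411"}
    return fields
```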
diff --git a/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relational_noun-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relational_noun-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..d8ce0672c29ac799339056d0464c733e3f169745
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relational_noun-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_distractor_agreement_relational_noun": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_distractor_agreement_relational_noun": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..22adb2995e9b5d4173b4ae7096714514022c8e9f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-loglikelihood
@@ -0,0 +1 @@
+6cb36bbdae7754f8832f50872c3dd511ce12547e00fa0771deb747be3355eb85
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..c9b8c7d06179f5427a99dda5e6b24245e2ea0dbb
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_tough_vs_raising_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_tough_vs_raising_2": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..4c15f2283eb93c5ab4b9cdbddf3e91117211918d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood
@@ -0,0 +1 @@
+d41a9b85e4c31e445bf9b46b8642df02203ccc02b4a9b254bf76066d5c54b4b7
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/boolq-v1-res.json b/lm-evaluation-harness/tests/testdata/boolq-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..291b9f122d0219c93c941daeb9ae362c439bb4e0
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/boolq-v1-res.json
@@ -0,0 +1 @@
+{"results": {"boolq": {"acc": 0.5048929663608562, "acc_stderr": 0.00874463623355505}}, "versions": {"boolq": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/cola-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/cola-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..45737909e7c21c528a647e91cceca3d2534869fc
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/cola-v0-loglikelihood
@@ -0,0 +1 @@
+e8635578ed8ee70b707a666d35e468b9321db24470f80c92080651e2bfa01751
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..ab0d9a4db42a5e4da196834b40457a95bf9a9129
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-loglikelihood
@@ -0,0 +1 @@
+a197ccc8538231404a8e43f5ed0fbbfb2c317b4da337f6e7aa9642131aeb426a
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..1186c252981559fc1e9859252f82aaea27310c4f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood
@@ -0,0 +1 @@
+b85bc849811ccfa9971a6ee3fca7342752c314c0cb6f126e10d9ec4d0450c541
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/gsm8k-v0-res.json b/lm-evaluation-harness/tests/testdata/gsm8k-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..fb6514a0e750d4e3737cf33766fcc851f79bfa48
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/gsm8k-v0-res.json
@@ -0,0 +1 @@
+{"results": {"gsm8k": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"gsm8k": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..8ecb637cfe4eaf6d3bbca863c7bab6188b85425b
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-loglikelihood
@@ -0,0 +1 @@
+bed1e47127cc2893c6aef63b9a0909cca31aa351a703da2a166b01cae03c3311
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..d3f581c9f256191c2c0403a582fd72696150b34a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-loglikelihood
@@ -0,0 +1 @@
+f99a3caece11169f2a5cc951001f92027104afd25d29b2a399883bd4bf118605
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-res.json b/lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..a22fa9036c790cb48e142bd05a59da7824a9c83f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-res.json
@@ -0,0 +1 @@
+{"results": {"iwslt17-en-ar": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.0, "chrf_stderr": 0.0, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"iwslt17-en-ar": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_mt_de-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_mt_de-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..ae19de0e6951bd90cd1e713d14816767496044e8
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_mt_de-v0-loglikelihood
@@ -0,0 +1 @@
+5ad125e1708499832b2cee8c3388f89f9c0277010fd96fbd3359039ce8105984
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_hackernews-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_hackernews-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..48b767bfe706bb035e4553ea9c4119347303bab9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_hackernews-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+ec1082ee5a5326e0d57aa4e73b634937140c1de9af95f154e8ab57b05d9b422b
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..283109f32e0aac45adcbc90c7c8fb41114e7771f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+40b39d120d99a145690444e86acc3e3e24d41e6e0538a75e26929ad84926e5e0
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..bfffde9938833ae29f5665130d844630c7fb9735
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-res.json
@@ -0,0 +1 @@
+{"results": {"pile_wikipedia": {"bits_per_byte": 0.00016834722287561703, "byte_perplexity": 1.0001683613940646, "word_perplexity": 1.001084677949439}}, "versions": {"pile_wikipedia": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..291492556e5182600291565c640a463da7f00616
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-greedy_until
@@ -0,0 +1 @@
+4be7fdda313394f19b5995b00ada1dfa3bb158ee1f020ef8d07ecea260fa60b2
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..ddce46a79fdcb08c3eee1a534c11fc4dd796be53
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-greedy_until
@@ -0,0 +1 @@
+eb5365c46f22ffec9a157991627d6e1fd1117fccffaedfc73619e93bafb5a408
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..db79b7f03fcfc8f7720f1344339e7d94d8a01ebf
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-greedy_until
@@ -0,0 +1 @@
+67f0333ddbcb07d7a9ac12919129a18fe4fea24e4826a11bbdde4fd5ed5ed83f
\ No newline at end of file
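The `*-res.json` files above are lm-evaluation-harness regression fixtures with a flat `{"results": ..., "versions": ...}` layout, while the extension-less `-loglikelihood` / `-greedy_until` files hold only a SHA-256 digest of the serialized requests. A minimal sketch of consuming one of the result fixtures (the path is the boolq file added above; `new_acc` and the tolerance are placeholders for illustration):

```python
import json

# Minimal sketch: load an lm-evaluation-harness result fixture and compare a
# freshly computed metric against it.
with open("lm-evaluation-harness/tests/testdata/boolq-v1-res.json") as f:
    fixture = json.load(f)

expected_acc = fixture["results"]["boolq"]["acc"]  # 0.5048929663608562
task_version = fixture["versions"]["boolq"]        # 1

new_acc = 0.5048929663608562  # stand-in for a value produced by a test run
assert abs(new_acc - expected_acc) < 1e-9
```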
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..25d60d1ee765efb08eaa6242530bf9e8a93fafa9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py
@@ -0,0 +1,109 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {
+ "configuration_efficientformer": [
+ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "EfficientFormerConfig",
+ ]
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_efficientformer"] = [
+ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "EfficientFormerForImageClassification",
+ "EfficientFormerForImageClassificationWithTeacher",
+ "EfficientFormerModel",
+ "EfficientFormerPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_efficientformer"] = [
+ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFEfficientFormerForImageClassification",
+ "TFEfficientFormerForImageClassificationWithTeacher",
+ "TFEfficientFormerModel",
+ "TFEfficientFormerPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_efficientformer import EfficientFormerImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_efficientformer import (
+ EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ EfficientFormerForImageClassification,
+ EfficientFormerForImageClassificationWithTeacher,
+ EfficientFormerModel,
+ EfficientFormerPreTrainedModel,
+ )
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_efficientformer import (
+ TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFEfficientFormerForImageClassification,
+ TFEfficientFormerForImageClassificationWithTeacher,
+ TFEfficientFormerModel,
+ TFEfficientFormerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
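The `__init__.py` above wires EfficientFormer into transformers' lazy-import machinery: `_import_structure` is registered with `_LazyModule`, and the vision-, torch-, and TF-gated branches are only imported when one of their symbols is first touched. A minimal sketch of what that means for callers (assuming an environment where torch is installed):

```python
# Minimal sketch, assuming torch is installed: the package import is cheap,
# and modeling_efficientformer.py is only loaded when a gated symbol is used.
from transformers import EfficientFormerConfig, EfficientFormerModel

config = EfficientFormerConfig()      # plain config, no torch-specific code needed
model = EfficientFormerModel(config)  # first access triggers the torch branch above
```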
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..828f4e628b125aa76a14d923da301c54330c0c78
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3867380a1e3e727ff312f59f4ac17f199885f74
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22a7af10c1238af04e569bc13b18ca521456a49c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..526c4f0866fa9cbbe436c1e133d9e9f8ad048649
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5c8827e1c75fad759c635dcfd82d5c1386eac79
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e88c609cfc34161a7266b27d1c16b358014a1f4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1641c90711f5d4c9e0f8619ab1ff8a5c450f9959
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py
@@ -0,0 +1,170 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" EfficientFormer model configuration"""
+
+from typing import List
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class EfficientFormerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of an [`EfficientFormerModel`]. It is used to
+ instantiate an EfficientFormer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the EfficientFormer
+ [snap-research/efficientformer-l1](https://huggingface.co/snap-research/efficientformer-l1) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ depths (`List[int]`, *optional*, defaults to `[3, 2, 6, 4]`):
+ Depth of each stage.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[48, 96, 224, 448]`):
+ Dimensionality of each stage.
+ downsamples (`List[bool]`, *optional*, defaults to `[True, True, True, True]`):
+ Whether or not to downsample inputs between two stages.
+ dim (`int`, *optional*, defaults to 448):
+ Number of channels in Meta3D layers.
+ key_dim (`int`, *optional*, defaults to 32):
+ The size of the key in the Meta3D block.
+ attention_ratio (`int`, *optional*, defaults to 4):
+ Ratio of the dimension of the query and value to the dimension of the key in the MSHA block.
+ resolution (`int`, *optional*, defaults to 7):
+ Size of each patch.
+ num_hidden_layers (`int`, *optional*, defaults to 5):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the 3D MetaBlock.
+ mlp_expansion_ratio (`int`, *optional*, defaults to 4):
+ Ratio of size of the hidden dimensionality of an MLP to the dimensionality of its input.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings and encoder.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ pool_size (`int`, *optional*, defaults to 3):
+ Kernel size of pooling layers.
+ downsample_patch_size (`int`, *optional*, defaults to 3):
+ The size of patches in downsampling layers.
+ downsample_stride (`int`, *optional*, defaults to 2):
+ The stride of convolution kernels in downsampling layers.
+ downsample_pad (`int`, *optional*, defaults to 1):
+ Padding in downsampling layers.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ Rate at which to increase dropout probability in DropPath.
+ num_meta3d_blocks (`int`, *optional*, defaults to 1):
+ The number of 3D MetaBlocks in the last stage.
+ distillation (`bool`, *optional*, defaults to `True`):
+ Whether to add a distillation head.
+ use_layer_scale (`bool`, *optional*, defaults to `True`):
+ Whether to scale outputs from token mixers.
+ layer_scale_init_value (`float`, *optional*, defaults to 1e-5):
+ Factor by which outputs from token mixers are scaled.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to `224`):
+ The size (resolution) of each image.
+ batch_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the batch normalization layers.
+
+ Example:
+
+ ```python
+ >>> from transformers import EfficientFormerConfig, EfficientFormerModel
+
+ >>> # Initializing an EfficientFormer efficientformer-l1 style configuration
+ >>> configuration = EfficientFormerConfig()
+
+ >>> # Initializing an EfficientFormerModel (with random weights) from the efficientformer-l1 style configuration
+ >>> model = EfficientFormerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "efficientformer"
+
+ def __init__(
+ self,
+ depths: List[int] = [3, 2, 6, 4],
+ hidden_sizes: List[int] = [48, 96, 224, 448],
+ downsamples: List[bool] = [True, True, True, True],
+ dim: int = 448,
+ key_dim: int = 32,
+ attention_ratio: int = 4,
+ resolution: int = 7,
+ num_hidden_layers: int = 5,
+ num_attention_heads: int = 8,
+ mlp_expansion_ratio: int = 4,
+ hidden_dropout_prob: float = 0.0,
+ patch_size: int = 16,
+ num_channels: int = 3,
+ pool_size: int = 3,
+ downsample_patch_size: int = 3,
+ downsample_stride: int = 2,
+ downsample_pad: int = 1,
+ drop_path_rate: float = 0.0,
+ num_meta3d_blocks: int = 1,
+ distillation: bool = True,
+ use_layer_scale: bool = True,
+ layer_scale_init_value: float = 1e-5,
+ hidden_act: str = "gelu",
+ initializer_range: float = 0.02,
+ layer_norm_eps: float = 1e-12,
+ image_size: int = 224,
+ batch_norm_eps: float = 1e-05,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.hidden_sizes = hidden_sizes
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.depths = depths
+ self.mlp_expansion_ratio = mlp_expansion_ratio
+ self.downsamples = downsamples
+ self.dim = dim
+ self.key_dim = key_dim
+ self.attention_ratio = attention_ratio
+ self.resolution = resolution
+ self.pool_size = pool_size
+ self.downsample_patch_size = downsample_patch_size
+ self.downsample_stride = downsample_stride
+ self.downsample_pad = downsample_pad
+ self.drop_path_rate = drop_path_rate
+ self.num_meta3d_blocks = num_meta3d_blocks
+ self.distillation = distillation
+ self.use_layer_scale = use_layer_scale
+ self.layer_scale_init_value = layer_scale_init_value
+ self.image_size = image_size
+ self.batch_norm_eps = batch_norm_eps
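Because `__init__` simply stores every argument on the instance, a variant architecture is described by overriding the stage-level lists and round-tripping through `PretrainedConfig`'s JSON serialization. A small sketch (the overridden values and output directory are arbitrary, not an official variant):

```python
from transformers import EfficientFormerConfig

# Arbitrary example values, not an official EfficientFormer variant: override a
# few stage-level defaults; everything else keeps the defaults shown above.
config = EfficientFormerConfig(
    depths=[2, 2, 4, 3],              # default is [3, 2, 6, 4]
    hidden_sizes=[32, 64, 160, 320],  # default is [48, 96, 224, 448]
)

config.save_pretrained("./efficientformer-custom")  # writes config.json
reloaded = EfficientFormerConfig.from_pretrained("./efficientformer-custom")
assert reloaded.depths == [2, 2, 4, 3]
```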
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..7431cd6136a593e7bd65f33d847e6b9346abfe46
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,252 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert EfficientFormer checkpoints from the original repository.
+
+URL: https://github.com/snap-research/EfficientFormer
+"""
+
+import argparse
+import re
+from pathlib import Path
+
+import requests
+import torch
+from PIL import Image
+from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
+
+from transformers import (
+ EfficientFormerConfig,
+ EfficientFormerForImageClassificationWithTeacher,
+ EfficientFormerImageProcessor,
+)
+from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
+
+
+def rename_key(old_name, num_meta4D_last_stage):
+ new_name = old_name
+
+ if "patch_embed" in old_name:
+ _, layer, param = old_name.split(".")
+
+ if layer == "0":
+ new_name = old_name.replace("0", "convolution1")
+ elif layer == "1":
+ new_name = old_name.replace("1", "batchnorm_before")
+ elif layer == "3":
+ new_name = old_name.replace("3", "convolution2")
+ else:
+ new_name = old_name.replace("4", "batchnorm_after")
+
+ if "network" in old_name and re.search(r"\d\.\d", old_name):
+ two_digit_num = r"\b\d{2}\b"
+ if bool(re.search(two_digit_num, old_name)):
+ match = re.search(r"\d\.\d\d.", old_name).group()
+ else:
+ match = re.search(r"\d\.\d.", old_name).group()
+ if int(match[0]) < 6:
+ trimmed_name = old_name.replace(match, "")
+ trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
+ new_name = "intermediate_stages." + trimmed_name
+ else:
+ trimmed_name = old_name.replace(match, "")
+ if int(match[2]) < num_meta4D_last_stage:
+ trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
+ else:
+ layer_index = str(int(match[2]) - num_meta4D_last_stage)
+ trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
+ if "norm1" in old_name:
+ trimmed_name = trimmed_name.replace("norm1", "layernorm1")
+ elif "norm2" in old_name:
+ trimmed_name = trimmed_name.replace("norm2", "layernorm2")
+ elif "fc1" in old_name:
+ trimmed_name = trimmed_name.replace("fc1", "linear_in")
+ elif "fc2" in old_name:
+ trimmed_name = trimmed_name.replace("fc2", "linear_out")
+
+ new_name = "last_stage." + trimmed_name
+
+ elif "network" in old_name and re.search(r".\d.", old_name):
+ new_name = old_name.replace("network", "intermediate_stages")
+
+ if "fc" in new_name:
+ new_name = new_name.replace("fc", "convolution")
+ elif ("norm1" in new_name) and ("layernorm1" not in new_name):
+ new_name = new_name.replace("norm1", "batchnorm_before")
+ elif ("norm2" in new_name) and ("layernorm2" not in new_name):
+ new_name = new_name.replace("norm2", "batchnorm_after")
+ if "proj" in new_name:
+ new_name = new_name.replace("proj", "projection")
+ if "dist_head" in new_name:
+ new_name = new_name.replace("dist_head", "distillation_classifier")
+ elif "head" in new_name:
+ new_name = new_name.replace("head", "classifier")
+ elif "patch_embed" in new_name:
+ new_name = "efficientformer." + new_name
+ elif new_name == "norm.weight" or new_name == "norm.bias":
+ new_name = new_name.replace("norm", "layernorm")
+ new_name = "efficientformer." + new_name
+ else:
+ new_name = "efficientformer.encoder." + new_name
+
+ return new_name
+
+
+def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
+ for key in checkpoint.copy().keys():
+ val = checkpoint.pop(key)
+ checkpoint[rename_key(key, num_meta4D_last_stage)] = val
+
+ return checkpoint
+
+
+# We will verify our results on a COCO image
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+
+ return image
+
+
+def convert_efficientformer_checkpoint(
+ checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
+):
+ orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+ config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
+ model = EfficientFormerForImageClassificationWithTeacher(config)
+ model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
+
+ num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
+ new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
+
+ model.load_state_dict(new_state_dict)
+ model.eval()
+
+ pillow_resamplings = {
+ "bilinear": PILImageResampling.BILINEAR,
+ "bicubic": PILImageResampling.BICUBIC,
+ "nearest": PILImageResampling.NEAREST,
+ }
+
+ # prepare image
+ image = prepare_img()
+ image_size = 256
+ crop_size = 224
+ processor = EfficientFormerImageProcessor(
+ size={"shortest_edge": image_size},
+ crop_size={"height": crop_size, "width": crop_size},
+ resample=pillow_resamplings["bicubic"],
+ )
+ pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+ # original processing pipeline
+ image_transforms = Compose(
+ [
+ Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
+ CenterCrop(crop_size),
+ ToTensor(),
+ Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
+ ]
+ )
+ original_pixel_values = image_transforms(image).unsqueeze(0)
+
+ assert torch.allclose(original_pixel_values, pixel_values)
+
+ outputs = model(pixel_values)
+ logits = outputs.logits
+
+ expected_shape = (1, 1000)
+
+ if "l1" in model_name:
+ expected_logits = torch.Tensor(
+ [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
+ )
+ assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
+ assert logits.shape == expected_shape
+ elif "l3" in model_name:
+ expected_logits = torch.Tensor(
+ [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
+ )
+ assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
+ assert logits.shape == expected_shape
+ elif "l7" in model_name:
+ expected_logits = torch.Tensor(
+ [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
+ )
+ assert logits.shape == expected_shape
+ else:
+ raise ValueError(
+ f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
+ )
+
+ # Save Checkpoints
+ Path(pytorch_dump_path).mkdir(exist_ok=True)
+ model.save_pretrained(pytorch_dump_path)
+ print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
+ processor.save_pretrained(pytorch_dump_path)
+ print(f"Processor successfuly saved at {pytorch_dump_path}")
+
+ if push_to_hub:
+ print("Pushing model to the hub...")
+
+ model.push_to_hub(
+ repo_id=f"Bearnardd/{pytorch_dump_path}",
+ commit_message="Add model",
+ use_temp_dir=True,
+ )
+ processor.push_to_hub(
+ repo_id=f"Bearnardd/{pytorch_dump_path}",
+ commit_message="Add image processor",
+ use_temp_dir=True,
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--pytorch_model_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to EfficientFormer pytorch checkpoint.",
+ )
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ type=str,
+ required=True,
+ help="The json file for EfficientFormer model config.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
+ parser.add_argument(
+ "--no-push_to_hub",
+ dest="push_to_hub",
+ action="store_false",
+ help="Do not push model and image processor to the hub",
+ )
+ parser.set_defaults(push_to_hub=True)
+
+ args = parser.parse_args()
+ convert_efficientformer_checkpoint(
+ checkpoint_path=args.pytorch_model_path,
+ efficientformer_config_file=args.config_file,
+ pytorch_dump_path=args.pytorch_dump_path,
+ push_to_hub=args.push_to_hub,
+ )
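The converter is normally driven through the CLI flags defined above, but `convert_efficientformer_checkpoint` can also be called directly. A hedged sketch of the direct call (all file names below are placeholders; only the keyword names come from the signature in this file, and the import requires the script's own dependencies such as torchvision):

```python
# Sketch of calling the converter above from Python instead of the CLI.
# The three paths are placeholders for an original Snap checkpoint, its config
# JSON, and an output directory; push_to_hub=False skips the Hub upload branch.
from transformers.models.efficientformer.convert_efficientformer_original_pytorch_checkpoint_to_pytorch import (
    convert_efficientformer_checkpoint,
)

convert_efficientformer_checkpoint(
    checkpoint_path="efficientformer_l1_300d.pth",
    efficientformer_config_file="efficientformer_l1_config.json",
    pytorch_dump_path="efficientformer-l1-converted",
    push_to_hub=False,
)
```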
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/image_processing_efficientformer.py b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/image_processing_efficientformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..38756f7c958f5d1441c6f2c1d4ec5987c664c7ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/image_processing_efficientformer.py
@@ -0,0 +1,321 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for EfficientFormer."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_batched,
+ is_scaled_image,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class EfficientFormerImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs an EfficientFormer image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
+ size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
+ size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+ `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+ method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_center_crop: bool = True,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ crop_size: Dict[str, int] = None,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 224, "width": 224}
+ size = get_size_dict(size)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.do_rescale = do_rescale
+ self.do_normalize = do_normalize
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.size = size
+ self.resample = resample
+ self.rescale_factor = rescale_factor
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample:
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+
+ if "shortest_edge" in size:
+ size = get_resize_output_image_size(
+ image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+ )
+ # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
+ elif "height" in size and "width" in size:
+ size = (size["height"], size["width"])
+ else:
+ raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
+ return resize(
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
+ )
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: Optional[bool] = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: int = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
+ resizing.
+ resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
+ `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+ an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the [0, 1] range.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use if `do_normalize` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
+ resample = resample if resample is not None else self.resample
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size_dict = get_size_dict(size)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not is_batched(images):
+ images = [images]
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_center_crop:
+ images = [
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
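The `preprocess` pipeline above mirrors the torchvision transform used in the conversion script: resize, optional center crop, rescale to [0, 1], then ImageNet normalization. A minimal usage sketch (the input image is synthetic, purely for illustration):

```python
import numpy as np
from PIL import Image

from transformers import EfficientFormerImageProcessor

# Minimal sketch on a synthetic RGB image; size/crop_size mirror the values the
# conversion script uses (shortest edge 256, center crop 224).
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

processor = EfficientFormerImageProcessor(
    size={"shortest_edge": 256},
    crop_size={"height": 224, "width": 224},
)
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```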
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_efficientformer.py b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_efficientformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..70075cff55d7d9838e5757e21302af9aa650cad7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_efficientformer.py
@@ -0,0 +1,803 @@
+# coding=utf-8
+# Copyright 2022 Snapchat Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch EfficientFormer model."""
+
+import itertools
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_efficientformer import EfficientFormerConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "EfficientFormerConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "snap-research/efficientformer-l1-300"
+_EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "snap-research/efficientformer-l1-300"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat"
+
+
+from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class EfficientFormerPatchEmbeddings(nn.Module):
+ """
+ This class performs downsampling between two stages. For an input tensor of shape [batch_size, num_channels,
+ height, width], it produces an output tensor of shape [batch_size, embed_dim, height/stride, width/stride].
+ """
+
+ def __init__(self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool = True):
+ super().__init__()
+ self.num_channels = num_channels
+
+ self.projection = nn.Conv2d(
+ num_channels,
+ embed_dim,
+ kernel_size=config.downsample_patch_size,
+ stride=config.downsample_stride,
+ padding=config.downsample_pad,
+ )
+ self.norm = nn.BatchNorm2d(embed_dim, eps=config.batch_norm_eps) if apply_norm else nn.Identity()
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+
+ embeddings = self.projection(pixel_values)
+ embeddings = self.norm(embeddings)
+
+ return embeddings
+
+
+class EfficientFormerSelfAttention(nn.Module):
+ def __init__(self, dim: int, key_dim: int, num_heads: int, attention_ratio: int, resolution: int):
+ super().__init__()
+
+ self.num_heads = num_heads
+ self.key_dim = key_dim
+ self.attention_ratio = attention_ratio
+ self.scale = key_dim**-0.5
+ self.total_key_dim = key_dim * num_heads
+ self.expanded_key_dim = int(attention_ratio * key_dim)
+ self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
+ hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
+ self.qkv = nn.Linear(dim, hidden_size)
+ self.projection = nn.Linear(self.total_expanded_key_dim, dim)
+ points = list(itertools.product(range(resolution), range(resolution)))
+ num_points = len(points)
+ attention_offsets = {}
+ idxs = []
+ for point_1 in points:
+ for point_2 in points:
+ offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
+ if offset not in attention_offsets:
+ attention_offsets[offset] = len(attention_offsets)
+ idxs.append(attention_offsets[offset])
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
+ self.register_buffer("attention_bias_idxs", torch.LongTensor(idxs).view(num_points, num_points))
+
+ @torch.no_grad()
+ def train(self, mode=True):
+ super().train(mode)
+ if mode and hasattr(self, "ab"):
+ del self.ab
+ else:
+ self.ab = self.attention_biases[:, self.attention_bias_idxs]
+
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
+ batch_size, sequence_length, num_channels = hidden_states.shape
+ qkv = self.qkv(hidden_states)
+ query_layer, key_layer, value_layer = qkv.reshape(batch_size, sequence_length, self.num_heads, -1).split(
+ [self.key_dim, self.key_dim, self.expanded_key_dim], dim=3
+ )
+ query_layer = query_layer.permute(0, 2, 1, 3)
+ key_layer = key_layer.permute(0, 2, 1, 3)
+ value_layer = value_layer.permute(0, 2, 1, 3)
+
+ # `model.to(torch_device)` won't move `self.ab`, since it is a plain attribute rather than a registered buffer,
+ # and it is only recreated on a follow-up `train` or `eval` call. Move it to the right device manually here,
+ # so users won't have to do this every time.
+ if not self.training:
+ self.ab = self.ab.to(self.attention_biases.device)
+ attention_probs = (torch.matmul(query_layer, key_layer.transpose(-2, -1))) * self.scale + (
+ self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab
+ )
+
+ attention_probs = attention_probs.softmax(dim=-1)
+
+ context_layer = torch.matmul(attention_probs, value_layer).transpose(1, 2)
+ context_layer = context_layer.reshape(batch_size, sequence_length, self.total_expanded_key_dim)
+ context_layer = self.projection(context_layer)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class EfficientFormerConvStem(nn.Module):
+ def __init__(self, config: EfficientFormerConfig, out_channels: int):
+ super().__init__()
+
+ self.convolution1 = nn.Conv2d(config.num_channels, out_channels // 2, kernel_size=3, stride=2, padding=1)
+ self.batchnorm_before = nn.BatchNorm2d(out_channels // 2, eps=config.batch_norm_eps)
+
+ self.convolution2 = nn.Conv2d(out_channels // 2, out_channels, kernel_size=3, stride=2, padding=1)
+ self.batchnorm_after = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps)
+
+ self.activation = nn.ReLU()
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ features = self.batchnorm_before(self.convolution1(pixel_values))
+ features = self.activation(features)
+ features = self.batchnorm_after(self.convolution2(features))
+ features = self.activation(features)
+
+ return features
+
+
+class EfficientFormerPooling(nn.Module):
+ def __init__(self, pool_size: int):
+ super().__init__()
+ self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ output = self.pool(hidden_states) - hidden_states
+ return output
+
+
+class EfficientFormerDenseMlp(nn.Module):
+ def __init__(
+ self,
+ config: EfficientFormerConfig,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ ):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+
+ self.linear_in = nn.Linear(in_features, hidden_features)
+ self.activation = ACT2FN[config.hidden_act]
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.linear_out = nn.Linear(hidden_features, out_features)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.linear_in(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.linear_out(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class EfficientFormerConvMlp(nn.Module):
+ def __init__(
+ self,
+ config: EfficientFormerConfig,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ drop: float = 0.0,
+ ):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+
+ self.convolution1 = nn.Conv2d(in_features, hidden_features, 1)
+ self.activation = ACT2FN[config.hidden_act]
+ self.convolution2 = nn.Conv2d(hidden_features, out_features, 1)
+ self.dropout = nn.Dropout(drop)
+
+ self.batchnorm_before = nn.BatchNorm2d(hidden_features, eps=config.batch_norm_eps)
+ self.batchnorm_after = nn.BatchNorm2d(out_features, eps=config.batch_norm_eps)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.convolution1(hidden_state)
+ hidden_state = self.batchnorm_before(hidden_state)
+
+ hidden_state = self.activation(hidden_state)
+ hidden_state = self.dropout(hidden_state)
+ hidden_state = self.convolution2(hidden_state)
+
+ hidden_state = self.batchnorm_after(hidden_state)
+ hidden_state = self.dropout(hidden_state)
+
+ return hidden_state
+
+
+# Copied from transformers.models.convnext.modeling_convnext.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
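+ # Illustration (a minimal sketch kept as comments, so the module's behavior is unchanged): with
+ # `drop_prob=0.2` each sample in the batch is zeroed with probability 0.2 and the survivors are
+ # rescaled by 1 / 0.8, so the expected value of the output matches the input, e.g.:
+ #
+ #     x = torch.ones(4, 3, 8)                          # 4 samples in the batch
+ #     y = drop_path(x, drop_prob=0.2, training=True)
+ #     # each y[i] is either all zeros or all 1.25; drop_path(x, 0.2, training=False) returns x unchanged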
+
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->EfficientFormer
+class EfficientFormerDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class EfficientFormerFlat(nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
+ return hidden_states
+
+
+class EfficientFormerMeta3D(nn.Module):
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0):
+ super().__init__()
+
+ self.token_mixer = EfficientFormerSelfAttention(
+ dim=config.dim,
+ key_dim=config.key_dim,
+ num_heads=config.num_attention_heads,
+ attention_ratio=config.attention_ratio,
+ resolution=config.resolution,
+ )
+
+ self.layernorm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.layernorm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
+ self.mlp = EfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim)
+
+ self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.use_layer_scale = config.use_layer_scale
+ if config.use_layer_scale:
+ self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+ self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
+ self_attention_outputs = self.token_mixer(self.layernorm1(hidden_states), output_attentions)
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ if self.use_layer_scale:
+ layer_output = hidden_states + self.drop_path(
+ self.layer_scale_1.unsqueeze(0).unsqueeze(0) * attention_output
+ )
+ layer_output = layer_output + self.drop_path(
+ self.layer_scale_2.unsqueeze(0).unsqueeze(0) * self.mlp(self.layernorm2(layer_output))
+ )
+ else:
+ layer_output = hidden_states + self.drop_path(attention_output)
+ layer_output = layer_output + self.drop_path(self.mlp(self.layernorm2(layer_output)))
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+class EfficientFormerMeta3DLayers(nn.Module):
+ def __init__(self, config: EfficientFormerConfig):
+ super().__init__()
+ drop_paths = [
+ config.drop_path_rate * (block_idx + sum(config.depths[:-1]))
+ for block_idx in range(config.num_meta3d_blocks)
+ ]
+ self.blocks = nn.ModuleList(
+ [EfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path) for drop_path in drop_paths]
+ )
+
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
+ all_attention_outputs = () if output_attentions else None
+
+ for layer_module in self.blocks:
+ if isinstance(hidden_states, tuple):
+ hidden_states = hidden_states[0]
+
+ hidden_states = layer_module(hidden_states, output_attentions)
+
+ if output_attentions:
+ all_attention_outputs = all_attention_outputs + (hidden_states[1],)
+
+ if output_attentions:
+ outputs = (hidden_states[0],) + all_attention_outputs
+ return outputs
+
+ return hidden_states
+
+
+class EfficientFormerMeta4D(nn.Module):
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0):
+ super().__init__()
+ pool_size = config.pool_size if config.pool_size is not None else 3
+ self.token_mixer = EfficientFormerPooling(pool_size=pool_size)
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
+ self.mlp = EfficientFormerConvMlp(
+ config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob
+ )
+
+ self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.use_layer_scale = config.use_layer_scale
+ if config.use_layer_scale:
+ self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+ self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
+ outputs = self.token_mixer(hidden_states)
+
+ if self.use_layer_scale:
+ layer_output = hidden_states + self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * outputs)
+
+ layer_output = layer_output + self.drop_path(
+ self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(layer_output)
+ )
+ else:
+ layer_output = hidden_states + self.drop_path(outputs)
+ layer_output = layer_output + self.drop_path(self.mlp(layer_output))
+
+ return layer_output
+
+
+class EfficientFormerMeta4DLayers(nn.Module):
+ def __init__(self, config: EfficientFormerConfig, stage_idx: int):
+ super().__init__()
+ num_layers = (
+ config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
+ )
+ drop_paths = [
+ config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)
+ ]
+
+ self.blocks = nn.ModuleList(
+ [
+ EfficientFormerMeta4D(config, config.hidden_sizes[stage_idx], drop_path=drop_path)
+ for drop_path in drop_paths
+ ]
+ )
+
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
+ for layer_module in self.blocks:
+ hidden_states = layer_module(hidden_states)
+ return hidden_states
+
+
+class EfficientFormerIntermediateStage(nn.Module):
+ def __init__(self, config: EfficientFormerConfig, index: int):
+ super().__init__()
+ self.meta4D_layers = EfficientFormerMeta4DLayers(config, index)
+
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
+ hidden_states = self.meta4D_layers(hidden_states)
+ return hidden_states
+
+
+class EfficientFormerLastStage(nn.Module):
+ def __init__(self, config: EfficientFormerConfig):
+ super().__init__()
+ self.meta4D_layers = EfficientFormerMeta4DLayers(config, -1)
+ self.flat = EfficientFormerFlat()
+ self.meta3D_layers = EfficientFormerMeta3DLayers(config)
+
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
+ hidden_states = self.meta4D_layers(hidden_states)
+ hidden_states = self.flat(hidden_states)
+ hidden_states = self.meta3D_layers(hidden_states, output_attentions)
+
+ return hidden_states
+
+
+class EfficientFormerEncoder(nn.Module):
+ def __init__(self, config: EfficientFormerConfig):
+ super().__init__()
+ self.config = config
+ num_intermediate_stages = len(config.depths) - 1
+ downsamples = [
+ config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1]
+ for i in range(num_intermediate_stages)
+ ]
+ intermediate_stages = []
+
+ for i in range(num_intermediate_stages):
+ intermediate_stages.append(EfficientFormerIntermediateStage(config, i))
+ if downsamples[i]:
+ intermediate_stages.append(
+ EfficientFormerPatchEmbeddings(config, config.hidden_sizes[i], config.hidden_sizes[i + 1])
+ )
+
+ self.intermediate_stages = nn.ModuleList(intermediate_stages)
+ self.last_stage = EfficientFormerLastStage(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_hidden_states: bool = False,
+ output_attentions: bool = False,
+ return_dict: bool = True,
+ ) -> BaseModelOutput:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ for layer_module in self.intermediate_stages:
+ hidden_states = layer_module(hidden_states)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_output = self.last_stage(hidden_states, output_attentions=output_attentions)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + layer_output[1:]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (layer_output[0],)
+
+ if not return_dict:
+ return tuple(v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None)
+
+ return BaseModelOutput(
+ last_hidden_state=layer_output[0],
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class EfficientFormerPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = EfficientFormerConfig
+ base_model_prefix = "efficientformer"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = False
+
+ def _init_weights(self, module: nn.Module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+EFFICIENTFORMER_START_DOCSTRING = r"""
+ This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) subclass. Use it as a
+ regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.
+
+ Parameters:
+ config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+EFFICIENTFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`ViTImageProcessor`]. See
+ [`ViTImageProcessor.preprocess`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
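+ # A minimal usage sketch (kept as comments so this module stays importable). The checkpoint name is the one
+ # used elsewhere in this model family and `ViTImageProcessor` follows the inputs docstring above; adjust both
+ # to whatever checkpoint you actually use:
+ #
+ #     from transformers import ViTImageProcessor, EfficientFormerModel
+ #     import torch
+ #
+ #     processor = ViTImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
+ #     model = EfficientFormerModel.from_pretrained("snap-research/efficientformer-l1-300")
+ #     inputs = processor(images=image, return_tensors="pt")  # `image` is e.g. a PIL.Image
+ #     with torch.no_grad():
+ #         outputs = model(**inputs)
+ #     outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)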
+
+@add_start_docstrings(
+ "The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.",
+ EFFICIENTFORMER_START_DOCSTRING,
+)
+class EfficientFormerModel(EfficientFormerPreTrainedModel):
+ def __init__(self, config: EfficientFormerConfig):
+ super().__init__(config)
+ self.config = config
+
+ self.patch_embed = EfficientFormerConvStem(config, config.hidden_sizes[0])
+ self.encoder = EfficientFormerEncoder(config)
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.patch_embed(pixel_values)
+ encoder_outputs = self.encoder(
+ embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+
+ if not return_dict:
+ head_outputs = (sequence_output,)
+ return head_outputs + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ EfficientFormer Model transformer with an image classification head on top (a linear layer on top of the pooled
+ final hidden state), e.g. for ImageNet.
+ """,
+ EFFICIENTFORMER_START_DOCSTRING,
+)
+class EfficientFormerForImageClassification(EfficientFormerPreTrainedModel):
+ def __init__(self, config: EfficientFormerConfig):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.efficientformer = EfficientFormerModel(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.efficientformer(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(sequence_output.mean(-2))
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
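+ # A minimal classification sketch (comments only; the checkpoint and label id are assumptions taken from the
+ # doc examples, not requirements of this class):
+ #
+ #     from transformers import ViTImageProcessor, EfficientFormerForImageClassification
+ #     import torch
+ #
+ #     processor = ViTImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
+ #     model = EfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
+ #     inputs = processor(images=image, return_tensors="pt")
+ #     outputs = model(**inputs, labels=torch.tensor([281]))  # passing labels also returns `outputs.loss`
+ #     predicted = outputs.logits.argmax(-1).item()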
+@dataclass
+class EfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
+ """
+ Output type of [`EfficientFormerForImageClassificationWithTeacher`].
+
+ Args:
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores as the average of the cls_logits and distillation logits.
+ cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
+ class token).
+ distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
+ distillation token).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ logits: torch.FloatTensor = None
+ cls_logits: torch.FloatTensor = None
+ distillation_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@add_start_docstrings(
+ """
+ EfficientFormer Model transformer with image classification heads on top (a classification head and a distillation
+ head, both linear layers on top of the pooled final hidden state), e.g. for ImageNet.
+
+ This model only supports inference. Fine-tuning with distillation (i.e. with a teacher) is not yet
+ supported.
+ """,
+ EFFICIENTFORMER_START_DOCSTRING,
+)
+class EfficientFormerForImageClassificationWithTeacher(EfficientFormerPreTrainedModel):
+ def __init__(self, config: EfficientFormerConfig):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.efficientformer = EfficientFormerModel(config)
+
+ # Classifier head
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ # Distillation head
+ self.distillation_classifier = (
+ nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=EfficientFormerForImageClassificationWithTeacherOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, EfficientFormerForImageClassificationWithTeacherOutput]:
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ outputs = self.efficientformer(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ cls_logits = self.classifier(sequence_output.mean(-2))
+ distillation_logits = self.distillation_classifier(sequence_output.mean(-2))
+
+ # during inference, return the average of both classifier predictions
+ logits = (cls_logits + distillation_logits) / 2
+
+ if not return_dict:
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
+ return output
+
+ return EfficientFormerForImageClassificationWithTeacherOutput(
+ logits=logits,
+ cls_logits=cls_logits,
+ distillation_logits=distillation_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_tf_efficientformer.py b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_tf_efficientformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..77b62999e772ecc3d3ebd119da7fa8e8d80ff4be
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_tf_efficientformer.py
@@ -0,0 +1,1193 @@
+# coding=utf-8
+# Copyright 2023 Snapchat Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TensorFlow EfficientFormer model."""
+
+import itertools
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import tensorflow as tf
+
+from ...activations_tf import ACT2FN
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPooling,
+ TFImageClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_efficientformer import EfficientFormerConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "EfficientFormerConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "snap-research/efficientformer-l1-300"
+_EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "snap-research/efficientformer-l1-300"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_281"
+
+
+from ..deprecated._archive_maps import TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFEfficientFormerPatchEmbeddings(keras.layers.Layer):
+ """
+ This class performs downsampling between two stages. For an input tensor with shape [batch_size, num_channels,
+ height, width] it produces an output tensor with shape [batch_size, num_channels, height/stride, width/stride].
+ """
+
+ def __init__(
+ self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool = True, **kwargs
+ ) -> None:
+ super().__init__(**kwargs)
+ self.num_channels = num_channels
+
+ self.padding = keras.layers.ZeroPadding2D(padding=config.downsample_pad)
+ self.projection = keras.layers.Conv2D(
+ filters=embed_dim,
+ kernel_size=config.downsample_patch_size,
+ strides=config.downsample_stride,
+ padding="valid",
+ name="projection",
+ )
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
+ self.norm = (
+ keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="norm")
+ if apply_norm
+ else tf.identity
+ )
+ self.embed_dim = embed_dim
+
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
+ tf.debugging.assert_shapes(
+ [(pixel_values, (..., None, None, self.num_channels))],
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
+ )
+ embeddings = self.projection(self.padding(pixel_values))
+ embeddings = self.norm(embeddings, training=training)
+ return embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, None, self.num_channels])
+ if getattr(self, "norm", None) is not None:
+ if hasattr(self.norm, "name"):
+ with tf.name_scope(self.norm.name):
+ self.norm.build([None, None, None, self.embed_dim])
+
+
+class TFEfficientFormerSelfAttention(keras.layers.Layer):
+ def __init__(
+ self,
+ dim: int,
+ key_dim: int,
+ num_heads: int,
+ attention_ratio: int,
+ resolution: int,
+ config: EfficientFormerConfig,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.num_heads = num_heads
+ self.key_dim = key_dim
+ self.attention_ratio = attention_ratio
+ self.scale = key_dim**-0.5
+ self.total_key_dim = key_dim * num_heads
+ self.expanded_key_dim = int(attention_ratio * key_dim)
+ self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
+ hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
+
+ self.qkv = keras.layers.Dense(
+ units=hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="qkv"
+ )
+ self.projection = keras.layers.Dense(
+ units=dim, kernel_initializer=get_initializer(config.initializer_range), name="projection"
+ )
+ self.resolution = resolution
+ self.dim = dim
+
+ def build(self, input_shape: tf.TensorShape) -> None:
+ points = list(itertools.product(range(self.resolution), range(self.resolution)))
+ num_points = len(points)
+ attention_offsets = {}
+
+ idxs = []
+
+ for point_1 in points:
+ for point_2 in points:
+ offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
+ if offset not in attention_offsets:
+ attention_offsets[offset] = len(attention_offsets)
+ idxs.append(attention_offsets[offset])
+
+ self.attention_biases = self.add_weight(
+ shape=(self.num_heads, len(attention_offsets)),
+ initializer=keras.initializers.zeros(),
+ trainable=True,
+ name="attention_biases",
+ )
+ self.attention_bias_idxs = self.add_weight(
+ shape=(num_points, num_points),
+ trainable=False,
+ dtype=tf.int32,
+ name="attention_bias_idxs",
+ )
+
+ self.attention_bias_idxs.assign(tf.reshape(tf.cast(idxs, dtype=tf.int32), (num_points, num_points)))
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "qkv", None) is not None:
+ with tf.name_scope(self.qkv.name):
+ self.qkv.build([None, None, self.dim])
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, self.total_expanded_key_dim])
+
+ def call(
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
+ ) -> Tuple[tf.Tensor]:
+ batch_size, sequence_length, *_ = shape_list(hidden_states)
+ qkv = self.qkv(inputs=hidden_states)
+
+ query_layer, key_layer, value_layer = tf.split(
+ tf.reshape(tensor=qkv, shape=(batch_size, sequence_length, self.num_heads, -1)),
+ num_or_size_splits=[self.key_dim, self.key_dim, self.expanded_key_dim],
+ axis=3,
+ )
+
+ query_layer = tf.transpose(query_layer, perm=[0, 2, 1, 3])
+ key_layer = tf.transpose(key_layer, perm=[0, 2, 1, 3])
+ value_layer = tf.transpose(value_layer, perm=[0, 2, 1, 3])
+
+ attention_probs = tf.matmul(query_layer, tf.transpose(key_layer, perm=[0, 1, 3, 2]))
+ scale = tf.cast(self.scale, dtype=attention_probs.dtype)
+ attention_probs = tf.multiply(attention_probs, scale)
+
+ attention_biases = tf.gather(params=self.attention_biases, indices=self.attention_bias_idxs, axis=1)
+ attention_probs = attention_probs + attention_biases
+ attention_probs = stable_softmax(logits=attention_probs, axis=-1)
+
+ context_layer = tf.matmul(attention_probs, value_layer)
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
+
+ context_layer = tf.reshape(
+ tensor=context_layer, shape=(batch_size, sequence_length, self.total_expanded_key_dim)
+ )
+ context_layer = self.projection(context_layer)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class TFEfficientFormerConvStem(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, out_channels: int, **kwargs):
+ super().__init__(**kwargs)
+
+ self.padding = keras.layers.ZeroPadding2D(padding=1)
+ self.convolution1 = keras.layers.Conv2D(
+ filters=out_channels // 2, kernel_size=3, strides=2, padding="valid", name="convolution1"
+ )
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
+ self.batchnorm_before = keras.layers.BatchNormalization(
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_before"
+ )
+
+ self.convolution2 = keras.layers.Conv2D(
+ filters=out_channels,
+ kernel_size=3,
+ strides=2,
+ padding="valid",
+ name="convolution2",
+ )
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
+ self.batchnorm_after = keras.layers.BatchNormalization(
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_after"
+ )
+
+ self.activation = keras.layers.Activation(activation=keras.activations.relu, name="activation")
+ self.out_channels = out_channels
+ self.config = config
+
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
+ features = self.batchnorm_before(self.convolution1(self.padding(pixel_values)), training=training)
+ features = self.activation(features)
+ features = self.batchnorm_after(self.convolution2(self.padding(features)), training=training)
+ features = self.activation(features)
+ return features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "convolution1", None) is not None:
+ with tf.name_scope(self.convolution1.name):
+ self.convolution1.build([None, None, None, self.config.num_channels])
+ if getattr(self, "batchnorm_before", None) is not None:
+ with tf.name_scope(self.batchnorm_before.name):
+ self.batchnorm_before.build([None, None, None, self.out_channels // 2])
+ if getattr(self, "convolution2", None) is not None:
+ with tf.name_scope(self.convolution2.name):
+ self.convolution2.build([None, None, None, self.out_channels // 2])
+ if getattr(self, "batchnorm_after", None) is not None:
+ with tf.name_scope(self.batchnorm_after.name):
+ self.batchnorm_after.build([None, None, None, self.out_channels])
+ if getattr(self, "activation", None) is not None:
+ with tf.name_scope(self.activation.name):
+ self.activation.build(None)
+
+
+class TFEfficientFormerPooling(keras.layers.Layer):
+ def __init__(self, pool_size: int, **kwargs):
+ super().__init__(**kwargs)
+ self.pool = keras.layers.AveragePooling2D(pool_size=pool_size, strides=1, padding="same")
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ output = self.pool(hidden_states)
+ output = output - hidden_states
+ return output
+
+
+class TFEfficientFormerDenseMlp(keras.layers.Layer):
+ def __init__(
+ self,
+ config: EfficientFormerConfig,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+
+ self.linear_in = keras.layers.Dense(
+ units=hidden_features, kernel_initializer=get_initializer(config.initializer_range), name="linear_in"
+ )
+ self.activation = ACT2FN[config.hidden_act]
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ self.linear_out = keras.layers.Dense(
+ units=out_features, kernel_initializer=get_initializer(config.initializer_range), name="linear_out"
+ )
+ self.hidden_features = hidden_features
+ self.in_features = in_features
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.linear_in(inputs=hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.linear_out(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "linear_in", None) is not None:
+ with tf.name_scope(self.linear_in.name):
+ self.linear_in.build([None, None, self.in_features])
+ if getattr(self, "linear_out", None) is not None:
+ with tf.name_scope(self.linear_out.name):
+ self.linear_out.build([None, None, self.hidden_features])
+
+
+class TFEfficientFormerConvMlp(keras.layers.Layer):
+ def __init__(
+ self,
+ config: EfficientFormerConfig,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ drop: float = 0.0,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+
+ self.convolution1 = keras.layers.Conv2D(
+ filters=hidden_features,
+ kernel_size=1,
+ name="convolution1",
+ padding="valid",
+ )
+
+ self.activation = ACT2FN[config.hidden_act]
+
+ self.convolution2 = keras.layers.Conv2D(
+ filters=out_features,
+ kernel_size=1,
+ name="convolution2",
+ padding="valid",
+ )
+
+ self.dropout = keras.layers.Dropout(rate=drop)
+
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
+ self.batchnorm_before = keras.layers.BatchNormalization(
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_before"
+ )
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
+ self.batchnorm_after = keras.layers.BatchNormalization(
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_after"
+ )
+ self.hidden_features = hidden_features
+ self.in_features = in_features
+ self.out_features = out_features
+
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_state = self.convolution1(hidden_state)
+ hidden_state = self.batchnorm_before(hidden_state, training=training)
+ hidden_state = self.activation(hidden_state)
+ hidden_state = self.dropout(hidden_state, training=training)
+ hidden_state = self.convolution2(hidden_state)
+ hidden_state = self.batchnorm_after(hidden_state, training=training)
+ hidden_state = self.dropout(hidden_state, training=training)
+ return hidden_state
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "convolution1", None) is not None:
+ with tf.name_scope(self.convolution1.name):
+ self.convolution1.build([None, None, None, self.in_features])
+ if getattr(self, "convolution2", None) is not None:
+ with tf.name_scope(self.convolution2.name):
+ self.convolution2.build([None, None, None, self.hidden_features])
+ if getattr(self, "batchnorm_before", None) is not None:
+ with tf.name_scope(self.batchnorm_before.name):
+ self.batchnorm_before.build([None, None, None, self.hidden_features])
+ if getattr(self, "batchnorm_after", None) is not None:
+ with tf.name_scope(self.batchnorm_after.name):
+ self.batchnorm_after.build([None, None, None, self.out_features])
+
+
+# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->EfficientFormer
+class TFEfficientFormerDropPath(keras.layers.Layer):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+ References:
+ (1) github.com:rwightman/pytorch-image-models
+ """
+
+ def __init__(self, drop_path: float, **kwargs):
+ super().__init__(**kwargs)
+ self.drop_path = drop_path
+
+ def call(self, x: tf.Tensor, training=None):
+ if training:
+ keep_prob = 1 - self.drop_path
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
+ random_tensor = tf.floor(random_tensor)
+ return (x / keep_prob) * random_tensor
+ return x
+
+
+class TFEfficientFormerFlat(keras.layers.Layer):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def call(self, hidden_states: tf.Tensor) -> Tuple[tf.Tensor]:
+ batch_size, _, _, in_channels = shape_list(hidden_states)
+ hidden_states = tf.reshape(hidden_states, shape=[batch_size, -1, in_channels])
+ return hidden_states
+
+
+class TFEfficientFormerMeta3D(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0, **kwargs):
+ super().__init__(**kwargs)
+
+ self.token_mixer = TFEfficientFormerSelfAttention(
+ dim=config.dim,
+ key_dim=config.key_dim,
+ num_heads=config.num_attention_heads,
+ attention_ratio=config.attention_ratio,
+ resolution=config.resolution,
+ name="token_mixer",
+ config=config,
+ )
+ self.dim = dim
+ self.config = config
+
+ self.layernorm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm1")
+ self.layernorm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm2")
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
+ self.mlp = TFEfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim, name="mlp")
+
+ # Using `layers.Activation` instead of `tf.identity` to better control `training` behavior.
+ self.drop_path = (
+ TFEfficientFormerDropPath(drop_path)
+ if drop_path > 0.0
+ else keras.layers.Activation("linear", name="drop_path")
+ )
+ self.config = config
+
+ def build(self, input_shape=None):
+ self.layer_scale_1 = None
+ self.layer_scale_2 = None
+
+ if self.config.use_layer_scale:
+ self.layer_scale_1 = self.add_weight(
+ shape=(self.dim,),
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
+ trainable=True,
+ name="layer_scale_1",
+ )
+ self.layer_scale_2 = self.add_weight(
+ shape=(self.dim,),
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
+ trainable=True,
+ name="layer_scale_2",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "token_mixer", None) is not None:
+ with tf.name_scope(self.token_mixer.name):
+ self.token_mixer.build(None)
+ if getattr(self, "layernorm1", None) is not None:
+ with tf.name_scope(self.layernorm1.name):
+ self.layernorm1.build([None, None, self.dim])
+ if getattr(self, "layernorm2", None) is not None:
+ with tf.name_scope(self.layernorm2.name):
+ self.layernorm2.build([None, None, self.dim])
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "drop_path", None) is not None:
+ with tf.name_scope(self.drop_path.name):
+ self.drop_path.build(None)
+
+ def call(
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
+ ) -> Tuple[tf.Tensor]:
+ self_attention_outputs = self.token_mixer(
+ hidden_states=self.layernorm1(hidden_states, training=training),
+ output_attentions=output_attentions,
+ training=training,
+ )
+
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ if self.config.use_layer_scale:
+ layer_output = hidden_states + self.drop_path(
+ tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * attention_output,
+ training=training,
+ )
+ layer_output = layer_output + self.drop_path(
+ tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0)
+ * self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training),
+ training=training,
+ )
+ else:
+ layer_output = hidden_states + self.drop_path(attention_output, training=training)
+ layer_output = layer_output + self.drop_path(
+ self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training),
+ training=training,
+ )
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+class TFEfficientFormerMeta3DLayers(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
+ super().__init__(**kwargs)
+ drop_paths = [
+ config.drop_path_rate * (block_idx + sum(config.depths[:-1]))
+ for block_idx in range(config.num_meta3d_blocks)
+ ]
+ self.blocks = [
+ TFEfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path, name=f"blocks.{i}")
+ for i, drop_path in enumerate(drop_paths)
+ ]
+
+ def call(
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
+ ) -> Tuple[tf.Tensor]:
+ all_attention_outputs = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.blocks):
+ if isinstance(hidden_states, tuple):
+ hidden_states = hidden_states[0]
+
+ hidden_states = layer_module(
+ hidden_states=hidden_states, output_attentions=output_attentions, training=training
+ )
+ if output_attentions:
+ all_attention_outputs = all_attention_outputs + (hidden_states[1],)
+
+ if output_attentions:
+ outputs = (hidden_states[0],) + all_attention_outputs
+ return outputs
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "blocks", None) is not None:
+ for layer in self.blocks:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFEfficientFormerMeta4D(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0, **kwargs):
+ super().__init__(**kwargs)
+ pool_size = config.pool_size if config.pool_size is not None else 3
+ self.token_mixer = TFEfficientFormerPooling(pool_size=pool_size, name="token_mixer")
+ self.dim = dim
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
+ self.mlp = TFEfficientFormerConvMlp(
+ config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob, name="mlp"
+ )
+
+ self.drop_path = (
+ TFEfficientFormerDropPath(drop_path, name="drop_path")
+ if drop_path > 0.0
+ else keras.layers.Activation("linear", name="drop_path")
+ )
+ self.config = config
+
+ def build(self, input_shape=None):
+ self.layer_scale_1 = None
+ self.layer_scale_2 = None
+
+ if self.config.use_layer_scale:
+ self.layer_scale_1 = self.add_weight(
+ shape=(self.dim,),
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
+ trainable=True,
+ name="layer_scale_1",
+ )
+ self.layer_scale_2 = self.add_weight(
+ shape=(self.dim,),
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
+ trainable=True,
+ name="layer_scale_2",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "token_mixer", None) is not None:
+ with tf.name_scope(self.token_mixer.name):
+ self.token_mixer.build(None)
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "drop_path", None) is not None:
+ with tf.name_scope(self.drop_path.name):
+ self.drop_path.build(None)
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
+ outputs = self.token_mixer(hidden_states)
+
+ if self.config.use_layer_scale:
+ layer_output = hidden_states + self.drop_path(
+ tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * outputs,
+ training=training,
+ )
+
+ layer_output = layer_output + self.drop_path(
+ tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0)
+ * self.mlp(hidden_state=layer_output, training=training),
+ training=training,
+ )
+
+ else:
+ layer_output = hidden_states + self.drop_path(outputs, training=training)
+ layer_output = layer_output + self.drop_path(
+ self.mlp(hidden_state=layer_output, training=training), training=training
+ )
+
+ return layer_output
+
+
+class TFEfficientFormerMeta4DLayers(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, stage_idx: int, **kwargs):
+ super().__init__(**kwargs)
+ num_layers = (
+ config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
+ )
+ drop_paths = [
+ config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)
+ ]
+
+ self.blocks = [
+ TFEfficientFormerMeta4D(
+ config=config, dim=config.hidden_sizes[stage_idx], drop_path=drop_paths[i], name=f"blocks.{i}"
+ )
+ for i in range(len(drop_paths))
+ ]
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
+ for layer_module in self.blocks:
+ hidden_states = layer_module(hidden_states=hidden_states, training=training)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "blocks", None) is not None:
+ for layer in self.blocks:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFEfficientFormerIntermediateStage(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, index: int, **kwargs):
+ super().__init__(**kwargs)
+ self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=index, name="meta4D_layers")
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
+ hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "meta4D_layers", None) is not None:
+ with tf.name_scope(self.meta4D_layers.name):
+ self.meta4D_layers.build(None)
+
+
+class TFEfficientFormerLastStage(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=-1, name="meta4D_layers")
+ self.flat = TFEfficientFormerFlat(name="flat")
+ self.meta3D_layers = TFEfficientFormerMeta3DLayers(config, name="meta3D_layers")
+
+ def call(
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
+ ) -> Tuple[tf.Tensor]:
+ hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
+ hidden_states = self.flat(hidden_states=hidden_states)
+ hidden_states = self.meta3D_layers(
+ hidden_states=hidden_states, output_attentions=output_attentions, training=training
+ )
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "meta4D_layers", None) is not None:
+ with tf.name_scope(self.meta4D_layers.name):
+ self.meta4D_layers.build(None)
+ if getattr(self, "flat", None) is not None:
+ with tf.name_scope(self.flat.name):
+ self.flat.build(None)
+ if getattr(self, "meta3D_layers", None) is not None:
+ with tf.name_scope(self.meta3D_layers.name):
+ self.meta3D_layers.build(None)
+
+
+class TFEfficientFormerEncoder(keras.layers.Layer):
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ num_intermediate_stages = len(config.depths) - 1
+ downsamples = [
+ config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1]
+ for i in range(num_intermediate_stages)
+ ]
+
+ intermediate_stages = []
+ layer_count = -1
+ for i in range(num_intermediate_stages):
+ layer_count += 1
+ intermediate_stages.append(
+ TFEfficientFormerIntermediateStage(config, i, name=f"intermediate_stages.{layer_count}")
+ )
+ if downsamples[i]:
+ layer_count += 1
+ intermediate_stages.append(
+ TFEfficientFormerPatchEmbeddings(
+ config,
+ config.hidden_sizes[i],
+ config.hidden_sizes[i + 1],
+ name=f"intermediate_stages.{layer_count}",
+ )
+ )
+ self.intermediate_stages = intermediate_stages
+ self.last_stage = TFEfficientFormerLastStage(config, name="last_stage")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ output_hidden_states: bool,
+ output_attentions: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> TFBaseModelOutput:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ for layer_module in self.intermediate_stages:
+ hidden_states = layer_module(hidden_states, training=training)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_output = self.last_stage(hidden_states, output_attentions=output_attentions, training=training)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + layer_output[1:]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (layer_output[0],)
+
+ if not return_dict:
+ return tuple(v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=layer_output[0],
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "last_stage", None) is not None:
+ with tf.name_scope(self.last_stage.name):
+ self.last_stage.build(None)
+ for layer in self.intermediate_stages:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFEfficientFormerMainLayer(keras.layers.Layer):
+ config_class = EfficientFormerConfig
+
+ def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.config = config
+
+ self.patch_embed = TFEfficientFormerConvStem(config, config.hidden_sizes[0], name="patch_embed")
+ self.encoder = TFEfficientFormerEncoder(config, name="encoder")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: Optional[tf.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor, ...]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # When running on CPU, keras.layers.Conv2D and keras.layers.AveragePool2D do not
+ # support channels first NCHW format. A number of blocks contain both.
+ # So change the input format from (batch_size, num_channels, height, width) to
+ # (batch_size, height, width, num_channels) here.
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
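+ # e.g. a (batch_size=8, num_channels=3, height=224, width=224) input becomes (8, 224, 224, 3).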
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+ embedding_output = self.patch_embed(pixel_values, training=training)
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output, training=training)
+
+ # Change the hidden states from (batch_size, height, width, num_channels) to
+ # (batch_size, num_channels, height, width).
+ # The hidden states are in (batch_size, height, width, num_channels)
+ # shape after all stages except the MB3D blocks.
+ if output_hidden_states:
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1][:-1]]) + (
+ encoder_outputs[1][-1],
+ )
+
+ if not return_dict:
+ head_outputs = (sequence_output,)
+ return head_outputs + encoder_outputs[1:]
+
+ return TFBaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embed", None) is not None:
+ with tf.name_scope(self.patch_embed.name):
+ self.patch_embed.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.config.hidden_sizes[-1]])
+
+
+class TFEfficientFormerPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = EfficientFormerConfig
+ base_model_prefix = "efficientformer"
+ main_input_name = "pixel_values"
+
+
+EFFICIENTFORMER_START_DOCSTRING = r"""
+ This model is a TensorFlow
+ [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Use it as a regular
+ TensorFlow Module and refer to the TensorFlow documentation for all matters related to general usage and behavior.
+
+
+ Parameters:
+ config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+EFFICIENTFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`EfficientFormerImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
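+ # A minimal TensorFlow usage sketch (comments only; the checkpoint follows `_CHECKPOINT_FOR_DOC` above and
+ # `AutoImageProcessor` follows the inputs docstring):
+ #
+ #     from transformers import AutoImageProcessor, TFEfficientFormerModel
+ #
+ #     processor = AutoImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
+ #     model = TFEfficientFormerModel.from_pretrained("snap-research/efficientformer-l1-300")
+ #     inputs = processor(images=image, return_tensors="tf")  # `image` is e.g. a PIL.Image
+ #     outputs = model(**inputs)
+ #     outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)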
+
+@add_start_docstrings(
+ "The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.",
+ EFFICIENTFORMER_START_DOCSTRING,
+)
+class TFEfficientFormerModel(TFEfficientFormerPreTrainedModel):
+ def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
+ super().__init__(config, **kwargs)
+
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def call(
+ self,
+ pixel_values: Optional[tf.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[Tuple, TFBaseModelOutput]:
+ outputs = self.efficientformer(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "efficientformer", None) is not None:
+ with tf.name_scope(self.efficientformer.name):
+ self.efficientformer.build(None)
+
+
+@add_start_docstrings(
+ """
+ EfficientFormer Model transformer with an image classification head on top of the pooled last hidden state, e.g.
+ ImageNet.
+ """,
+ EFFICIENTFORMER_START_DOCSTRING,
+)
+class TFEfficientFormerForImageClassification(TFEfficientFormerPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: EfficientFormerConfig):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
+
+ # Classifier head
+ self.classifier = (
+ keras.layers.Dense(config.num_labels, name="classifier")
+ if config.num_labels > 0
+ else keras.layers.Activation("linear", name="classifier")
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=TFImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def call(
+ self,
+ pixel_values: Optional[tf.Tensor] = None,
+ labels: Optional[tf.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[tf.Tensor, TFImageClassifierOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.efficientformer(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFImageClassifierOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "efficientformer", None) is not None:
+ with tf.name_scope(self.efficientformer.name):
+ self.efficientformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ if hasattr(self.classifier, "name"):
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
+
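The classification head above average-pools the token dimension before the dense layer; a tiny sketch of what `tf.reduce_mean(sequence_output, axis=-2)` does, with dummy shapes (batch 2, 49 tokens, hidden size 448 are illustrative values only):

```python
import tensorflow as tf

sequence_output = tf.random.normal((2, 49, 448))   # (batch_size, sequence_length, hidden_size)
pooled = tf.reduce_mean(sequence_output, axis=-2)  # average over the token dimension
print(pooled.shape)                                # (2, 448) -> fed to the Dense classifier
```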
+
+@dataclass
+class TFEfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
+ """
+ Output type of [`EfficientFormerForImageClassificationWithTeacher`].
+
+ Args:
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores as the average of the cls_logits and distillation logits.
+ cls_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
+ class token).
+ distillation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
+ distillation token).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when
+ `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
+ the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when
+ `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ logits: tf.Tensor = None
+ cls_logits: tf.Tensor = None
+ distillation_logits: tf.Tensor = None
+ hidden_states: Optional[Tuple[tf.Tensor]] = None
+ attentions: Optional[Tuple[tf.Tensor]] = None
+
+
+@add_start_docstrings(
+ """
+ EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden
+ state and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
+
+ .. warning::
+ This model only supports inference. Fine-tuning with distillation (i.e. with a teacher) is not yet
+ supported.
+ """,
+ EFFICIENTFORMER_START_DOCSTRING,
+)
+class TFEfficientFormerForImageClassificationWithTeacher(TFEfficientFormerPreTrainedModel):
+ def __init__(self, config: EfficientFormerConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
+
+ # Classifier heads
+ self.classifier = (
+ keras.layers.Dense(config.num_labels, name="classifier")
+ if config.num_labels > 0
+ else keras.layers.Activation("linear", name="classifier")
+ )
+ self.distillation_classifier = (
+ keras.layers.Dense(config.num_labels, name="distillation_classifier")
+ if config.num_labels > 0
+ else keras.layers.Activation("linear", name="distillation_classifier")
+ )
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=TFEfficientFormerForImageClassificationWithTeacherOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def call(
+ self,
+ pixel_values: Optional[tf.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[tuple, TFEfficientFormerForImageClassificationWithTeacherOutput]:
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if training:
+ raise NotImplementedError(
+ "This model only supports inference. Fine-tuning with distillation (i.e. with a teacher) is not yet supported."
+ )
+
+ outputs = self.efficientformer(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+
+ cls_logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
+ distillation_logits = self.distillation_classifier(tf.reduce_mean(sequence_output, axis=-2))
+ logits = (cls_logits + distillation_logits) / 2
+
+ if not return_dict:
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
+ return output
+
+ return TFEfficientFormerForImageClassificationWithTeacherOutput(
+ logits=logits,
+ cls_logits=cls_logits,
+ distillation_logits=distillation_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "efficientformer", None) is not None:
+ with tf.name_scope(self.efficientformer.name):
+ self.efficientformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ if hasattr(self.classifier, "name"):
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
+ if getattr(self, "distillation_classifier", None) is not None:
+ if hasattr(self.distillation_classifier, "name"):
+ with tf.name_scope(self.distillation_classifier.name):
+ self.distillation_classifier.build([None, None, self.config.hidden_sizes[-1]])
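A brief sketch of the teacher variant's behaviour, assuming the `snap-research/efficientformer-l1-300` checkpoint (an assumption): the returned `logits` are the element-wise mean of the two heads, and the model is inference-only.

```python
import tensorflow as tf
from transformers import TFEfficientFormerForImageClassificationWithTeacher

model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
    "snap-research/efficientformer-l1-300"  # checkpoint name assumed
)

pixel_values = tf.random.uniform((1, 3, 224, 224))  # dummy input for illustration
outputs = model(pixel_values=pixel_values)
# logits is the average of the classification and distillation heads
tf.debugging.assert_near(outputs.logits, (outputs.cls_logits + outputs.distillation_logits) / 2)
```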
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..51a72a5366140362b65c77c7cd9dbb337ee992ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__init__.py
@@ -0,0 +1,73 @@
+# Copyright 2023 AdeptAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"],
+}
+
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_fuyu"] = ["FuyuImageProcessor"]
+ _import_structure["processing_fuyu"] = ["FuyuProcessor"]
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_fuyu"] = [
+ "FuyuForCausalLM",
+ "FuyuPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_fuyu import FuyuImageProcessor
+ from .processing_fuyu import FuyuProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_fuyu import (
+ FuyuForCausalLM,
+ FuyuPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
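Only `configuration_fuyu` is imported eagerly; everything else resolves through the lazy module on first access. A small sketch, assuming the optional torch and vision dependencies are installed:

```python
from transformers.models.fuyu import FuyuConfig, FuyuForCausalLM, FuyuImageProcessor, FuyuProcessor

config = FuyuConfig()              # loads configuration_fuyu
print(config.model_type)           # "fuyu"
print(FuyuForCausalLM.__module__)  # modeling_fuyu was loaded lazily on attribute access
```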
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..633f0a0467363b5f57928f68577d4fbdb57f20c7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2abb2f8606bc1d570a7d603870ed1dd3ec5b5e2b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/convert_fuyu_model_weights_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/convert_fuyu_model_weights_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce895476e13042d3acd28d3e6708dc0cd6bb1d4a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/convert_fuyu_model_weights_to_hf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22550794e2a1a2443891bb87fa45adf887a18bfb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8c6c975d618e3ebfa1bb9a544c2e186ac8bf6f8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..547ac3dcb4e55da1229c66184c8a77dffeffeb5f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py b/venv/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py
new file mode 100644
index 0000000000000000000000000000000000000000..40b09492d8f16130f90465a6890fb467ccbca296
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py
@@ -0,0 +1,211 @@
+# coding=utf-8
+# Copyright 2023 Adept AI and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Fuyu model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class FuyuConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate a
+ Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the
+ [adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 262144):
+ Vocabulary size of the Fuyu model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`FuyuForCausalLM`].
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 16384):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 36):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 64):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 16384):
+ The maximum sequence length that this model might ever be used with.
+ image_size (`int`, *optional*, defaults to 300):
+ The input image size.
+ patch_size (`int`, *optional*, defaults to 30):
+ The input vision transformer encoding patch size.
+ num_channels (`int`, *optional*, defaults to 3):
+ The input image number of channels.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to tie input and output embeddings.
+ rope_theta (`float`, *optional*, defaults to 25000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+ qk_layernorm (`bool`, *optional*, defaults to `True`):
+ Whether or not to normalize the Queries and Keys after projecting the hidden states
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio after applying the MLP to the hidden states.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio after computing the attention scores.
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
+ Percentage of the query and keys which will have rotary embedding.
+
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ The id of the *beginning-of-sequence* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ text_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize the language model. Defaults to a `persimmon` configuration when not provided.
+
+ ```python
+ >>> from transformers import FuyuConfig
+
+ >>> # Initializing a Fuyu fuyu-8b style configuration
+ >>> configuration = FuyuConfig()
+ ```"""
+
+ model_type = "fuyu"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=262144,
+ hidden_size=4096,
+ intermediate_size=16384,
+ num_hidden_layers=36,
+ num_attention_heads=64,
+ hidden_act="relu2",
+ max_position_embeddings=16384,
+ image_size=300,
+ patch_size=30,
+ num_channels=3,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=25000.0,
+ rope_scaling=None,
+ qk_layernorm=True,
+ hidden_dropout=0.0,
+ attention_dropout=0.0,
+ partial_rotary_factor=0.5,
+ pad_token_id=None,
+ bos_token_id=1,
+ eos_token_id=2,
+ text_config=None,
+ **kwargs,
+ ):
+ if text_config is None:
+ text_config = {
+ "vocab_size": vocab_size,
+ "max_position_embeddings": max_position_embeddings,
+ "hidden_size": hidden_size,
+ "intermediate_size": intermediate_size,
+ "num_hidden_layers": num_hidden_layers,
+ "num_attention_heads": num_attention_heads,
+ "hidden_act": hidden_act,
+ "initializer_range": initializer_range,
+ "layer_norm_eps": layer_norm_eps,
+ "use_cache": use_cache,
+ "rope_theta": rope_theta,
+ "rope_scaling": rope_scaling,
+ "qk_layernorm": qk_layernorm,
+ "hidden_dropout": hidden_dropout,
+ "attention_dropout": attention_dropout,
+ "partial_rotary_factor": partial_rotary_factor,
+ "pad_token_id": pad_token_id,
+ "bos_token_id": bos_token_id,
+ "eos_token_id": eos_token_id,
+ "tie_word_embeddings": tie_word_embeddings,
+ }
+ logger.info("`text_config` is None. Initializing the text model with default values.")
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "persimmon"
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
+
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self.qk_layernorm = qk_layernorm
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+ self.partial_rotary_factor = partial_rotary_factor
+ self._rope_scaling_validation()
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
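A short sketch of how `_rope_scaling_validation` behaves (values chosen for illustration):

```python
from transformers import FuyuConfig

# A well-formed scaling dict passes validation.
config = FuyuConfig(rope_scaling={"type": "linear", "factor": 2.0})

# A factor <= 1.0 is rejected.
try:
    FuyuConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # e.g. "`rope_scaling`'s factor field must be a float > 1, got 0.5"
```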
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d029c0d13ab850e80f3a36f0a48cb51360a8ce1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py
@@ -0,0 +1,134 @@
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import os
+import sys
+import warnings
+
+import flatdict
+import torch
+
+from transformers import FuyuConfig, FuyuForCausalLM, LlamaTokenizer
+
+
+try:
+ from transformers import LlamaTokenizerFast
+
+ tokenizer_class = LlamaTokenizerFast
+except ImportError as e:
+ warnings.warn(e)
+ warnings.warn(
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
+ )
+ tokenizer_class = LlamaTokenizer
+
+"""
+Sample usage: # TODO fix clone links from persimmon to fuyu
+```
+git clone https://github.com/adept-ai-labs/adept-inference
+wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_base_model_release.tar
+wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar
+python src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py --input_dir /path/to/downloaded/fuyu/weights/ --pt_model_path /path/to/downloaded/fuyu/weights/model_optim_rng.pt --ada_lib_path /path/to/adept-inference --output_dir /output/path
+```
+
+Thereafter, models can be loaded via:
+
+```py
+from transformers import AutoTokenizer, FuyuForCausalLM
+
+model = FuyuForCausalLM.from_pretrained("/output/path")
+tokenizer = AutoTokenizer.from_pretrained("/output/path")
+```
+
+Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
+come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
+"""
+
+
+KEYS_TO_MODIFY_MAPPING = {
+ "self_attention": "self_attn",
+ "language_model.encoder": "language_model.model",
+ "word_embeddings_for_head": "language_model.lm_head",
+ "language_model.embedding.word_embeddings": "language_model.model.embed_tokens",
+ "vit_encoder.linear_encoder": "vision_embed_tokens",
+}
+
+KEYS_TO_REMOVE = {
+ "rotary_emb.inv_freq",
+ "image_patch_projection",
+ "image_patch_projection.weight",
+ "image_patch_projection.bias",
+}
+
+
+def rename_state_dict(state_dict):
+ model_state_dict = {}
+ for key, value in state_dict.items():
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
+ if key_to_modify in key:
+ key = key.replace(key_to_modify, new_key)
+ # Drop keys that have no counterpart in the Hugging Face implementation.
+ if key in KEYS_TO_REMOVE:
+ continue
+ model_state_dict[key] = value
+ return model_state_dict
+
+
+def convert_fuyu_checkpoint(pytorch_dump_folder_path, ada_lib_path, pt_model_path, safe_serialization=False):
+ sys.path.insert(0, ada_lib_path)
+ model_state_dict_base = torch.load(pt_model_path, map_location="cpu")
+ state_dict = flatdict.FlatDict(model_state_dict_base["model"], ".")
+ state_dict = rename_state_dict(state_dict)
+
+ transformers_config = FuyuConfig()
+ model = FuyuForCausalLM(transformers_config).to(torch.bfloat16)
+ model.load_state_dict(state_dict)
+ model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization)
+ transformers_config.save_pretrained(pytorch_dump_folder_path)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--input_dir",
+ help="Location of Fuyu weights, which contains tokenizer.model and model folders",
+ )
+ parser.add_argument(
+ "--pt_model_path",
+ help="Location of Fuyu `model_optim_rng.pt`",
+ )
+ parser.add_argument(
+ "--output_dir",
+ help="Location to write HF model and tokenizer",
+ )
+ parser.add_argument(
+ "--ada_lib_path",
+ help="Location of original source code from adept to deserialize .pt checkpoint",
+ )
+ parser.add_argument("--safe_serialization", action="store_true", help="Whether or not to save using `safetensors`.")
+ args = parser.parse_args()
+ spm_path = os.path.join(args.input_dir, "adept_vocab.model")
+
+ convert_fuyu_checkpoint(
+ pytorch_dump_folder_path=args.output_dir,
+ pt_model_path=args.pt_model_path,
+ safe_serialization=args.safe_serialization,
+ ada_lib_path=args.ada_lib_path,
+ )
+ tokenizer = tokenizer_class(spm_path, bos_token="|ENDOFTEXT|", eos_token="|ENDOFTEXT|")
+ tokenizer.save_pretrained(args.output_dir)
+
+
+if __name__ == "__main__":
+ main()
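A sketch of driving the converter programmatically instead of through argparse; every path below is a placeholder:

```python
from transformers.models.fuyu.convert_fuyu_model_weights_to_hf import convert_fuyu_checkpoint

convert_fuyu_checkpoint(
    pytorch_dump_folder_path="/output/path",       # where the HF checkpoint is written
    ada_lib_path="/path/to/adept-inference",       # original Adept code, needed to deserialize the .pt file
    pt_model_path="/path/to/model_optim_rng.pt",   # released Fuyu checkpoint
    safe_serialization=True,
)
```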
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/image_processing_fuyu.py b/venv/lib/python3.10/site-packages/transformers/models/fuyu/image_processing_fuyu.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec5e1a36abb75ceb6cd9b817d3166451d559f611
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fuyu/image_processing_fuyu.py
@@ -0,0 +1,736 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Fuyu."""
+
+import math
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature
+from ...image_transforms import (
+ pad,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ is_valid_image,
+ make_list_of_images,
+ to_numpy_array,
+ validate_preprocess_arguments,
+)
+from ...utils import (
+ TensorType,
+ is_torch_available,
+ is_torch_device,
+ is_torch_dtype,
+ logging,
+ requires_backends,
+)
+
+
+if is_torch_available():
+ import torch
+
+
+logger = logging.get_logger(__name__)
+
+
+def make_list_of_list_of_images(
+ images: Union[List[List[ImageInput]], List[ImageInput], ImageInput],
+) -> List[List[ImageInput]]:
+ if is_valid_image(images):
+ return [[images]]
+
+ if isinstance(images, list) and all(isinstance(image, list) for image in images):
+ return images
+
+ if isinstance(images, list):
+ return [make_list_of_images(image) for image in images]
+
+ raise ValueError("images must be a list of lists of images, a list of images, or a single image.")
+
+
+class FuyuBatchFeature(BatchFeature):
+ """
+ BatchFeature class for Fuyu image processor and processor.
+
+ The outputs dictionary from the processors contains a mix of tensors and lists of tensors.
+ """
+
+ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
+ """
+ Convert the inner content to tensors.
+
+ Args:
+ tensor_type (`str` or [`~utils.TensorType`], *optional*):
+ The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
+ `None`, no modification is done.
+ """
+ if tensor_type is None:
+ return self
+
+ is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type)
+
+ def _convert_tensor(elem):
+ if is_tensor(elem):
+ return elem
+ return as_tensor(elem)
+
+ def _safe_convert_tensor(elem):
+ try:
+ return _convert_tensor(elem)
+ except: # noqa E722
+ if key == "overflowing_values":
+ raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
+ raise ValueError(
+ "Unable to create tensor, you should probably activate padding "
+ "with 'padding=True' to have batched tensors with the same length."
+ )
+
+ # Do the tensor conversion in batch
+ for key, value in self.items():
+ if isinstance(value, list) and isinstance(value[0], list):
+ # List[List[Any]] -> List[List[Tensor]]
+ self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value]
+ elif isinstance(value, list):
+ # List[Any] -> List[Tensor]
+ self[key] = [_safe_convert_tensor(elem) for elem in value]
+ else:
+ # Any -> Tensor
+ self[key] = _safe_convert_tensor(value)
+ return self
+
+ def to(self, *args, **kwargs) -> "BatchFeature":
+ """
+ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
+ different `dtypes` and sending the `BatchFeature` to a different `device`.
+
+ Args:
+ args (`Tuple`):
+ Will be passed to the `to(...)` function of the tensors.
+ kwargs (`Dict`, *optional*):
+ Will be passed to the `to(...)` function of the tensors.
+
+ Returns:
+ [`BatchFeature`]: The same instance after modification.
+ """
+ requires_backends(self, ["torch"])
+ import torch # noqa
+
+ new_data = {}
+ device = kwargs.get("device")
+ # Check if the args are a device or a dtype
+ if device is None and len(args) > 0:
+ # device should be always the first argument
+ arg = args[0]
+ if is_torch_dtype(arg):
+ # The first argument is a dtype
+ pass
+ elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
+ device = arg
+ else:
+ # it's something else
+ raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
+
+ def _to(elem):
+ # check if v is a floating point
+ if torch.is_floating_point(elem):
+ # cast and send to device
+ return elem.to(*args, **kwargs)
+ if device is not None:
+ return elem.to(device=device)
+
+ return elem
+
+ # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
+ for k, v in self.items():
+ if isinstance(v, list) and isinstance(v[0], list):
+ # Data structure is a list of lists
+ new_v = []
+ for elems in v:
+ new_v.append([_to(elem) for elem in elems])
+ new_data[k] = new_v
+ elif isinstance(v, list):
+ # Data structure is a list
+ new_data[k] = [_to(elem) for elem in v]
+ else:
+ new_data[k] = _to(v)
+ self.data = new_data
+ return self
+
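A small sketch of the nested conversion (array shapes invented, torch assumed to be installed): inner elements become tensors while the list-of-lists structure is kept, so samples with different numbers of patches are not stacked.

```python
import numpy as np
from transformers.models.fuyu.image_processing_fuyu import FuyuBatchFeature

features = FuyuBatchFeature(
    data={
        "image_patches": [
            [np.zeros((4, 2700), dtype=np.float32)],  # sample with 4 patches
            [np.zeros((6, 2700), dtype=np.float32)],  # sample with 6 patches
        ]
    },
    tensor_type="pt",
)
print(type(features["image_patches"][0][0]))  # <class 'torch.Tensor'>
```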
+
+class FuyuImageProcessor(BaseImageProcessor):
+ """
+ This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should
+ handle:
+
+ - Processing Images:
+ Taking a batch of images as input. If the images are variable-sized, it resizes them based on the desired patch
+ dimensions. The output image size is always `(img_h, img_w) = (1080, 1920)`.
+
+ Then, it patches up these images using the patchify_image function.
+
+ - Creating Image Input IDs:
+ For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For
+ variable-sized images, each line of patches is terminated with a newline ID.
+
+ - Image Patch Indices:
+ For each image patch, the code maintains an index where these patches should be inserted in a token stream.
+
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image to `size`.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the image to `size`.
+ padding_value (`float`, *optional*, defaults to 1.0):
+ The value to pad the image with.
+ padding_mode (`str`, *optional*, defaults to `"constant"`):
+ The padding mode to use when padding the image.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image.
+ image_mean (`float`, *optional*, defaults to 0.5):
+ The mean to use when normalizing the image.
+ image_std (`float`, *optional*, defaults to 0.5):
+ The standard deviation to use when normalizing the image.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to `1 / 255`):
+ The factor to use when rescaling the image.
+ patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
+ """
+
+ model_input_names = [
+ "images",
+ "image_input_ids",
+ "image_patches",
+ "image_patch_indices_per_batch",
+ "image_patch_indices_per_subsequence",
+ ]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_pad: bool = True,
+ padding_value: float = 1.0,
+ padding_mode: str = "constant",
+ do_normalize: bool = True,
+ image_mean: Union[float, List[float]] = 0.5,
+ image_std: Union[float, List[float]] = 0.5,
+ do_rescale: bool = True,
+ rescale_factor: float = 1 / 255,
+ patch_size: Optional[Dict[str, int]] = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.do_resize = do_resize
+ self.size = size if size is not None else {"height": 1080, "width": 1920}
+ self.resample = resample
+ self.do_pad = do_pad
+ self.padding_value = padding_value
+ self.padding_mode = padding_mode
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean
+ self.image_std = image_std
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30}
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_pad",
+ "padding_value",
+ "padding_mode",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "do_rescale",
+ "rescale_factor",
+ "patch_size",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ image_height, image_width = get_image_size(image, input_data_format)
+ target_height, target_width = size["height"], size["width"]
+
+ if image_width <= target_width and image_height <= target_height:
+ return image
+
+ height_scale_factor = target_height / image_height
+ width_scale_factor = target_width / image_width
+ optimal_scale_factor = min(height_scale_factor, width_scale_factor)
+
+ new_height = int(image_height * optimal_scale_factor)
+ new_width = int(image_width * optimal_scale_factor)
+
+ scaled_image = resize(
+ image=image,
+ size=(new_height, new_width),
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ return scaled_image
+
+ def pad_image(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ mode: str = "constant",
+ constant_values: float = 1.0,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Pad an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to pad.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The data format of the output image. If unset, the same format as the input image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ image_height, image_width = get_image_size(image, input_data_format)
+ target_height, target_width = size["height"], size["width"]
+ padding_top = 0
+ padding_left = 0
+ padding_bottom = target_height - image_height
+ padding_right = target_width - image_width
+ padded_image = pad(
+ image,
+ padding=((padding_top, padding_bottom), (padding_left, padding_right)),
+ mode=mode,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ return padded_image
+
+ def preprocess(
+ self,
+ images,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ resample: Optional[PILImageResampling] = None,
+ do_pad: Optional[bool] = None,
+ padding_value: Optional[float] = None,
+ padding_mode: Optional[str] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[float] = None,
+ image_std: Optional[float] = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ patch_size: Optional[Dict[str, int]] = None,
+ data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ return_tensors: Optional[TensorType] = None,
+ ):
+ """
+
+ Utility function to preprocess the images and extract necessary information about original formats.
+
+ Args:
+ images (`ImageInput`):
+ Images to preprocess. Expects a single image, a list of images, or a list of lists of images. Pixel
+ values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image to `size`.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the image to `size`.
+ padding_value (`float`, *optional*, defaults to `self.padding_value`):
+ The value to pad the image with.
+ padding_mode (`str`, *optional*, defaults to `self.padding_mode`):
+ The padding mode to use when padding the image.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float`, *optional*, defaults to `self.image_mean`):
+ The mean to use when normalizing the image.
+ image_std (`float`, *optional*, defaults to `self.image_std`):
+ The standard deviation to use when normalizing the image.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ The factor to use when rescaling the image.
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format of the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ """
+
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ resample = resample if resample is not None else self.resample
+ do_pad = do_pad if do_pad is not None else self.do_pad
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ padding_value = padding_value if padding_value is not None else self.padding_value
+ padding_mode = padding_mode if padding_mode is not None else self.padding_mode
+ patch_size = patch_size if patch_size is not None else self.patch_size
+
+ if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images):
+ raise ValueError("Multiple images for a single sample are not yet supported.")
+
+ batch_images = make_list_of_list_of_images(images)
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+ # All transformations expect numpy arrays.
+ batch_images = [[to_numpy_array(image) for image in images] for images in batch_images]
+
+ if is_scaled_image(batch_images[0][0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(batch_images[0][0])
+
+ original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
+
+ if do_resize:
+ batch_images = [
+ [self.resize(image, size=size, input_data_format=input_data_format) for image in images]
+ for images in batch_images
+ ]
+
+ image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
+ image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]
+ image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]
+
+ # scale_h is the same as scale_w
+ image_scale_factors = [
+ [resized_size[0] / original_size[0]]
+ for original_size, resized_size in zip(original_image_sizes, image_sizes)
+ ]
+
+ if do_pad:
+ batch_images = [
+ [
+ self.pad_image(
+ image,
+ size=size,
+ mode=padding_mode,
+ constant_values=padding_value,
+ input_data_format=input_data_format,
+ )
+ for image in images
+ ]
+ for images in batch_images
+ ]
+
+ if do_rescale:
+ batch_images = [
+ [self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
+ for images in batch_images
+ ]
+
+ if do_normalize:
+ batch_images = [
+ [
+ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+ for images in batch_images
+ ]
+
+ if data_format is not None:
+ batch_images = [
+ [to_channel_dimension_format(image, data_format, input_data_format) for image in images]
+ for images in batch_images
+ ]
+
+ data = {
+ "images": batch_images,
+ "image_unpadded_heights": image_unpadded_heights,
+ "image_unpadded_widths": image_unpadded_widths,
+ "image_scale_factors": image_scale_factors,
+ }
+ return FuyuBatchFeature(data=data, tensor_type=return_tensors)
+
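A usage sketch with a synthetic image (sizes chosen for illustration): an image that already fits inside 1080x1920 is not resized, only padded, so the pre-padding size and a scale factor of 1.0 are reported.

```python
import numpy as np
from transformers import FuyuImageProcessor

processor = FuyuImageProcessor()
image = np.random.randint(0, 256, (540, 960, 3), dtype=np.uint8)  # (height, width, channels)

batch = processor.preprocess(image)       # no return_tensors -> numpy arrays are kept
print(batch["images"][0][0].shape)        # (3, 1080, 1920): padded to the full canvas, channels first
print(batch["image_unpadded_heights"])    # [[540]]  height before padding
print(batch["image_scale_factors"])       # [[1.0]]  the image fit, so it was not downscaled
```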
+ def get_num_patches(self, image_height: int, image_width: int, patch_size: Dict[str, int] = None) -> int:
+ """
+ Calculate number of patches required to encode an image.
+
+ Args:
+ image_height (`int`):
+ Height of the image.
+ image_width (`int`):
+ Width of the image.
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
+ """
+ patch_size = patch_size if patch_size is not None else self.patch_size
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
+
+ if image_height % patch_height != 0:
+ raise ValueError(f"{image_height=} must be divisible by {patch_height}")
+ if image_width % patch_width != 0:
+ raise ValueError(f"{image_width=} must be divisible by {patch_width}")
+
+ num_patches_per_dim_h = image_height // patch_height
+ num_patches_per_dim_w = image_width // patch_width
+ num_patches = num_patches_per_dim_h * num_patches_per_dim_w
+ return num_patches
+
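A worked example with the processor defaults, i.e. the full 1080x1920 canvas and 30x30 patches:

```python
from transformers import FuyuImageProcessor

processor = FuyuImageProcessor()
# 1080 // 30 = 36 rows and 1920 // 30 = 64 columns -> 36 * 64 = 2304 patches
print(processor.get_num_patches(image_height=1080, image_width=1920))  # 2304
```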
+ def patchify_image(self, image: "torch.Tensor", patch_size: Optional[Dict[str, int]] = None) -> "torch.Tensor":
+ """
+ Convert an image into a tensor of patches.
+
+ Args:
+ image (`torch.Tensor`):
+ Image to convert. Shape: [batch, channels, height, width]
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
+ """
+ requires_backends(self, ["torch"])
+ patch_size = patch_size if patch_size is not None else self.patch_size
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
+
+ # TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871
+ # torch implementation is faster but does not handle non-squares
+
+ batch_size, channels, _, _ = image.shape
+ unfolded_along_height = image.unfold(2, patch_height, patch_height)
+ patches = unfolded_along_height.unfold(3, patch_width, patch_width)
+ patches = patches.contiguous()
+ patches = patches.view(batch_size, channels, -1, patch_height, patch_width)
+ patches = patches.permute(0, 2, 3, 4, 1)
+ patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)
+ return patches
+
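A small sketch with a dummy tensor (torch assumed installed): a 3x60x90 image with the default 30x30 patch size yields 2x3 = 6 flattened patches of length 3*30*30 = 2700.

```python
import torch
from transformers import FuyuImageProcessor

processor = FuyuImageProcessor()
image = torch.zeros(1, 3, 60, 90)          # [batch, channels, height, width]
patches = processor.patchify_image(image)
print(patches.shape)                       # torch.Size([1, 6, 2700])
```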
+ def preprocess_with_tokenizer_info(
+ self,
+ image_input: "torch.Tensor",
+ image_present: "torch.Tensor",
+ image_unpadded_h: "torch.Tensor",
+ image_unpadded_w: "torch.Tensor",
+ image_placeholder_id: int,
+ image_newline_id: int,
+ variable_sized: bool,
+ patch_size: Optional[Dict[str, int]] = None,
+ ) -> FuyuBatchFeature:
+ """Process images for model input. In particular, variable-sized images are handled here.
+
+ Args:
+ image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):
+ Tensor of images padded to model input size.
+ image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):
+ Tensor of 1s and 0s indicating whether an image is present.
+ image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):
+ Tensor of unpadded image heights.
+ image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):
+ Tensor of unpadded image widths.
+ image_placeholder_id (int):
+ The id of the image placeholder token. Comes from an associated tokenizer.
+ image_newline_id (int):
+ The id of the image newline token. Comes from an associated tokenizer.
+ variable_sized (bool):
+ Whether to process images as variable-sized.
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
+ Size of the patches.
+ """
+ requires_backends(self, ["torch"])
+
+ patch_size = patch_size if patch_size is not None else self.patch_size
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
+
+ # Only images that are present.
+ images: List[List[torch.Tensor]] = []
+ batch_image_patches: List[List[torch.Tensor]] = []
+ # Image input ids for every subsequence, including ones with no image present.
+ batch_image_input_ids: List[List[torch.Tensor]] = []
+ for batch_index in range(image_input.shape[0]):
+ image_input_ids = []
+ image_patches = []
+ for subseq_index in range(image_input.shape[1]):
+ if image_present[batch_index, subseq_index]:
+ image = image_input[batch_index, subseq_index]
+ image_height, image_width = image.shape[1], image.shape[2]
+ if variable_sized:
+ # The min() is required here due to floating point issues:
+ # math.ceil(torch.tensor(300).cuda() / 30) == 11
+ new_h = min(
+ image_height,
+ math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height,
+ )
+ new_w = min(
+ image_width,
+ math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width,
+ )
+ image = image[:, :new_h, :new_w]
+ image_height, image_width = new_h, new_w
+
+ num_patches = self.get_num_patches(image_height=image_height, image_width=image_width)
+ tensor_of_image_ids = torch.full(
+ [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device
+ )
+ patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0)
+ assert num_patches == patches.shape[0]
+
+ if variable_sized:
+ # Now terminate each line with |NEWLINE|.
+ tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)
+ newline_ids = torch.full(
+ [tensor_of_image_ids.shape[0], 1],
+ image_newline_id,
+ dtype=torch.int32,
+ device=image_input.device,
+ )
+ tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)
+ tensor_of_image_ids = tensor_of_image_ids.reshape(-1)
+
+ images.append([image])
+ image_input_ids.append(tensor_of_image_ids)
+ image_patches.append(patches)
+ else:
+ image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))
+
+ batch_image_input_ids.append(image_input_ids)
+ batch_image_patches.append(image_patches)
+
+ # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in
+ # the stream.
+ image_patch_indices_per_batch: List[List[torch.Tensor]] = []
+ image_patch_indices_per_subsequence: List[List[torch.Tensor]] = []
+
+ for sample_image_input_ids in batch_image_input_ids:
+ index_offset = 0
+ per_batch_indices = []
+ per_subsequence_indices = []
+ for subseq_image_input_ids in sample_image_input_ids:
+ # Indices of image patches.
+ patches_mask = subseq_image_input_ids == image_placeholder_id
+ num_patches = torch.count_nonzero(patches_mask)
+ indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(
+ subseq_image_input_ids
+ )
+
+ # Place those indices in the image input ids token stream, with -1 representing non-index tokens.
+ indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)
+ indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)
+ patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]
+
+ indices_in_stream_per_batch[patches_inds] = indices + index_offset
+ indices_in_stream_per_subsequence[patches_inds] = indices
+
+ per_batch_indices.append(indices_in_stream_per_batch)
+ per_subsequence_indices.append(indices_in_stream_per_subsequence)
+ index_offset += num_patches
+
+ image_patch_indices_per_batch.append(per_batch_indices)
+ image_patch_indices_per_subsequence.append(per_subsequence_indices)
+
+ return FuyuBatchFeature(
+ data={
+ "images": images,
+ "image_input_ids": batch_image_input_ids,
+ "image_patches": batch_image_patches,
+ "image_patch_indices_per_batch": image_patch_indices_per_batch,
+ "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence,
+ }
+ )
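For a variable-sized crop, the placeholder ids are laid out row by row and every row is closed by the newline id. A hand-written illustration of the reshape-and-concatenate step above (both token ids are invented for the sketch), for a 60x90 crop with 30x30 patches:

```python
import torch

image_placeholder_id, image_newline_id = 100, 200  # invented ids, illustration only
num_rows, num_cols = 60 // 30, 90 // 30             # 2 rows x 3 columns of patches

ids = torch.full((num_rows * num_cols,), image_placeholder_id, dtype=torch.int32)
ids = ids.reshape(num_rows, num_cols)
newlines = torch.full((num_rows, 1), image_newline_id, dtype=torch.int32)
stream = torch.cat([ids, newlines], dim=1).reshape(-1)
print(stream.tolist())  # [100, 100, 100, 200, 100, 100, 100, 200]
```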
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/modeling_fuyu.py b/venv/lib/python3.10/site-packages/transformers/models/fuyu/modeling_fuyu.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e9a41954aee9c96945a787f08d698091baa0d4b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fuyu/modeling_fuyu.py
@@ -0,0 +1,358 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Fuyu model."""
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...modeling_outputs import CausalLMOutputWithPast
+from ...modeling_utils import PreTrainedModel
+from ...models.auto.modeling_auto import AutoModelForCausalLM
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_fuyu import FuyuConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "FuyuConfig"
+
+
+FUYU_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`FuyuConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Fuyu Model outputting raw hidden-states without any specific head on top.",
+ FUYU_START_DOCSTRING,
+)
+class FuyuPreTrainedModel(PreTrainedModel):
+ config_class = FuyuConfig
+ base_model_prefix = "fuyu"
+ supports_gradient_checkpointing = True
+ _no_split_modules = []
+ _skip_keys_device_placement = "past_key_values"
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+FUYU_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+        image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size x patch_size x num_channels)`, *optional*):
+ Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
+ hidden size of the model.
+        image_patches_indices (`torch.LongTensor` of shape `(batch_size, num_total_patches + number_of_newline_tokens + number_of_text_tokens)`, *optional*):
+            Indices indicating at which positions the `image_patches` have to be inserted in `inputs_embeds`.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+    "Fuyu Model with a language modeling head on top for causal language modeling conditioned on image patches and text.",
+ FUYU_START_DOCSTRING,
+)
+class FuyuForCausalLM(FuyuPreTrainedModel):
+ def __init__(self, config: FuyuConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+ self.language_model = AutoModelForCausalLM.from_config(config.text_config)
+
+ self.vision_embed_tokens = nn.Linear(
+ config.patch_size * config.patch_size * config.num_channels, config.hidden_size
+ )
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
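+        # Sizing note (illustrative, not from the original file): with the reference Fuyu-8b configuration values
+        # assumed here for illustration (patch_size=30, num_channels=3, hidden_size=4096), this linear layer
+        # projects each flattened 30 * 30 * 3 = 2700-dim patch vector to a 4096-dim embedding, so image patches
+        # and text tokens share the same embedding space inside the language model.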
+
+ def get_input_embeddings(self):
+ return self.language_model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.language_model.set_input_embeddings(value)
+
+ def gather_continuous_embeddings(
+ self,
+ word_embeddings: torch.Tensor,
+ continuous_embeddings: List[torch.Tensor],
+ image_patch_input_indices: torch.Tensor,
+ ) -> torch.Tensor:
+ """This function places the continuous_embeddings into the word_embeddings at the locations
+ indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous
+ embeddings.
+
+ Args:
+ word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Tensor of word embeddings.
+ continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
+ Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape
+ [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative
+ indices in image_patch_input_indices for that batch element.
+ image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Tensor of indices of the image patches in the input_ids tensor.
+ """
+ if not (word_embeddings.shape[0] == len(continuous_embeddings)):
+ raise ValueError(
+ f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}"
+ )
+
+ output_embeddings = word_embeddings.clone()
+ for batch_idx in range(word_embeddings.shape[0]):
+ # First, find the positions of all the non-negative values in image_patch_input_indices, those are the
+ # positions in word_embeddings that we want to replace with content from continuous_embeddings.
+ dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]
+ # Next look up those indices in image_patch_input_indices to find the indices in continuous_embeddings that we
+ # want to use to replace the values in word_embeddings.
+ src_indices = image_patch_input_indices[batch_idx][dst_indices]
+ # Check if we have more indices than embeddings. Note that we could have fewer indices if images got truncated.
+ if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:
+ raise ValueError(
+ f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match "
+ f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}."
+ )
+ output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices]
+ return output_embeddings
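+    # Worked example (illustrative, not from the original file): with word_embeddings of shape (1, 4, hidden)
+    # and image_patch_input_indices = tensor([[-1, 0, 1, -1]]), dst_indices is tensor([1, 2]) and src_indices is
+    # tensor([0, 1]); the two rows of continuous_embeddings[0] overwrite positions 1 and 2 of the word embeddings
+    # while the text positions 0 and 3 are left untouched.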
+
+ @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+        image_patches: torch.Tensor = None,  # [batch_size, num_total_patches, patch_size x patch_size x num_channels]
+ image_patches_indices: torch.Tensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import FuyuProcessor, FuyuForCausalLM
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
+ >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")
+
+ >>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> prompt = "Generate a coco-style caption.\n"
+
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=7)
+ >>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
+ >>> print(generation_text[0])
+ A blue bus parked on the side of a road.
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+
+ if past_key_values is not None:
+ past_key_values_length = past_key_values[0][0].shape[2]
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
+ if image_patches is not None and past_key_values is None:
+ patch_embeddings = [
+ self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype))
+ .squeeze(0)
+ .to(inputs_embeds.device)
+ for patch in image_patches
+ ]
+ inputs_embeds = self.gather_continuous_embeddings(
+ word_embeddings=inputs_embeds,
+ continuous_embeddings=patch_embeddings,
+ image_patch_input_indices=image_patches_indices,
+ )
+
+ outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ labels=labels,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ return outputs
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ image_patches=None,
+ image_patches_indices=None,
+ **kwargs,
+ ):
+ if past_key_values:
+ input_ids = input_ids[:, -1:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -1].unsqueeze(-1)
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ if image_patches_indices is not None:
+ model_inputs["image_patches_indices"] = image_patches_indices
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ "image_patches_indices": image_patches_indices if past_key_values is None else None,
+ "image_patches": image_patches if past_key_values is None else None,
+ }
+ )
+ return model_inputs
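+    # Generation-flow note (illustrative, not from the original file): on the first step past_key_values is None,
+    # so the full prompt plus image_patches and image_patches_indices are forwarded and the patch embeddings are
+    # merged into inputs_embeds; on cached steps only the last token id is kept (input_ids[:, -1:]) and the image
+    # inputs are dropped, since the visual context already lives in the key/value cache.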
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fuyu/processing_fuyu.py b/venv/lib/python3.10/site-packages/transformers/models/fuyu/processing_fuyu.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffa215f1a0652ec5df567ddf28b8975cbb2e467c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fuyu/processing_fuyu.py
@@ -0,0 +1,694 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Image/Text processor class for Fuyu
+"""
+import re
+from typing import Dict, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import PaddingStrategy, TruncationStrategy
+from ...utils import TensorType, is_torch_available, logging, requires_backends
+
+
+if is_torch_available():
+ from .image_processing_fuyu import FuyuBatchFeature
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_torch_available():
+ import torch
+
+
+TEXT_REPR_BBOX_OPEN = "<box>"
+TEXT_REPR_BBOX_CLOSE = "</box>"
+TEXT_REPR_POINT_OPEN = "<point>"
+TEXT_REPR_POINT_CLOSE = "</point>"
+
+TOKEN_BBOX_OPEN_STRING = "<0x00>"  # <bbox>
+TOKEN_BBOX_CLOSE_STRING = "<0x01>"  # </bbox>
+TOKEN_POINT_OPEN_STRING = "<0x02>"  # <point>
+TOKEN_POINT_CLOSE_STRING = "<0x03>"  # </point>
+BEGINNING_OF_ANSWER_STRING = "<0x04>"  # <boa>
+
+
+def full_unpacked_stream_to_tensor(
+ all_bi_tokens_to_place: List[int],
+ full_unpacked_stream: List["torch.Tensor"],
+ fill_value: int,
+ batch_size: int,
+ new_seq_len: int,
+ offset: int,
+) -> "torch.Tensor":
+ """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does
+ the required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
+ """
+
+ assert len(all_bi_tokens_to_place) == batch_size
+ assert len(full_unpacked_stream) == batch_size
+
+ # Create padded tensors for the full batch.
+ new_padded_tensor = torch.full(
+ [batch_size, new_seq_len],
+ fill_value=fill_value,
+ dtype=full_unpacked_stream[0].dtype,
+ device=full_unpacked_stream[0].device,
+ )
+
+ # Place each batch entry into the batch tensor.
+ for bi in range(batch_size):
+ tokens_to_place = all_bi_tokens_to_place[bi]
+ new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]
+
+ return new_padded_tensor
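+# Worked example (illustrative, not from the original file): with batch_size=2, new_seq_len=5, offset=0,
+# fill_value=0, all_bi_tokens_to_place=[3, 5] and unpacked streams of lengths 3 and 5, the first row becomes
+# [t0, t1, t2, 0, 0] and the second row is copied in full; every batch entry is right-padded with fill_value
+# up to new_seq_len.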
+
+
+def construct_full_unpacked_stream(
+ num_real_text_tokens: Union[List[List[int]], "torch.Tensor"],
+ input_stream: "torch.Tensor",
+ image_tokens: List[List["torch.Tensor"]],
+ batch_size: int,
+ num_sub_sequences: int,
+) -> List["torch.Tensor"]:
+ """Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required
+ padding to account for images and then unpacks the subsequences to create a single sequence per item in the batch.
+ Returns a list of tensors, one for each item in the batch."""
+
+ all_bi_stream = []
+
+ for batch_index in range(batch_size):
+ all_si_stream = []
+
+ # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence
+ # and append to lists. We use lists rather than tensors because each subsequence is variable-sized.
+ # TODO Remove this logic in a subsequent release since subsequences are not supported.
+ image_adjustment = image_tokens[batch_index][0]
+ subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0)
+ num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0]
+ all_si_stream.append(subsequence_stream[:num_real_tokens])
+ all_bi_stream.append(torch.cat(all_si_stream, dim=0))
+
+ return all_bi_stream
+
+
+def _replace_string_repr_with_token_tags(prompt: str) -> str:
+ prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING)
+ prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING)
+ prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING)
+ prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING)
+ return prompt
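+# Example (illustrative, not from the original file):
+#     _replace_string_repr_with_token_tags("<box>1, 2, 3, 4</box>")
+# returns "<0x00>1, 2, 3, 4<0x01>", i.e. the human-readable tags are swapped for the special byte tokens that the
+# tokenizer actually knows about.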
+
+
+def _segment_prompt_into_text_token_conversions(prompt: str) -> List:
+ """
+ Given a string prompt, converts the prompt into a list of TextTokenConversions.
+ """
+    # Wherever we notice one of the opening/closing bbox or point tokens, we split the prompt there.
+ prompt_text_list: List = []
+ regex_pattern = re.compile(
+ f"({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})"
+ )
+ # Split by the regex pattern
+ prompt_split = regex_pattern.split(prompt)
+ for i, elem in enumerate(prompt_split):
+ if len(elem) == 0 or elem in [
+ TOKEN_BBOX_OPEN_STRING,
+ TOKEN_BBOX_CLOSE_STRING,
+ TOKEN_POINT_OPEN_STRING,
+ TOKEN_POINT_CLOSE_STRING,
+ ]:
+ continue
+ prompt_text_list.append(
+ (elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING])
+ )
+ return prompt_text_list
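+# Example (illustrative, not from the original file): for the already tag-substituted prompt
+# "Describe <0x00>10, 20, 30, 40<0x01> please", the split yields
+# [("Describe ", False), ("10, 20, 30, 40", True), (" please", False)], where the boolean marks whether the
+# segment follows an opening bbox/point token and therefore contains coordinates that still need rescaling.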
+
+
+def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]:
+ """
+ This function transforms the prompt in the following fashion:
+    - map the <box></box> and <point></point> tags to their respective special tokens
+ - extract the coordinates from the tag
+ - transform the coordinates into the transformed image space
+ - return the prompt tokens with the transformed coordinates and new tags
+
+    Bounding boxes and points MUST be in the following format: <box>y1, x1, y2, x2</box> <point>x, y</point>. The
+    spaces and punctuation added above are NOT optional.
+ """
+    # Each segment is represented as a (text, is_within_tag) pair.
+
+    # We want to do the following: tokenize the prompt normally; when we hit a point or box tag, tokenize its
+    # contents with _transform_within_tags, then continue tokenizing normally after the closing tag.
+ # First, we replace the point and box tags with their respective tokens
+ prompt = _replace_string_repr_with_token_tags(prompt)
+ # Tokenize the prompt
+ # Convert prompt into a list split
+ prompt_text_list = _segment_prompt_into_text_token_conversions(prompt)
+ transformed_prompt_tokens: List[int] = []
+ for elem in prompt_text_list:
+ if elem[1]:
+ # This is a location, we need to tokenize it
+ within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer)
+ # Surround the text with the open and close tags
+ transformed_prompt_tokens.extend(within_tag_tokenized)
+ else:
+ transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids)
+ return transformed_prompt_tokens
+
+
+def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]:
+ """
+    Given a bounding box of the fashion <box>1, 2, 3, 4</box> | <point>1, 2</point>, this function is responsible for
+    converting 1, 2, 3, 4 into tokens of 1 2 3 4 without any commas.
+ """
+ # Convert the text into a list of strings.
+ num_int_strs = text.split(",")
+ if len(num_int_strs) == 2:
+        # Two coordinates correspond to a point, four to a bounding box; pick the matching tag tokens.
+ token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING]
+ token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING]
+ else:
+ token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING]
+ token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING]
+
+    # Strip whitespace and convert the coordinate strings to floats.
+    num_ints = [float(num.strip()) for num in num_int_strs]
+    # Scale to the transformed image size.
+ if len(num_ints) == 2:
+ num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor)
+ elif len(num_ints) == 4:
+ num_ints_translated = scale_bbox_to_transformed_image(
+ top=num_ints[0],
+ left=num_ints[1],
+ bottom=num_ints[2],
+ right=num_ints[3],
+ scale_factor=scale_factor,
+ )
+ else:
+ raise ValueError(f"Invalid number of ints: {len(num_ints)}")
+    # Tokenize the coordinate strings, skipping the open/close tag tokens (those are added back around the result).
+ tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated]
+ return [token_space_open_string] + tokens + [token_space_close_string]
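+# Worked example (illustrative, not from the original file): for text "10, 20, 30, 40" and scale_factor=1.0 the
+# coordinates are first halved (note the matching "* 2" in FuyuProcessor.post_process_box_coordinates), giving
+# [5, 10, 15, 20]; the returned list is then
+# [vocab["<0x00>"], vocab["5"], vocab["10"], vocab["15"], vocab["20"], vocab["<0x01>"]].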
+
+
+def _tokenize_prompts_with_image_and_batch(
+ tokenizer,
+ prompts: List[List[str]],
+ scale_factors: Optional[List[List["torch.Tensor"]]],
+ max_tokens_to_generate: int,
+ max_position_embeddings: int,
+ add_BOS: bool, # Same issue with types as above
+ add_beginning_of_answer_token: bool,
+) -> Tuple["torch.Tensor", "torch.Tensor"]:
+ """
+ Given a set of prompts and number of tokens to generate:
+ - tokenize prompts
+ - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate
+ - pad all the sequences to this length so we can convert them into a 3D tensor.
+ """
+
+    # If not tool use, transform the coordinates while tokenizing
+ if scale_factors is not None:
+ transformed_prompt_tokens = []
+ for prompt_seq, scale_factor_seq in zip(prompts, scale_factors):
+ transformed_prompt_tokens.append(
+ [
+ _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer)
+ for prompt, scale_factor in zip(prompt_seq, scale_factor_seq)
+ ]
+ )
+ else:
+ transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts]
+
+ prompts_tokens = transformed_prompt_tokens
+
+ if add_BOS:
+        bos_token = tokenizer.vocab["<s>"]
+ else:
+ bos_token = tokenizer.vocab["|ENDOFTEXT|"]
+ prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens]
+ if add_beginning_of_answer_token:
+ boa = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING]
+        # Only add the beginning-of-answer token to the last subsequence since that is what will be completed.
+ for token_seq in prompts_tokens:
+ token_seq[-1].append(boa)
+
+ # Now we have a list of list of tokens which each list has a different
+ # size. We want to extend this list to:
+ # - incorporate the tokens that need to be generated
+ # - make all the sequences equal length.
+ # Get the prompts length.
+
+ prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens]
+ # Get the max prompts length.
+ max_prompt_len: int = np.max(prompts_length)
+    # Number of tokens in each sample of the batch.
+ samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings)
+ if max_prompt_len + max_tokens_to_generate > max_position_embeddings:
+        logger.warning(
+            f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate} "
+            f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible."
+        )
+ # Now update the list of list to be of the same size: samples_length.
+ for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length):
+ for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq):
+ if len(prompt_tokens) > samples_length:
+ raise ValueError("Length of subsequence prompt exceeds sequence length.")
+ padding_size = samples_length - prompt_length
+ prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size)
+
+ # Now we are in a structured format, we can convert to tensors.
+ prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64)
+ prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64)
+
+ return prompts_tokens_tensor, prompts_length_tensor
+
+
+# Simplified assuming self.crop_top = self.padding_top = 0
+def original_to_transformed_h_coords(original_coords, scale_h):
+ return np.round(original_coords * scale_h).astype(np.int32)
+
+
+# Simplified assuming self.crop_left = self.padding_left = 0
+def original_to_transformed_w_coords(original_coords, scale_w):
+ return np.round(original_coords * scale_w).astype(np.int32)
+
+
+def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]:
+ x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0]
+ y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0]
+ return [x_scaled, y_scaled]
+
+
+def scale_bbox_to_transformed_image(
+ top: float, left: float, bottom: float, right: float, scale_factor: float
+) -> List[int]:
+ top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0]
+ left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0]
+ bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0]
+ right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0]
+ return [top_scaled, left_scaled, bottom_scaled, right_scaled]
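+# Worked example (illustrative, not from the original file): for an original box (top=100, left=200, bottom=300,
+# right=400) and scale_factor=0.9, each value is halved and then scaled, giving [45, 90, 135, 180].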
+
+
+class FuyuProcessor(ProcessorMixin):
+ r"""
+ Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor.
+
+ [`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the
+ [`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`FuyuImageProcessor`]):
+ The image processor is a required input.
+ tokenizer ([`LlamaTokenizerFast`]):
+ The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "FuyuImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor, tokenizer):
+ super().__init__(image_processor=image_processor, tokenizer=tokenizer)
+ self.image_processor = image_processor
+ self.tokenizer = tokenizer
+ self.max_tokens_to_generate = 10
+ self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it?
+ self.pad_token_id = 0
+ self.dummy_image_index = -1
+
+ def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool):
+ max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs)
+ max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs)
+
+ batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []}
+
+ for entry in model_inputs:
+ for key, tensor in entry.items():
+ if key == "input_ids":
+ num_padding_tokens = max_length_input_ids - tensor.shape[1]
+ padded_input_ids = torch.cat(
+ [
+ torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long),
+ tensor,
+ ],
+ dim=1,
+ )
+ batched_inputs[key].append(padded_input_ids)
+
+ attention_mask = torch.cat(
+ [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)],
+ dim=1,
+ )
+ batched_inputs["attention_mask"].append(attention_mask)
+
+ elif key == "image_patches":
+ # For image_patches, we don't pad but just append them to the list.
+ batched_inputs[key].append(tensor)
+
+ else: # for image_patches_indices
+ num_padding_indices = max_length_image_patch_indices - tensor.shape[1]
+ padded_indices = torch.cat(
+ [
+ torch.full(
+ (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long
+ ),
+ tensor,
+ ],
+ dim=1,
+ )
+ batched_inputs[key].append(padded_indices)
+ batched_keys = ["input_ids", "image_patches_indices"]
+ if return_attention_mask:
+ batched_keys.append("attention_mask")
+ for key in batched_keys:
+ batched_inputs[key] = torch.cat(batched_inputs[key], dim=0)
+
+ return batched_inputs
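+    # Padding sketch (illustrative, not from the original file): for two samples with input_ids lengths 7 and 10,
+    # the shorter sample is left-padded with 3 pad tokens (pad_token_id=0) and gets the attention mask
+    # [0, 0, 0, 1, 1, 1, 1, 1, 1, 1]; its image_patches_indices row is likewise left-padded with the dummy index
+    # -1 so the non-negative patch indices stay aligned with the shifted token positions.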
+
+ def get_sample_encoding(
+ self,
+ prompts,
+ scale_factors,
+ image_unpadded_heights,
+ image_unpadded_widths,
+ image_placeholder_id,
+ image_newline_id,
+ tensor_batch_images,
+ ):
+ image_present = torch.ones(1, 1, 1)
+ model_image_input = self.image_processor.preprocess_with_tokenizer_info(
+ image_input=tensor_batch_images,
+ image_present=image_present,
+ image_unpadded_h=image_unpadded_heights,
+ image_unpadded_w=image_unpadded_widths,
+ image_placeholder_id=image_placeholder_id,
+ image_newline_id=image_newline_id,
+ variable_sized=True,
+ )
+ # FIXME max_tokens_to_generate is embedded into this processor's call.
+ prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch(
+ tokenizer=self.tokenizer,
+ prompts=prompts,
+ scale_factors=scale_factors,
+ max_tokens_to_generate=self.max_tokens_to_generate,
+ max_position_embeddings=self.max_position_embeddings,
+ add_BOS=True,
+ add_beginning_of_answer_token=True,
+ )
+ image_padded_unpacked_tokens = construct_full_unpacked_stream(
+ num_real_text_tokens=prompts_length,
+ input_stream=prompt_tokens,
+ image_tokens=model_image_input["image_input_ids"],
+ batch_size=1,
+ num_sub_sequences=self.subsequence_length,
+ )
+ # Construct inputs for image patch indices.
+ unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream(
+ num_real_text_tokens=prompts_length,
+ input_stream=torch.full_like(prompt_tokens, -1),
+ image_tokens=model_image_input["image_patch_indices_per_batch"],
+ batch_size=1,
+ num_sub_sequences=self.subsequence_length,
+ )
+ max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens)
+ max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings)
+ tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0]))
+
+ # Use same packing logic for the image patch indices.
+ image_patch_input_indices = full_unpacked_stream_to_tensor(
+ all_bi_tokens_to_place=[tokens_to_place],
+ full_unpacked_stream=unpacked_image_patch_indices_per_batch,
+ fill_value=-1,
+ batch_size=1,
+ new_seq_len=max_seq_len_batch,
+ offset=0,
+ )
+ image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]])
+ batch_encoding = {
+ "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0),
+ "image_patches": image_patches_tensor,
+ "image_patches_indices": image_patch_input_indices,
+ }
+ return batch_encoding
+
+ def __call__(
+ self,
+ text=None,
+ images=None,
+ add_special_tokens: bool = True,
+ return_attention_mask: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_token_type_ids: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> "FuyuBatchFeature":
+ """
+        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
+ encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+        FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
+ of the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ images (`PIL.Image.Image`, `List[PIL.Image.Image]`):
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+ tensor. Both channels-first and channels-last formats are supported.
+
+ Returns:
+            [`FuyuBatchFeature`]: A [`FuyuBatchFeature`] with the following fields:
+
+ - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`.
+ - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when
+ `return_attention_mask=True`.
+ """
+ requires_backends(self, ["torch"])
+
+ # --- Check input validity ---
+ if not return_attention_mask:
+ raise ValueError("`return_attention_mask=False` is not supported for this model.")
+ if text is None and images is None:
+ raise ValueError("You have to specify either text or images. Both cannot be None.")
+ if text is not None and images is None:
+ logger.warning("You are processing a text with no associated image. Make sure it is intended.")
+ self.current_processor = self.tokenizer
+ text_encoding = self.tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_token_type_ids=return_token_type_ids,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+ return text_encoding
+
+ if text is None and images is not None:
+ logger.warning("You are processing an image with no associated text. Make sure it is intended.")
+ prompts = [[""]]
+ if text is not None and images is not None:
+ if isinstance(text, str):
+ prompts = [[text]]
+ elif isinstance(text, list):
+ prompts = [[text_seq] for text_seq in text]
+
+ # --- Preprocess images using self.image_processor ---
+
+ # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors
+ image_encoding = self.image_processor.preprocess(images, return_tensors="pt")
+ batch_images = image_encoding["images"]
+ image_unpadded_heights = image_encoding["image_unpadded_heights"]
+ image_unpadded_widths = image_encoding["image_unpadded_widths"]
+ scale_factors = image_encoding["image_scale_factors"]
+ self.subsequence_length = 1 # Each batch contains only one sequence.
+ self.batch_size = len(batch_images)
+
+ # --- Use self.tokenizer to get the ids of special tokens to insert into image ids ---
+
+ image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1]
+ image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1]
+ tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1)
+
+ # --- Use self.image_processor again to obtain the full token ids and batch inputs ---
+ all_encodings = []
+
+ for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip(
+ prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images
+ ):
+ sample_encoding = self.get_sample_encoding(
+ prompts=[prompt],
+ scale_factors=[scale_factor],
+ image_unpadded_heights=torch.tensor([image_unpadded_height]),
+ image_unpadded_widths=torch.tensor([image_unpadded_width]),
+ image_placeholder_id=image_placeholder_id,
+ image_newline_id=image_newline_id,
+ tensor_batch_images=tensor_batch_image.unsqueeze(0),
+ )
+ all_encodings.append(sample_encoding)
+ batch_encoding = self._left_pad_inputs_with_attention_mask(
+ model_inputs=all_encodings, return_attention_mask=return_attention_mask
+ )
+ return FuyuBatchFeature(data=batch_encoding)
+
+ def post_process_box_coordinates(self, outputs, target_sizes=None):
+ """
+ Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space.
+ Coordinates will be returned in "box" format, with the following pattern:
+ `top, left, bottom, right`
+
+ Point coordinates are not supported yet.
+
+ Args:
+ outputs ([`GenerateOutput`]):
+ Raw outputs from `generate`.
+ target_sizes (`torch.Tensor`, *optional*):
+ Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
+ the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left
+ to None, coordinates will not be rescaled.
+
+ Returns:
+ `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with
+            boxed and possibly rescaled coordinates.
+ """
+
+ def scale_factor_to_fit(original_size, target_size=None):
+ height, width = original_size
+ if target_size is None:
+ max_height = self.image_processor.size["height"]
+ max_width = self.image_processor.size["width"]
+ else:
+ max_height, max_width = target_size
+ if width <= max_width and height <= max_height:
+ return 1.0
+ return min(max_height / height, max_width / width)
+
+ def find_delimiters_pair(tokens, start_token, end_token):
+ start_id = self.tokenizer.convert_tokens_to_ids(start_token)
+ end_id = self.tokenizer.convert_tokens_to_ids(end_token)
+
+ starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0]
+ ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0]
+
+ if torch.any(starting_positions) and torch.any(ending_positions):
+ return (starting_positions[0], ending_positions[0])
+ return (None, None)
+
+ def tokens_to_boxes(tokens, original_size):
+ while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != (
+ None,
+ None,
+ ):
+ start, end = pair
+ if end != start + 5:
+ continue
+
+ # Retrieve transformed coordinates from tokens
+ coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
+
+ # Scale back to original image size and multiply by 2
+ scale = scale_factor_to_fit(original_size)
+ top, left, bottom, right = [2 * int(float(c) / scale) for c in coords]
+
+ # Replace the IDs so they get detokenized right
+ replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}"
+ replacement = self.tokenizer.tokenize(replacement)[1:]
+ replacement = self.tokenizer.convert_tokens_to_ids(replacement)
+ replacement = torch.tensor(replacement).to(tokens)
+
+ tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
+ return tokens
+
+ def tokens_to_points(tokens, original_size):
+ while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != (
+ None,
+ None,
+ ):
+ start, end = pair
+ if end != start + 3:
+ continue
+
+ # Retrieve transformed coordinates from tokens
+ coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
+
+ # Scale back to original image size and multiply by 2
+ scale = scale_factor_to_fit(original_size)
+ x, y = [2 * int(float(c) / scale) for c in coords]
+
+ # Replace the IDs so they get detokenized right
+ replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}"
+ replacement = self.tokenizer.tokenize(replacement)[1:]
+ replacement = self.tokenizer.convert_tokens_to_ids(replacement)
+ replacement = torch.tensor(replacement).to(tokens)
+
+ tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
+ return tokens
+
+ if target_sizes is None:
+ target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs)
+ elif target_sizes.shape[1] != 2:
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
+
+ if len(outputs) != len(target_sizes):
+ raise ValueError("Make sure that you pass in as many target sizes as output sequences")
+
+ results = []
+ for seq, size in zip(outputs, target_sizes):
+ seq = tokens_to_boxes(seq, size)
+ seq = tokens_to_points(seq, size)
+ results.append(seq)
+
+ return results
+
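+    # Usage sketch (illustrative, not from the original file; `model`, `inputs` and `processor` are assumed to be
+    # set up as in the FuyuForCausalLM docstring example):
+    #     generated = model.generate(**inputs, max_new_tokens=20)
+    #     fixed = processor.post_process_box_coordinates(generated)
+    #     print(processor.batch_decode(fixed, skip_special_tokens=True)[0])
+    # Any <0x00> ... <0x01> spans in the generated ids are rewritten as readable <box>top, left, bottom, right</box>
+    # text in the original image's coordinate space.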
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/hubert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0b72a1f297bf8972f7c815dd572909d06ab0517
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/hubert/__init__.py
@@ -0,0 +1,83 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {"configuration_hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_hubert"] = [
+ "HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "HubertForCTC",
+ "HubertForSequenceClassification",
+ "HubertModel",
+ "HubertPreTrainedModel",
+ ]
+
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_hubert"] = [
+ "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFHubertForCTC",
+ "TFHubertModel",
+ "TFHubertPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_hubert import (
+ HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ HubertForCTC,
+ HubertForSequenceClassification,
+ HubertModel,
+ HubertPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_hubert import (
+ TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFHubertForCTC,
+ TFHubertModel,
+ TFHubertPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
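+# Lazy-import note (illustrative, not from the original file): thanks to the _LazyModule registration above,
+# `from transformers.models.hubert import HubertConfig` works even without torch installed; torch-backed classes
+# such as HubertModel are only imported (and their dependency checked) when the attribute is actually accessed.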
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..469a7e6ab9f47f051f5e38c91d8b91e9b889995e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c90197c986303ff06fe6e9be79db05860fbe3c36
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..740d75e3fdf37615ad7673156f17f38e8237aeb8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cdde6605aea0bd1bf3dcddbdac89b7a5bf982fc7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27dee59bfb80f53e6caf0b235e4c981fdbf9569a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_hubert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_hubert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ceec5c75e3c5c5802c97e1b01576c890e174eff
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_hubert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_tf_hubert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_tf_hubert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b678ab1391d6aa4fd2efea6f0ef87b9de33eeba
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_tf_hubert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.py b/venv/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.py
new file mode 100644
index 0000000000000000000000000000000000000000..00a3244a31074d1f1011bc78c2d9c30269d1951b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.py
@@ -0,0 +1,261 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Hubert model configuration"""
+
+import functools
+import operator
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class HubertConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`HubertModel`]. It is used to instantiate a
+    Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Hubert
+ [facebook/hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32):
+            Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`HubertModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+        hidden_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for activations inside the fully connected layer.
+        attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ final_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
+ layerdrop (`float`, *optional*, defaults to 0.1):
+            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
+            details.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
+ The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
+ convolutional layers.
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for output of the feature encoder.
+ feat_proj_layer_norm (`bool`, *optional*, defaults to `True`):
+ Whether to apply LayerNorm to the output of the feature encoder.
+        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
+ conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
+ conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
+ *conv_dim*.
+ conv_bias (`bool`, *optional*, defaults to `False`):
+ Whether the 1D convolutional layers have a bias.
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
+ embeddings layer.
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
+ Number of groups of 1D convolutional positional embeddings layer.
+ do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
+            Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
+            True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
+            False` corresponds to applying layer norm after the attention layer.
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
+ Recognition](https://arxiv.org/abs/1904.08779).
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
+            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
+            procedure generates `mask_time_prob * len(time_axis) / mask_time_length` independent masks over the axis. If
+            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
+            masked, *mask_time_prob* should be `prob_vector_start * mask_time_length`. Note that overlap may decrease the
+            actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
+ mask_time_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the time axis.
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
+ The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
+ irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
+ mask_time_min_masks`.
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
+ masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
+ True`.
+ mask_feature_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the feature axis.
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
+ step, irrespective of `mask_feature_prob`. Only relevant if
+ `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
+ instance of [`HubertForCTC`].
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
+ of [`HubertForCTC`].
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
+ instance of [`HubertForSequenceClassification`].
+ classifier_proj_size (`int`, *optional*, defaults to 256):
+ Dimensionality of the projection before token mean-pooling for classification.
+
+ Example:
+
+ ```python
+ >>> from transformers import HubertModel, HubertConfig
+
+ >>> # Initializing a Hubert facebook/hubert-base-ls960 style configuration
+ >>> configuration = HubertConfig()
+
+ >>> # Initializing a model from the facebook/hubert-base-ls960 style configuration
+ >>> model = HubertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "hubert"
+
+ def __init__(
+ self,
+ vocab_size=32,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout=0.1,
+ activation_dropout=0.1,
+ attention_dropout=0.1,
+ feat_proj_layer_norm=True,
+ feat_proj_dropout=0.0,
+ final_dropout=0.1,
+ layerdrop=0.1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ feat_extract_norm="group",
+ feat_extract_activation="gelu",
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
+ conv_kernel=(10, 3, 3, 3, 3, 2, 2),
+ conv_bias=False,
+ num_conv_pos_embeddings=128,
+ num_conv_pos_embedding_groups=16,
+ do_stable_layer_norm=False,
+ apply_spec_augment=True,
+ mask_time_prob=0.05,
+ mask_time_length=10,
+ mask_time_min_masks=2,
+ mask_feature_prob=0.0,
+ mask_feature_length=10,
+ mask_feature_min_masks=0,
+ ctc_loss_reduction="sum",
+ ctc_zero_infinity=False,
+ use_weighted_layer_sum=False,
+ classifier_proj_size=256,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
+ self.hidden_size = hidden_size
+ self.feat_extract_norm = feat_extract_norm
+ self.feat_extract_activation = feat_extract_activation
+ self.conv_dim = list(conv_dim)
+ self.conv_stride = list(conv_stride)
+ self.conv_kernel = list(conv_kernel)
+ self.conv_bias = conv_bias
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
+ self.num_feat_extract_layers = len(self.conv_dim)
+ self.num_hidden_layers = num_hidden_layers
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.num_attention_heads = num_attention_heads
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.feat_proj_layer_norm = feat_proj_layer_norm
+ self.feat_proj_dropout = feat_proj_dropout
+ self.final_dropout = final_dropout
+ self.layerdrop = layerdrop
+ self.layer_norm_eps = layer_norm_eps
+ self.initializer_range = initializer_range
+ self.vocab_size = vocab_size
+ self.do_stable_layer_norm = do_stable_layer_norm
+ self.use_weighted_layer_sum = use_weighted_layer_sum
+ self.classifier_proj_size = classifier_proj_size
+
+ if (
+ (len(self.conv_stride) != self.num_feat_extract_layers)
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
+ ):
+ raise ValueError(
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
+ )
+
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
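+ # Roughly `mask_time_prob * sequence_length / mask_time_length` spans of
+ # `mask_time_length` frames are masked along the time axis (and analogously along
+ # the feature axis); overlapping spans reduce the effective masked percentage.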
+ self.apply_spec_augment = apply_spec_augment
+ self.mask_time_prob = mask_time_prob
+ self.mask_time_length = mask_time_length
+ self.mask_time_min_masks = mask_time_min_masks
+ self.mask_feature_prob = mask_feature_prob
+ self.mask_feature_length = mask_feature_length
+ self.mask_feature_min_masks = mask_feature_min_masks
+
+ # ctc loss
+ self.ctc_loss_reduction = ctc_loss_reduction
+ self.ctc_zero_infinity = ctc_zero_infinity
+
+ @property
+ def inputs_to_logits_ratio(self):
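+ # Product of the feature-encoder conv strides, i.e. the audio-sample-to-frame
+ # downsampling factor (5*2*2*2*2*2*2 = 320 for the default configuration).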
+ return functools.reduce(operator.mul, self.conv_stride, 1)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..571761e022846f669f106735e3f5a9c6e7037165
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py
@@ -0,0 +1,223 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Hubert checkpoint."""
+
+
+import argparse
+
+import torch
+from s3prl.hub import distilhubert
+
+from transformers import HubertConfig, HubertModel, Wav2Vec2FeatureExtractor, logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+MAPPING = {
+ "post_extract_proj": "feature_projection.projection",
+ "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
+ "encoder.layer_norm": "encoder.layer_norm",
+ "mask_emb": "masked_spec_embed",
+}
+
+
+def set_recursively(hf_pointer, key, value, full_name, weight_type):
+ for attribute in key.split("."):
+ hf_pointer = getattr(hf_pointer, attribute)
+
+ if weight_type is not None:
+ hf_shape = getattr(hf_pointer, weight_type).shape
+ else:
+ hf_shape = hf_pointer.shape
+
+ assert hf_shape == value.shape, (
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
+ f" {value.shape} for {full_name}"
+ )
+
+ if weight_type == "weight":
+ hf_pointer.weight.data = value
+ elif weight_type == "weight_g":
+ hf_pointer.weight_g.data = value
+ elif weight_type == "weight_v":
+ hf_pointer.weight_v.data = value
+ elif weight_type == "bias":
+ hf_pointer.bias.data = value
+ else:
+ hf_pointer.data = value
+
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
+
+
+def recursively_load_weights(fairseq_model, hf_model):
+ unused_weights = []
+ fairseq_dict = fairseq_model.state_dict()
+
+ feature_extractor = hf_model.feature_extractor
+
+ for name, value in fairseq_dict.items():
+ is_used = False
+ if "conv_layers" in name:
+ load_conv_layer(
+ name,
+ value,
+ feature_extractor,
+ unused_weights,
+ hf_model.config.feat_extract_norm == "group",
+ )
+ is_used = True
+ else:
+ for key, mapped_key in MAPPING.items():
+ if key in name:
+ is_used = True
+ if "*" in mapped_key:
+ layer_index = name.split(key)[0].split(".")[-2]
+ mapped_key = mapped_key.replace("*", layer_index)
+ if "weight_g" in name:
+ weight_type = "weight_g"
+ elif "weight_v" in name:
+ weight_type = "weight_v"
+ elif "weight" in name:
+ weight_type = "weight"
+ elif "bias" in name:
+ weight_type = "bias"
+ else:
+ weight_type = None
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
+ continue
+ if not is_used:
+ unused_weights.append(name)
+
+ logger.warning(f"Unused weights: {unused_weights}")
+
+
+def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
+ name = full_name.split("conv_layers.")[-1]
+ items = name.split(".")
+ layer_id = int(items[0])
+ type_id = int(items[1])
+
+ if type_id == 0:
+ if "bias" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
+ if "bias" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
+ f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
+ " found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ else:
+ unused_weights.append(full_name)
+
+
+def convert_config(model):
+ config = HubertConfig()
+ fs_config = model.config
+
+ config.activation_dropout = fs_config.activation_dropout
+ config.apply_spec_augment = False
+ config.attention_dropout = fs_config.attention_dropout
+ config.conv_bias = False
+ conv_layers = eval(fs_config.extractor_conv_feature_layers)
+ config.conv_dim = [x[0] for x in conv_layers]
+ config.conv_kernel = [x[1] for x in conv_layers]
+ config.conv_stride = [x[2] for x in conv_layers]
+ config.feat_extract_activation = "gelu"
+ config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
+ config.feat_proj_layer_norm = False
+ config.feat_proj_dropout = 0.0
+ config.final_dropout = 0.0
+ config.hidden_act = fs_config.activation_fn
+ config.hidden_dropout = fs_config.dropout
+ config.hidden_size = fs_config.encoder_embed_dim
+ config.initializer_range = 0.02
+ config.intermediate_size = fs_config.encoder_ffn_embed_dim
+ config.layer_norm_eps = 1e-5
+ config.layerdrop = 0.0
+ config.num_attention_heads = fs_config.encoder_attention_heads
+ config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
+ config.num_conv_pos_embeddings = fs_config.conv_pos
+ config.num_feat_extract_layers = len(conv_layers)
+ config.num_hidden_layers = fs_config.encoder_layers
+
+ return config
+
+
+@torch.no_grad()
+def convert_hubert_checkpoint(pytorch_dump_folder_path, config_path=None):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+ model = distilhubert().model.model
+
+ if config_path is not None:
+ config = HubertConfig.from_pretrained(config_path)
+ else:
+ config = convert_config(model)
+ model = model.eval()
+
+ feature_extractor = Wav2Vec2FeatureExtractor(
+ feature_size=1,
+ sampling_rate=16000,
+ padding_value=0,
+ do_normalize=False,
+ return_attention_mask=False,
+ )
+ hf_model = HubertModel(config)
+
+ recursively_load_weights(model, hf_model)
+
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
+ hf_model.save_pretrained(pytorch_dump_folder_path)
+
+
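+# Example invocation (hypothetical output path), sketching how this script is
+# typically run from the command line:
+#   python convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py \
+#       --pytorch_dump_folder_path ./distilhubert-converted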
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ args = parser.parse_args()
+ convert_hubert_checkpoint(args.pytorch_dump_folder_path, args.config_path)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a70fb6db710f49e265a3fa449cd01cec281accb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,249 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Hubert checkpoint."""
+
+
+import argparse
+import json
+import os
+
+import fairseq
+import torch
+from fairseq.data import Dictionary
+
+from transformers import (
+ HubertConfig,
+ HubertForCTC,
+ HubertModel,
+ Wav2Vec2CTCTokenizer,
+ Wav2Vec2FeatureExtractor,
+ Wav2Vec2Processor,
+ logging,
+)
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+MAPPING = {
+ "post_extract_proj": "feature_projection.projection",
+ "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
+ "encoder.layer_norm": "encoder.layer_norm",
+ "w2v_model.layer_norm": "feature_projection.layer_norm",
+ "w2v_encoder.proj": "lm_head",
+ "mask_emb": "masked_spec_embed",
+}
+
+
+def set_recursively(hf_pointer, key, value, full_name, weight_type):
+ for attribute in key.split("."):
+ hf_pointer = getattr(hf_pointer, attribute)
+
+ if weight_type is not None:
+ hf_shape = getattr(hf_pointer, weight_type).shape
+ else:
+ hf_shape = hf_pointer.shape
+
+ assert hf_shape == value.shape, (
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
+ f" {value.shape} for {full_name}"
+ )
+
+ if weight_type == "weight":
+ hf_pointer.weight.data = value
+ elif weight_type == "weight_g":
+ hf_pointer.weight_g.data = value
+ elif weight_type == "weight_v":
+ hf_pointer.weight_v.data = value
+ elif weight_type == "bias":
+ hf_pointer.bias.data = value
+ else:
+ hf_pointer.data = value
+
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
+
+
+def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
+ unused_weights = []
+ fairseq_dict = fairseq_model.state_dict()
+
+ feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
+
+ for name, value in fairseq_dict.items():
+ is_used = False
+ if "conv_layers" in name:
+ load_conv_layer(
+ name,
+ value,
+ feature_extractor,
+ unused_weights,
+ hf_model.config.feat_extract_norm == "group",
+ )
+ is_used = True
+ else:
+ for key, mapped_key in MAPPING.items():
+ mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
+
+ if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
+ is_used = True
+ if "*" in mapped_key:
+ layer_index = name.split(key)[0].split(".")[-2]
+ mapped_key = mapped_key.replace("*", layer_index)
+ if "weight_g" in name:
+ weight_type = "weight_g"
+ elif "weight_v" in name:
+ weight_type = "weight_v"
+ elif "weight" in name:
+ weight_type = "weight"
+ elif "bias" in name:
+ weight_type = "bias"
+ else:
+ weight_type = None
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
+ continue
+ if not is_used:
+ unused_weights.append(name)
+
+ logger.warning(f"Unused weights: {unused_weights}")
+
+
+def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
+ name = full_name.split("conv_layers.")[-1]
+ items = name.split(".")
+ layer_id = int(items[0])
+ type_id = int(items[1])
+
+ if type_id == 0:
+ if "bias" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
+ if "bias" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
+ f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
+ " found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ else:
+ unused_weights.append(full_name)
+
+
+@torch.no_grad()
+def convert_hubert_checkpoint(
+ checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
+):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+ if config_path is not None:
+ config = HubertConfig.from_pretrained(config_path)
+ else:
+ config = HubertConfig()
+
+ if is_finetuned:
+ if dict_path:
+ target_dict = Dictionary.load(dict_path)
+
+ # important change bos & pad token id since CTC symbol is <pad> and
+ # not <s> as in fairseq
+ config.bos_token_id = target_dict.pad_index
+ config.pad_token_id = target_dict.bos_index
+ config.eos_token_id = target_dict.eos_index
+ config.vocab_size = len(target_dict.symbols)
+ vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
+ if not os.path.isdir(pytorch_dump_folder_path):
+ logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
+ return
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
+ json.dump(target_dict.indices, vocab_handle)
+ tokenizer = Wav2Vec2CTCTokenizer(
+ vocab_path,
+ unk_token=target_dict.unk_word,
+ pad_token=target_dict.pad_word,
+ bos_token=target_dict.bos_word,
+ eos_token=target_dict.eos_word,
+ word_delimiter_token="|",
+ do_lower_case=False,
+ )
+ return_attention_mask = True if config.feat_extract_norm == "layer" else False
+ feature_extractor = Wav2Vec2FeatureExtractor(
+ feature_size=1,
+ sampling_rate=16000,
+ padding_value=0,
+ do_normalize=True,
+ return_attention_mask=return_attention_mask,
+ )
+ processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ hf_wav2vec = HubertForCTC(config)
+ else:
+ hf_wav2vec = HubertModel(config)
+
+ if is_finetuned:
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
+ [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
+ )
+ else:
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
+
+ model = model[0].eval()
+
+ recursively_load_weights(model, hf_wav2vec, is_finetuned)
+
+ hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
+
+
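+# Example invocation (hypothetical checkpoint/dict/output paths) for converting a
+# fine-tuned fairseq HuBERT checkpoint:
+#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
+#       --checkpoint_path ./hubert_finetuned.pt \
+#       --dict_path ./dict.ltr.txt \
+#       --pytorch_dump_folder_path ./hubert-converted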
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
+ parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ parser.add_argument(
+ "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
+ )
+ args = parser.parse_args()
+ convert_hubert_checkpoint(
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..51908f930242c6580d2d154bec7e632e7af568fe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py
@@ -0,0 +1,69 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Hubert checkpoint."""
+
+
+import argparse
+
+import torch
+
+from transformers import HubertConfig, HubertForSequenceClassification, Wav2Vec2FeatureExtractor, logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+SUPPORTED_MODELS = ["UtteranceLevel"]
+
+
+@torch.no_grad()
+def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
+ if checkpoint["Config"]["downstream_expert"]["modelrc"]["select"] not in SUPPORTED_MODELS:
+ raise NotImplementedError(f"The supported s3prl models are {SUPPORTED_MODELS}")
+
+ downstream_dict = checkpoint["Downstream"]
+
+ hf_config = HubertConfig.from_pretrained(config_path)
+ hf_model = HubertForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
+ hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
+ base_model_name, return_attention_mask=True, do_normalize=False
+ )
+
+ if hf_config.use_weighted_layer_sum:
+ hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
+
+ hf_model.projector.weight.data = downstream_dict["projector.weight"]
+ hf_model.projector.bias.data = downstream_dict["projector.bias"]
+ hf_model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
+ hf_model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
+
+ hf_feature_extractor.save_pretrained(model_dump_path)
+ hf_model.save_pretrained(model_dump_path)
+
+
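+# Example invocation (hypothetical names and paths) for converting an s3prl
+# UtteranceLevel classifier head on top of a pretrained HuBERT encoder:
+#   python convert_hubert_original_s3prl_checkpoint_to_pytorch.py \
+#       --base_model_name facebook/hubert-base-ls960 \
+#       --config_path ./config.json \
+#       --checkpoint_path ./s3prl_checkpoint.ckpt \
+#       --model_dump_path ./hubert-seq-clf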
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
+ )
+ parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
+ parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
+ args = parser.parse_args()
+ convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/modeling_hubert.py b/venv/lib/python3.10/site-packages/transformers/models/hubert/modeling_hubert.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9e223f9a384d08b0f15e3245dd4b4b9d731e5fa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/hubert/modeling_hubert.py
@@ -0,0 +1,1386 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Hubert model."""
+
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_hubert import HubertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_HIDDEN_STATES_START_POSITION = 1
+
+# General docstring
+_CONFIG_FOR_DOC = "HubertConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/hubert-large-ls960-ft"
+_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
+
+# CTC docstring
+_CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
+_CTC_EXPECTED_LOSS = 22.68
+
+# Audio class docstring
+_SEQ_CLASS_CHECKPOINT = "superb/hubert-base-superb-ks"
+_SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
+_SEQ_CLASS_EXPECTED_LOSS = 8.53
+
+
+from ..deprecated._archive_maps import HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[torch.LongTensor] = None,
+ min_masks: int = 0,
+) -> np.ndarray:
+ """
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
+ CPU as part of the preprocessing during training.
+
+ Args:
+ shape: The shape for which to compute masks. This should be a tuple of size 2 where
+ the first element is the batch size and the second element is the length of the axis to span.
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
+ independently generated mask spans of length `mask_length` is computed by
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
+ actual percentage will be smaller.
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
+ each batch dimension.
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
+ f" and `sequence_length`: {sequence_length}`"
+ )
+
+ # epsilon is used for probabilistic rounding
+ epsilon = np.random.rand(1).item()
+
+ def compute_num_masked_span(input_length):
+ """Given input length, compute how many spans should be masked"""
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
+ num_masked_span = max(num_masked_span, min_masks)
+
+ # make sure num masked span <= sequence_length
+ if num_masked_span * mask_length > sequence_length:
+ num_masked_span = sequence_length // mask_length
+
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
+ if input_length - (mask_length - 1) < num_masked_span:
+ num_masked_span = max(input_length - (mask_length - 1), 0)
+
+ return num_masked_span
+
+ # compute number of masked spans in batch
+ input_lengths = (
+ attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None
+ else [sequence_length for _ in range(batch_size)]
+ )
+
+ # SpecAugment mask to fill
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
+ spec_aug_mask_idxs = []
+
+ max_num_masked_span = compute_num_masked_span(sequence_length)
+
+ if max_num_masked_span == 0:
+ return spec_aug_mask
+
+ for input_length in input_lengths:
+ # compute num of masked spans for this input
+ num_masked_span = compute_num_masked_span(input_length)
+
+ # get random indices to mask
+ spec_aug_mask_idx = np.random.choice(
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
+ )
+
+ # pick first sampled index that will serve as a dummy index to pad vector
+ # to ensure same dimension for all batches due to probabilistic rounding
+ # Picking first sample just pads those vectors twice.
+ if len(spec_aug_mask_idx) == 0:
+ # this case can only happen if `input_length` is strictly smaller than
+ # `sequence_length` in which case the last token has to be a padding
+ # token which we can use as a dummy mask id
+ dummy_mask_idx = sequence_length - 1
+ else:
+ dummy_mask_idx = spec_aug_mask_idx[0]
+
+ spec_aug_mask_idx = np.concatenate(
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
+ )
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
+
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = np.broadcast_to(
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
+
+ # add offset to the starting indexes so that indexes now create a span
+ offsets = np.arange(mask_length)[None, None, :]
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
+ batch_size, max_num_masked_span * mask_length
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # ensure that we cannot have indices larger than sequence_length
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
+
+ # scatter indices to mask
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
+
+ return spec_aug_mask
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
+class HubertNoLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
+class HubertLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+
+ hidden_states = hidden_states.transpose(-2, -1)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose(-2, -1)
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
+class HubertGroupNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
+class HubertPositionalConvEmbedding(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups,
+ )
+
+ weight_norm = nn.utils.weight_norm
+ if hasattr(nn.utils.parametrizations, "weight_norm"):
+ weight_norm = nn.utils.parametrizations.weight_norm
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
+ else:
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = HubertSamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.transpose(1, 2)
+
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Hubert
+class HubertSamePadLayer(nn.Module):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->Hubert
+class HubertFeatureEncoder(nn.Module):
+ """Construct the features from raw audio waveform"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [HubertGroupNormConvLayer(config, layer_id=0)] + [
+ HubertNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [HubertLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = nn.ModuleList(conv_layers)
+ self.gradient_checkpointing = False
+ self._requires_grad = True
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.requires_grad = False
+ self._requires_grad = False
+
+ def forward(self, input_values):
+ hidden_states = input_values[:, None]
+
+ # make sure hidden_states require grad for gradient_checkpointing
+ if self._requires_grad and self.training:
+ hidden_states.requires_grad = True
+
+ for conv_layer in self.conv_layers:
+ if self._requires_grad and self.gradient_checkpointing and self.training:
+ hidden_states = self._gradient_checkpointing_func(
+ conv_layer.__call__,
+ hidden_states,
+ )
+ else:
+ hidden_states = conv_layer(hidden_states)
+
+ return hidden_states
+
+
+class HubertFeatureExtractor(HubertFeatureEncoder):
+ def __init__(self, config):
+ super().__init__(config)
+ warnings.warn(
+ f"The class `{self.__class__.__name__}` has been depreciated "
+ "and will be removed in Transformers v5. "
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
+ FutureWarning,
+ )
+
+
+class HubertFeatureProjection(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.feat_proj_layer_norm = config.feat_proj_layer_norm
+ if self.feat_proj_layer_norm:
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
+ self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
+
+ def forward(self, hidden_states):
+ # non-projected hidden states are needed for quantization
+ if self.feat_proj_layer_norm:
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.projection(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Hubert
+class HubertAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[HubertConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Hubert
+class HubertFeedForward(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
+
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->Hubert
+class HubertEncoderLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = HubertAttention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.feed_forward = HubertFeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ attn_residual = hidden_states
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = attn_residual + hidden_states
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AttnAdapterLayer with Wav2Vec2->Hubert
+class HubertAttnAdapterLayer(nn.Module):
+ def __init__(self, config):
+ """
+ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
+ up training throughput.
+ """
+ super().__init__()
+ self.input_dim = config.adapter_attn_dim
+ self.hidden_dim = config.hidden_size
+
+ self.norm = nn.LayerNorm(self.hidden_dim)
+ self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
+ self.act_fn = nn.ReLU()
+ self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)
+
+ def forward(self, hidden_states: torch.FloatTensor):
+ hidden_states = self.norm(hidden_states)
+
+ hidden_states = self.linear_1(hidden_states)
+ hidden_states = self.act_fn(hidden_states)
+ hidden_states = self.linear_2(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
+class HubertEncoderLayerStableLayerNorm(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = HubertAttention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.feed_forward = HubertFeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ if getattr(config, "adapter_attn_dim", None) is not None:
+ self.adapter_layer = HubertAttnAdapterLayer(config)
+ else:
+ self.adapter_layer = None
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ):
+ attn_residual = hidden_states
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = attn_residual + hidden_states
+ hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
+
+ if self.adapter_layer is not None:
+ hidden_states = hidden_states + self.adapter_layer(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->Hubert
+class HubertEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = HubertPositionalConvEmbedding(config)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.ModuleList([HubertEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens output 0
+ expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
+ hidden_states[~expand_attention_mask] = 0
+
+ # extend attention_mask
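+ # Turn the boolean mask into an additive bias: 0.0 on attended positions and the
+ # dtype's most negative value on padded positions, so they vanish after softmax.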
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
+ attention_mask = attention_mask.expand(
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
+ )
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
+class HubertEncoderStableLayerNorm(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = HubertPositionalConvEmbedding(config)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.ModuleList(
+ [HubertEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
+ )
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens are not attended to
+ expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
+ hidden_states[~expand_attention_mask] = 0
+
+ # extend attention_mask
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
+ attention_mask = attention_mask.expand(
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
+ )
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.dropout(hidden_states)
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+ skip_the_layer = bool(self.training and (dropout_probability < self.config.layerdrop))
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class HubertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = HubertConfig
+ base_model_prefix = "hubert"
+ main_input_name = "input_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Conv1d):
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
+ with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
+ nn.init.kaiming_normal_(module.weight.data)
+ else:
+ with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
+ nn.init.kaiming_normal_(module.weight.data)
+ else:
+ nn.init.kaiming_normal_(module.weight.data)
+
+ if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
+ module.bias.data.zero_()
+
+ def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
+ """
+ Computes the output length of the convolutional layers
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
+
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
+
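+ # Worked example (editorial, assuming the default HuBERT-base feature extractor with
+ # conv_kernel=(10, 3, 3, 3, 3, 2, 2) and conv_stride=(5, 2, 2, 2, 2, 2, 2)):
+ # 16000 input samples (1 s of 16 kHz audio) shrink layer by layer as
+ # 16000 -> 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49,
+ # i.e. roughly one output frame every 20 ms, using floor((length - kernel) / stride) + 1 per layer.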
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
+ output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+ batch_size = attention_mask.shape[0]
+
+ attention_mask = torch.zeros(
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+ # these two operations make sure that all values before the output length indices are attended to
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
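+ # Editorial example of the flip/cumsum/flip trick above: with feature_vector_length=4 and an output
+ # length of 3, the one-hot marker [0, 0, 1, 0] becomes [1, 1, 1, 0] after flipping, taking the
+ # cumulative sum and flipping back, i.e. every frame up to and including the last valid one is attended.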
+ return attention_mask
+
+
+HUBERT_START_DOCSTRING = r"""
+ Hubert was proposed in [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden
+ Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia,
+ Ruslan Salakhutdinov, Abdelrahman Mohamed.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, etc.).
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+HUBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
+ soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
+ conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
+ 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+
+
+ `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
+ True`. For all models whose processor has `config.return_attention_mask == False`, such as
+ [hubert-base](https://huggingface.co/facebook/hubert-base-ls960), `attention_mask` should **not** be passed
+ to avoid degraded performance when doing batched inference. For such models `input_values` should simply be
+ padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different
+ results depending on whether `input_values` is padded or not.
+
+
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
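+ # Editorial usage sketch mirroring the docstring above (the waveform list is a placeholder): letting the
+ # processor decide whether to emit an attention mask avoids passing one to checkpoints whose feature
+ # extractor was trained without it.
+ # >>> from transformers import AutoProcessor, HubertModel
+ # >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
+ # >>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
+ # >>> inputs = processor(list_of_waveforms, sampling_rate=16000, padding=True, return_tensors="pt")
+ # >>> outputs = model(**inputs)  # `inputs` contains `attention_mask` only if the processor returned one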
+
+@add_start_docstrings(
+ "The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.",
+ HUBERT_START_DOCSTRING,
+)
+class HubertModel(HubertPreTrainedModel):
+ def __init__(self, config: HubertConfig):
+ super().__init__(config)
+ self.config = config
+ self.feature_extractor = HubertFeatureEncoder(config)
+ self.feature_projection = HubertFeatureProjection(config)
+
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
+
+ if config.do_stable_layer_norm:
+ self.encoder = HubertEncoderStableLayerNorm(config)
+ else:
+ self.encoder = HubertEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
+ def _mask_hidden_states(
+ self,
+ hidden_states: torch.FloatTensor,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ # generate indices & apply SpecAugment along time axis
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+ elif self.config.mask_time_prob > 0 and self.training:
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ attention_mask=attention_mask,
+ min_masks=self.config.mask_time_min_masks,
+ )
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+
+ if self.config.mask_feature_prob > 0 and self.training:
+ # generate indices & apply SpecAugment along feature axis
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ min_masks=self.config.mask_feature_min_masks,
+ )
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
+ hidden_states[mask_feature_indices] = 0
+
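+ # Editorial note: `_compute_mask_indices` (defined earlier in this file) returns a boolean array of
+ # shape (batch_size, sequence_length) in which roughly `mask_prob` of the time steps fall inside a
+ # masked span of `mask_length` consecutive frames; masked time steps are overwritten with the learned
+ # `masked_spec_embed` vector, while masked feature channels are simply zeroed, as done above.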
+ return hidden_states
+
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ """
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoProcessor, HubertModel
+ >>> from datasets import load_dataset
+ >>> import soundfile as sf
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
+ >>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
+
+
+ >>> def map_to_array(batch):
+ ... speech, _ = sf.read(batch["file"])
+ ... batch["speech"] = speech
+ ... return batch
+
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> ds = ds.map(map_to_array)
+
+ >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1
+ >>> hidden_states = model(input_values).last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ extract_features = self.feature_extractor(input_values)
+ extract_features = extract_features.transpose(1, 2)
+
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
+
+ hidden_states = self.feature_projection(extract_features)
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if not return_dict:
+ return (hidden_states,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
+ HUBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
+class HubertForCTC(HubertPreTrainedModel):
+ def __init__(self, config, target_lang: Optional[str] = None):
+ super().__init__(config)
+
+ self.hubert = HubertModel(config)
+ self.dropout = nn.Dropout(config.final_dropout)
+
+ self.target_lang = target_lang
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that "
+ "does not define the vocabulary size of the language model head. Please "
+ "instantiate the model as follows: `HubertForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
+ "or define `vocab_size` of your model's configuration."
+ )
+ output_hidden_size = (
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
+ )
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def tie_weights(self):
+ """
+ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
+ passing `target_lang=...` to `from_pretrained(...)`.
+
+ This method is **not** supposed to be called by the user and is prone to be changed in the future.
+ """
+
+ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
+ # correctly load adapter layers for Hubert so that we do not have to introduce a new API to
+ # [`PreTrainedModel`]. While slightly hacky, Hubert never has to tie input and output embeddings, so that it is
+ # ok to repurpose this function here.
+ target_lang = self.target_lang
+
+ if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
+ raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
+ elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
+ logger.info("By default `target_lang` is set to 'eng'.")
+ elif target_lang is not None:
+ self.load_adapter(target_lang, force_load=True)
+
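+ # Editorial sketch (hypothetical checkpoint name; mirrors the adapter API this code is copied from):
+ # a model that ships per-language adapter weights could be loaded and switched roughly as follows.
+ # >>> model = HubertForCTC.from_pretrained("some-org/hubert-ctc-with-adapters", target_lang="fra")
+ # >>> model.load_adapter("deu")  # swap to another language adapter at runtime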
+ def freeze_feature_extractor(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ warnings.warn(
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
+ FutureWarning,
+ )
+ self.freeze_feature_encoder()
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.hubert.feature_extractor._freeze_parameters()
+
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.hubert.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_CTC_EXPECTED_OUTPUT,
+ expected_loss=_CTC_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or equal to
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.hubert(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states)
+
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ if labels.max() >= self.config.vocab_size:
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
+
+ # retrieve loss input_lengths from attention_mask
+ attention_mask = (
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
+ )
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+
+ # assuming that padded tokens are filled with -100
+ # when not being attended to
+ labels_mask = labels >= 0
+ target_lengths = labels_mask.sum(-1)
+ flattened_targets = labels.masked_select(labels_mask)
+
+ # ctc_loss doesn't support fp16
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
+
+ with torch.backends.cudnn.flags(enabled=False):
+ loss = nn.functional.ctc_loss(
+ log_probs,
+ flattened_targets,
+ input_lengths,
+ target_lengths,
+ blank=self.config.pad_token_id,
+ reduction=self.config.ctc_loss_reduction,
+ zero_infinity=self.config.ctc_zero_infinity,
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
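+ # Editorial usage sketch for the CTC head above (standard greedy decoding; `waveform` is a placeholder
+ # and the checkpoint name matches the one used in the docstrings elsewhere in this file):
+ # >>> import torch
+ # >>> from transformers import AutoProcessor, HubertForCTC
+ # >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
+ # >>> model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
+ # >>> inputs = processor(waveform, sampling_rate=16000, return_tensors="pt")
+ # >>> logits = model(**inputs).logits
+ # >>> predicted_ids = torch.argmax(logits, dim=-1)
+ # >>> transcription = processor.batch_decode(predicted_ids)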
+
+@add_start_docstrings(
+ """
+ Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
+ SUPERB Keyword Spotting.
+ """,
+ HUBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
+class HubertForSequenceClassification(HubertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ if hasattr(config, "add_adapter") and config.add_adapter:
+ raise ValueError(
+ "Sequence classification does not support the use of Hubert adapters (config.add_adapter=True)"
+ )
+ self.hubert = HubertModel(config)
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
+ if config.use_weighted_layer_sum:
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def freeze_feature_extractor(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ warnings.warn(
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
+ FutureWarning,
+ )
+ self.freeze_feature_encoder()
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.hubert.feature_extractor._freeze_parameters()
+
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.hubert.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_SEQ_CLASS_CHECKPOINT,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Squared loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
+
+ outputs = self.hubert(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.config.use_weighted_layer_sum:
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
+ hidden_states = torch.stack(hidden_states, dim=1)
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
+ else:
+ hidden_states = outputs[0]
+
+ hidden_states = self.projector(hidden_states)
+ if attention_mask is None:
+ pooled_output = hidden_states.mean(dim=1)
+ else:
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
+ hidden_states[~padding_mask] = 0.0
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
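+ # Editorial note on `use_weighted_layer_sum` above: when enabled, the classifier combines all
+ # `num_hidden_layers + 1` hidden states (the embeddings plus every transformer layer) with
+ # softmax-normalized learned weights, i.e. hidden_states = sum_l softmax(layer_weights)[l] * states[l],
+ # before projecting and pooling.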
diff --git a/venv/lib/python3.10/site-packages/transformers/models/hubert/modeling_tf_hubert.py b/venv/lib/python3.10/site-packages/transformers/models/hubert/modeling_tf_hubert.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dc696f8a7891787c1e711a793a7dde96e607163
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/hubert/modeling_tf_hubert.py
@@ -0,0 +1,1676 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TensorFlow Hubert model."""
+
+from __future__ import annotations
+
+import warnings
+from typing import Any, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput
+from ...modeling_tf_utils import (
+ TFPreTrainedModel,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list, stable_softmax
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_hubert import HubertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "HubertConfig"
+
+
+from ..deprecated._archive_maps import TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement
+def _sample_without_replacement(distribution, num_samples):
+ """
+ Categorical sampling without replacement is currently not implemented. The Gumbel-max trick will do for now; see
+ https://github.com/tensorflow/tensorflow/issues/9260 for more info
+ """
+ z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
+ _, indices = tf.nn.top_k(distribution + z, num_samples)
+ return indices
+
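+ # Editorial note: with the constant `uniform_dist` this helper is called with further below, adding
+ # i.i.d. noise and taking the top-k simply selects `num_samples` distinct indices uniformly at random,
+ # which is all the SpecAugment start-index sampling needs.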
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices
+def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
+ """
+ Scatter function as in PyTorch with indices in format (batch_dim, indices)
+ """
+ indices_shape = shape_list(batch_indices)
+ # broadcast batch dim to indices_shape
+ broad_casted_batch_dims = tf.reshape(
+ tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
+ )
+ # transform batch_indices to pair_indices
+ pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
+ # scatter values to pair indices
+ return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ min_masks: int = 0,
+) -> tf.Tensor:
+ """
+ Computes random mask spans for a given shape
+
+ Args:
+ shape: The shape for which to compute masks; should be of size 2, where the first element is the batch size
+ and the second is the number of timesteps.
+ mask_prob:
+ Probability for each token to be chosen as the start of the span to be masked. This will be multiplied by
+ the number of timesteps divided by the length of the mask span to mask approximately this percentage of
+ all elements. However, due to overlaps, the actual number will be smaller (unless `no_overlap` is True).
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+
+ Adapted from [fairseq's
+ data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ tf.debugging.assert_less(
+ mask_length,
+ sequence_length,
+ message=(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and"
+ f" `sequence_length`: {sequence_length}`"
+ ),
+ )
+
+ # compute number of masked spans in batch
+ num_masked_spans = mask_prob * tf.cast(sequence_length, tf.float32) / mask_length + tf.random.uniform((1,))
+ num_masked_spans = tf.maximum(num_masked_spans, min_masks)
+ num_masked_spans = tf.cast(num_masked_spans, tf.int32)
+
+ # make sure num masked indices <= sequence_length
+ num_masked_spans = tf.math.minimum(sequence_length // mask_length, num_masked_spans)
+ num_masked_spans = tf.squeeze(num_masked_spans)
+
+ # SpecAugment mask to fill
+ spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
+
+ # uniform distribution to sample from, make sure that offset samples are < sequence_length
+ uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))
+
+ # get random indices to mask
+ spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
+ spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
+ spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))
+
+ offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
+ offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
+ offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))
+
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # scatter indices to mask
+ spec_aug_mask = _scatter_values_on_batch_indices(
+ tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, tf.shape(spec_aug_mask)
+ )
+
+ return spec_aug_mask
+
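+ # Editorial example for the span computation above: with shape=(2, 100), mask_prob=0.05 and
+ # mask_length=10, `num_masked_spans` is int(0.5 + U(0, 1)), i.e. 0 or 1 spans of 10 consecutive
+ # time steps per sequence, capped at sequence_length // mask_length = 10 spans.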
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert
+class TFHubertGroupNorm(keras.layers.Layer):
+ """
+ From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization
+ """
+
+ def __init__(
+ self,
+ groups: int = 32,
+ axis: int = -1,
+ epsilon: float = 1e-3,
+ center: bool = True,
+ scale: bool = True,
+ beta_initializer: keras.initializers.Initializer = "zeros",
+ gamma_initializer: keras.initializers.Initializer = "ones",
+ beta_regularizer: keras.regularizers.Regularizer = None,
+ gamma_regularizer: keras.regularizers.Regularizer = None,
+ beta_constraint: keras.constraints.Constraint = None,
+ gamma_constraint: keras.constraints.Constraint = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.supports_masking = True
+ self.groups = groups
+ self.axis = axis
+ self.epsilon = epsilon
+ self.center = center
+ self.scale = scale
+ self.beta_initializer = keras.initializers.get(beta_initializer)
+ self.gamma_initializer = keras.initializers.get(gamma_initializer)
+ self.beta_regularizer = keras.regularizers.get(beta_regularizer)
+ self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
+ self.beta_constraint = keras.constraints.get(beta_constraint)
+ self.gamma_constraint = keras.constraints.get(gamma_constraint)
+ self._check_axis()
+
+ def build(self, input_shape):
+ self._check_if_input_shape_is_none(input_shape)
+ self._set_number_of_groups_for_instance_norm(input_shape)
+ self._check_size_of_dimensions(input_shape)
+ self._create_input_spec(input_shape)
+
+ self._add_gamma_weight(input_shape)
+ self._add_beta_weight(input_shape)
+ self.built = True
+ super().build(input_shape)
+
+ def call(self, inputs):
+ input_shape = keras.backend.int_shape(inputs)
+ tensor_input_shape = tf.shape(inputs)
+
+ reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
+
+ normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
+
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
+ if not is_instance_norm:
+ outputs = tf.reshape(normalized_inputs, tensor_input_shape)
+ else:
+ outputs = normalized_inputs
+
+ return outputs
+
+ def get_config(self):
+ config = {
+ "groups": self.groups,
+ "axis": self.axis,
+ "epsilon": self.epsilon,
+ "center": self.center,
+ "scale": self.scale,
+ "beta_initializer": keras.initializers.serialize(self.beta_initializer),
+ "gamma_initializer": keras.initializers.serialize(self.gamma_initializer),
+ "beta_regularizer": keras.regularizers.serialize(self.beta_regularizer),
+ "gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer),
+ "beta_constraint": keras.constraints.serialize(self.beta_constraint),
+ "gamma_constraint": keras.constraints.serialize(self.gamma_constraint),
+ }
+ base_config = super().get_config()
+ return {**base_config, **config}
+
+ def compute_output_shape(self, input_shape):
+ return input_shape
+
+ def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
+ group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
+ if not is_instance_norm:
+ group_shape[self.axis] = input_shape[self.axis] // self.groups
+ group_shape.insert(self.axis, self.groups)
+ group_shape = tf.stack(group_shape)
+ reshaped_inputs = tf.reshape(inputs, group_shape)
+ return reshaped_inputs, group_shape
+ else:
+ return inputs, group_shape
+
+ def _apply_normalization(self, reshaped_inputs, input_shape):
+ group_shape = keras.backend.int_shape(reshaped_inputs)
+ group_reduction_axes = list(range(1, len(group_shape)))
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
+ if not is_instance_norm:
+ axis = -2 if self.axis == -1 else self.axis - 1
+ else:
+ axis = -1 if self.axis == -1 else self.axis - 1
+ group_reduction_axes.pop(axis)
+
+ mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
+
+ gamma, beta = self._get_reshaped_weights(input_shape)
+ normalized_inputs = tf.nn.batch_normalization(
+ reshaped_inputs,
+ mean=mean,
+ variance=variance,
+ scale=gamma,
+ offset=beta,
+ variance_epsilon=self.epsilon,
+ )
+ return normalized_inputs
+
+ def _get_reshaped_weights(self, input_shape):
+ broadcast_shape = self._create_broadcast_shape(input_shape)
+ gamma = None
+ beta = None
+ if self.scale:
+ gamma = tf.reshape(self.gamma, broadcast_shape)
+
+ if self.center:
+ beta = tf.reshape(self.beta, broadcast_shape)
+ return gamma, beta
+
+ def _check_if_input_shape_is_none(self, input_shape):
+ dim = input_shape[self.axis]
+ if dim is None:
+ raise ValueError(
+ "Axis "
+ + str(self.axis)
+ + " of input tensor should have a defined dimension but the layer received an input with shape "
+ + str(input_shape)
+ + "."
+ )
+
+ def _set_number_of_groups_for_instance_norm(self, input_shape):
+ dim = input_shape[self.axis]
+
+ if self.groups == -1:
+ self.groups = dim
+
+ def _check_size_of_dimensions(self, input_shape):
+ dim = input_shape[self.axis]
+ if dim < self.groups:
+ raise ValueError(
+ "Number of groups ("
+ + str(self.groups)
+ + ") cannot be more than the number of channels ("
+ + str(dim)
+ + ")."
+ )
+
+ if dim % self.groups != 0:
+ raise ValueError(
+ "Number of groups ("
+ + str(self.groups)
+ + ") must be a multiple of the number of channels ("
+ + str(dim)
+ + ")."
+ )
+
+ def _check_axis(self):
+ if self.axis == 0:
+ raise ValueError(
+ "You are trying to normalize your batch axis. Do you want to use tf.layer.batch_normalization instead"
+ )
+
+ def _create_input_spec(self, input_shape):
+ dim = input_shape[self.axis]
+ self.input_spec = keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim})
+
+ def _add_gamma_weight(self, input_shape):
+ dim = input_shape[self.axis]
+ shape = (dim,)
+
+ if self.scale:
+ self.gamma = self.add_weight(
+ shape=shape,
+ name="gamma",
+ initializer=self.gamma_initializer,
+ regularizer=self.gamma_regularizer,
+ constraint=self.gamma_constraint,
+ )
+ else:
+ self.gamma = None
+
+ def _add_beta_weight(self, input_shape):
+ dim = input_shape[self.axis]
+ shape = (dim,)
+
+ if self.center:
+ self.beta = self.add_weight(
+ shape=shape,
+ name="beta",
+ initializer=self.beta_initializer,
+ regularizer=self.beta_regularizer,
+ constraint=self.beta_constraint,
+ )
+ else:
+ self.beta = None
+
+ def _create_broadcast_shape(self, input_shape):
+ broadcast_shape = [1] * len(input_shape)
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
+ if not is_instance_norm:
+ broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
+ broadcast_shape.insert(self.axis, self.groups)
+ else:
+ broadcast_shape[self.axis] = self.groups
+ return broadcast_shape
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert
+class TFHubertWeightNormConv1D(keras.layers.Conv1D):
+ """Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm"""
+
+ def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs):
+ super().__init__(
+ filters=filters,
+ kernel_size=kernel_size,
+ groups=groups,
+ padding="valid",
+ use_bias=True,
+ bias_initializer="he_normal",
+ **kwargs,
+ )
+ self.explicit_padding = explicit_padding
+ self.filter_axis = 2
+ self.kernel_norm_axes = tf.constant([0, 1])
+
+ def _init_norm(self):
+ """Set the norm of the weight vector."""
+ kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes))
+ self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis])
+
+ def _normalize_kernel(self):
+ """Generate normalized weights."""
+ kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g)
+ self.kernel = tf.transpose(kernel)
+
+ def build(self, input_shape):
+ if not self.built:
+ super().build(input_shape)
+
+ self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True)
+ self.weight_v = self.kernel
+
+ self.weight_g = self.add_weight(
+ name="weight_g",
+ shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1),
+ initializer="ones",
+ dtype=self.weight_v.dtype,
+ trainable=True,
+ )
+ self._init_norm()
+ self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True)
+
+ def call(self, inputs):
+ # TODO Matt: Assigning to attributes in call() is deeply sinful in TensorFlow, as it should be idempotent.
+ # This whole layer should be replaced by a layer that doesn't inherit from Conv1D, but instead calls
+ # a functional 1d convolution with normalized weights that it generates (but does not store!)
+ self._normalize_kernel()
+
+ padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0)))
+ output = super().call(padded_inputs)
+
+ return output
+
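+ # Editorial note on the weight-normalized convolution above: the kernel is stored as a direction
+ # `weight_v` and a magnitude `weight_g`, and is reassembled at every call as
+ # kernel = weight_g * weight_v / ||weight_v||, matching torch.nn.utils.weight_norm in the PyTorch model.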
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
+class TFHubertNoLayerNormConvLayer(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
+ super().__init__(**kwargs)
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = keras.layers.Conv1D(
+ filters=self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ strides=config.conv_stride[layer_id],
+ use_bias=config.conv_bias,
+ name="conv",
+ )
+ self.activation = get_tf_activation(config.feat_extract_activation)
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv", None) is not None:
+ with tf.name_scope(self.conv.name):
+ self.conv.build([None, None, self.in_conv_dim])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
+class TFHubertLayerNormConvLayer(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
+ super().__init__(**kwargs)
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = keras.layers.Conv1D(
+ filters=self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ strides=config.conv_stride[layer_id],
+ use_bias=config.conv_bias,
+ name="conv",
+ )
+ self.layer_norm = keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps)
+ self.activation = get_tf_activation(config.feat_extract_activation)
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv", None) is not None:
+ with tf.name_scope(self.conv.name):
+ self.conv.build([None, None, self.in_conv_dim])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.out_conv_dim])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
+class TFHubertGroupNormConvLayer(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
+ super().__init__(**kwargs)
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = keras.layers.Conv1D(
+ filters=self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ strides=config.conv_stride[layer_id],
+ use_bias=config.conv_bias,
+ name="conv",
+ )
+ self.activation = get_tf_activation(config.feat_extract_activation)
+ self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm")
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv", None) is not None:
+ with tf.name_scope(self.conv.name):
+ self.conv.build([None, None, self.in_conv_dim])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.out_conv_dim])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
+class TFHubertPositionalConvEmbedding(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
+ super().__init__(**kwargs)
+ self.conv = TFHubertWeightNormConv1D(
+ filters=config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ groups=config.num_conv_pos_embedding_groups,
+ explicit_padding=config.num_conv_pos_embeddings // 2,
+ name="conv",
+ )
+ self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = get_tf_activation(config.feat_extract_activation)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv", None) is not None:
+ with tf.name_scope(self.conv.name):
+ self.conv.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert
+class TFHubertSamePadLayer(keras.layers.Layer):
+ def __init__(self, num_conv_pos_embeddings, **kwargs):
+ super().__init__(**kwargs)
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def call(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, : -self.num_pad_remove, :]
+ return hidden_states
+
+
+class TFHubertFeatureEncoder(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
+ super().__init__(**kwargs)
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [
+ TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}")
+ for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [
+ TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}")
+ for i in range(config.num_feat_extract_layers)
+ ]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = conv_layers
+
+ def call(self, input_values):
+ hidden_states = tf.expand_dims(input_values, -1)
+ for conv_layer in self.conv_layers:
+ hidden_states = conv_layer(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ for conv_layer in self.conv_layers:
+ with tf.name_scope(conv_layer.name):
+ conv_layer.build(None)
+
+
+class TFHubertFeatureExtractor(TFHubertFeatureEncoder):
+ def __init__(self, config, **kwargs):
+ super().__init__(config, **kwargs)
+ warnings.warn(
+ f"The class `{self.__class__.__name__}` has been depreciated "
+ "and will be removed in Transformers v5. "
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
+ FutureWarning,
+ )
+
+
+class TFHubertFeatureProjection(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.projection = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer="zeros",
+ name="projection",
+ )
+ self.dropout = keras.layers.Dropout(rate=config.feat_proj_dropout)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.projection(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.conv_dim[-1]])
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, self.config.conv_dim[-1]])
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert
+class TFHubertAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert
+class TFHubertFeedForward(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.intermediate_dropout = keras.layers.Dropout(config.activation_dropout)
+
+ self.intermediate_dense = keras.layers.Dense(
+ units=config.intermediate_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer="zeros",
+ name="intermediate_dense",
+ )
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+
+ self.output_dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer="zeros",
+ name="output_dense",
+ )
+ self.output_dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states, training=training)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states, training=training)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "intermediate_dense", None) is not None:
+ with tf.name_scope(self.intermediate_dense.name):
+ self.intermediate_dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "output_dense", None) is not None:
+ with tf.name_scope(self.output_dense.name):
+ self.output_dense.build([None, None, self.config.intermediate_size])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert
+class TFHubertEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.attention = TFHubertAttention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ name="attention",
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ attn_residual = hidden_states
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states, attention_mask=attention_mask, training=training
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = attn_residual + hidden_states
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+ if getattr(self, "feed_forward", None) is not None:
+ with tf.name_scope(self.feed_forward.name):
+ self.feed_forward.build(None)
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
+class TFHubertEncoderLayerStableLayerNorm(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.attention = TFHubertAttention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ name="attention",
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ attn_residual = hidden_states
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states, attention_mask=attention_mask, training=training
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = attn_residual + hidden_states
+ hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+ if getattr(self, "feed_forward", None) is not None:
+ with tf.name_scope(self.feed_forward.name):
+ self.feed_forward.build(None)
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert
+class TFHubertEncoder(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
+ attention_mask = _expand_mask(attention_mask)
+ else:
+ attention_mask = None
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = np.random.uniform(0, 1)
+ if training and (dropout_probability < self.config.layerdrop): # skip the layer
+ continue
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "pos_conv_embed", None) is not None:
+ with tf.name_scope(self.pos_conv_embed.name):
+ self.pos_conv_embed.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
+class TFHubertEncoderStableLayerNorm(keras.layers.Layer):
+ def __init__(self, config: HubertConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.layer = [
+ TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
+ ]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
+ attention_mask = _expand_mask(attention_mask)
+ else:
+ attention_mask = None
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = np.random.uniform(0, 1)
+ if training and (dropout_probability < self.config.layerdrop): # skip the layer
+ continue
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "pos_conv_embed", None) is not None:
+ with tf.name_scope(self.pos_conv_embed.name):
+ self.pos_conv_embed.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFHubertMainLayer(keras.layers.Layer):
+ config_class = HubertConfig
+
+ def __init__(self, config: HubertConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor")
+ self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection")
+
+ if config.do_stable_layer_norm:
+ self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder")
+ else:
+ self.encoder = TFHubertEncoder(config, name="encoder")
+
+ def build(self, input_shape=None):
+ self.masked_spec_embed = self.add_weight(
+ shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed"
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "feature_extractor", None) is not None:
+ with tf.name_scope(self.feature_extractor.name):
+ self.feature_extractor.build(None)
+ if getattr(self, "feature_projection", None) is not None:
+ with tf.name_scope(self.feature_projection.name):
+ self.feature_projection.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
+ """
+ Computes the output length of the convolutional layers
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return (input_length - kernel_size) // stride + 1
+
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
+
+ return input_lengths
+
+ def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: tf.Tensor | None = None):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+ batch_size, sequence_length, hidden_size = shape_list(hidden_states)
+
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+ hidden_states = tf.where(
+ tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
+ self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
+ hidden_states,
+ )
+
+ elif self.config.mask_time_prob > 0:
+ # generate indices & apply SpecAugment along time axis
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ min_masks=2,
+ )
+ hidden_states = tf.where(
+ tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
+ self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
+ hidden_states,
+ )
+
+ # apply SpecAugment along feature axis
+ if self.config.mask_feature_prob > 0:
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ )
+ hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0)
+
+ return hidden_states
+
+ @unpack_inputs
+ def call(
+ self,
+ input_values: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ token_type_ids: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: tf.Tensor | None = None,
+ output_hidden_states: tf.Tensor | None = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ **kwargs: Any,
+ ):
+ hidden_states = self.feature_extractor(tf.cast(input_values, tf.float32), training=training)
+
+ if attention_mask is not None:
+ # compute real output lengths according to convolution formula
+ output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, -1))
+
+ attention_mask = tf.sequence_mask(
+ output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype
+ )
+
+ hidden_states = self.feature_projection(hidden_states, training=training)
+
+ mask_time_indices = kwargs.get("mask_time_indices", None)
+ if training:
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = encoder_outputs[0]
+
+ if not return_dict:
+ return (hidden_states,) + encoder_outputs[1:]
+
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class TFHubertPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = HubertConfig
+ base_model_prefix = "hubert"
+ main_input_name = "input_values"
+
+ @property
+ def input_signature(self):
+ return {
+ "input_values": tf.TensorSpec((None, 16000), tf.float32, name="input_values"),
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
+ "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
+ }
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ logger.warning(
+ f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
+ "to train/fine-tune this model, you need a GPU or a TPU"
+ )
+
+
+HUBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_values` only and nothing else: `model(input_values)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_values": input_values, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Args:
+ config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+HUBERT_INPUTS_DOCSTRING = r"""
+ Args:
+        input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_values` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+        training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top.",
+ HUBERT_START_DOCSTRING,
+)
+class TFHubertModel(TFHubertPreTrainedModel):
+ def __init__(self, config: HubertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.config = config
+ self.hubert = TFHubertMainLayer(config, name="hubert")
+
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ @unpack_inputs
+ def call(
+ self,
+ input_values: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ token_type_ids: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ """
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoProcessor, TFHubertModel
+ >>> from datasets import load_dataset
+ >>> import soundfile as sf
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
+ >>> model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
+
+
+ >>> def map_to_array(batch):
+ ... speech, _ = sf.read(batch["file"])
+ ... batch["speech"] = speech
+ ... return batch
+
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> ds = ds.map(map_to_array)
+
+ >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
+ >>> hidden_states = model(input_values).last_hidden_state
+ ```"""
+
+ output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states
+ output_attentions = output_attentions if output_attentions else self.config.output_attentions
+ return_dict = return_dict if return_dict else self.config.return_dict
+
+ outputs = self.hubert(
+ input_values=input_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "hubert", None) is not None:
+ with tf.name_scope(self.hubert.name):
+ self.hubert.build(None)
+
+
+@add_start_docstrings(
+ """TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
+ HUBERT_START_DOCSTRING,
+)
+class TFHubertForCTC(TFHubertPreTrainedModel):
+ def __init__(self, config: HubertConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.hubert = TFHubertMainLayer(config, name="hubert")
+ self.dropout = keras.layers.Dropout(config.final_dropout)
+ self.lm_head = keras.layers.Dense(config.vocab_size, name="lm_head")
+ self.output_hidden_size = (
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
+ )
+
+ def freeze_feature_extractor(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ warnings.warn(
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
+ FutureWarning,
+ )
+ self.freeze_feature_encoder()
+
+ def freeze_feature_encoder(self):
+ """
+        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.hubert.feature_extractor.trainable = False
+
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC)
+ @unpack_inputs
+ def call(
+ self,
+ input_values: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ token_type_ids: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked),
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoProcessor, TFHubertForCTC
+ >>> from datasets import load_dataset
+ >>> import soundfile as sf
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
+ >>> model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
+
+
+ >>> def map_to_array(batch):
+ ... speech, _ = sf.read(batch["file"])
+ ... batch["speech"] = speech
+ ... return batch
+
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> ds = ds.map(map_to_array)
+
+ >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
+ >>> logits = model(input_values).logits
+ >>> predicted_ids = tf.argmax(logits, axis=-1)
+
+ >>> transcription = processor.decode(predicted_ids[0])
+
+ >>> # compute loss
+ >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST"
+
+        >>> # Pass the target transcription as text to encode labels
+        >>> labels = processor(text=target_transcription, return_tensors="tf").input_ids
+
+ >>> loss = model(input_values, labels=labels).loss
+ ```"""
+
+ outputs = self.hubert(
+ input_values=input_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ logits = self.lm_head(hidden_states)
+
+ if labels is not None:
+ if tf.reduce_max(labels) >= self.config.vocab_size:
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
+
+ attention_mask = (
+ attention_mask if attention_mask is not None else tf.ones_like(input_values, dtype=tf.float32)
+ )
+ input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1))
+
+ # assuming that padded tokens are filled with -100
+ # when not being attended to
+ labels_mask = tf.cast(labels >= 0, tf.int32)
+ target_lengths = tf.reduce_sum(labels_mask, axis=-1)
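+            # tf.nn.ctc_loss expects per-example logit and label lengths; the CTC blank is mapped to the pad token id below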
+
+ loss = tf.nn.ctc_loss(
+ logits=logits,
+ labels=labels,
+ logit_length=input_lengths,
+ label_length=target_lengths,
+ blank_index=self.config.pad_token_id,
+ logits_time_major=False,
+ )
+
+ if self.config.ctc_loss_reduction == "sum":
+ loss = tf.reduce_sum(loss)
+ loss = tf.reshape(loss, (1,))
+ if self.config.ctc_loss_reduction == "mean":
+ loss = tf.reduce_mean(loss)
+ loss = tf.reshape(loss, (1,))
+ else:
+ loss = None
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFCausalLMOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "hubert", None) is not None:
+ with tf.name_scope(self.hubert.name):
+ self.hubert.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build([None, None, self.output_hidden_size])
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longt5/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/longt5/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..93b9121c33f3932a86813cf5d47b102c503a86d8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longt5/__init__.py
@@ -0,0 +1,84 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_longt5"] = [
+ "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "LongT5EncoderModel",
+ "LongT5ForConditionalGeneration",
+ "LongT5Model",
+ "LongT5PreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_longt5"] = [
+ "FlaxLongT5ForConditionalGeneration",
+ "FlaxLongT5Model",
+ "FlaxLongT5PreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_longt5 import (
+ LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
+ LongT5EncoderModel,
+ LongT5ForConditionalGeneration,
+ LongT5Model,
+ LongT5PreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_longt5 import (
+ FlaxLongT5ForConditionalGeneration,
+ FlaxLongT5Model,
+ FlaxLongT5PreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..201fff1c99b23437dea1c8c27461ad2b0d367734
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py b/venv/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6e8284ed0af84ec7d661885a39de6cd19c6371f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py
@@ -0,0 +1,174 @@
+# coding=utf-8
+# Copyright 2022, The LongT5 Authors and HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" LongT5 model configuration"""
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxSeq2SeqConfigWithPast
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class LongT5Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is
+ used to instantiate a LongT5 model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5
+ [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Arguments:
+ vocab_size (`int`, *optional*, defaults to 32128):
+ Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`LongT5Model`].
+ d_model (`int`, *optional*, defaults to 512):
+ Size of the encoder layers and the pooler layer.
+ d_kv (`int`, *optional*, defaults to 64):
+ Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
+ num_heads`.
+ d_ff (`int`, *optional*, defaults to 2048):
+ Size of the intermediate feed forward layer in each `LongT5Block`.
+ num_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer encoder.
+ num_decoder_layers (`int`, *optional*):
+ Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
+ num_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer encoder.
+        local_radius (`int`, *optional*, defaults to 127):
+            Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism.
+        global_block_size (`int`, *optional*, defaults to 16):
+            Length of blocks an input sequence is divided into for a global token representation. Used only for
+ `encoder_attention_type = "transient-global"`.
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer.
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
+ The maximum distance of the longer sequences for the bucket separation.
+ dropout_rate (`float`, *optional*, defaults to 0.1):
+ The ratio for all dropout layers.
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the layer normalization layers.
+ initializer_factor (`float`, *optional*, defaults to 1):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+ feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
+ Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the
+ `"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`.
+ encoder_attention_type (`string`, *optional*, defaults to `"local"`):
+ Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are
+ supported by LongT5 implementation.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ """
+
+ model_type = "longt5"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
+
+ def __init__(
+ self,
+ vocab_size=32128,
+ d_model=512,
+ d_kv=64,
+ d_ff=2048,
+ num_layers=6,
+ num_decoder_layers=None,
+ num_heads=8,
+ local_radius=127,
+ global_block_size=16,
+ relative_attention_num_buckets=32,
+ relative_attention_max_distance=128,
+ dropout_rate=0.1,
+ layer_norm_epsilon=1e-6,
+ initializer_factor=1.0,
+ feed_forward_proj="relu",
+ is_encoder_decoder=True,
+ encoder_attention_type="local",
+ use_cache=True,
+ pad_token_id=0,
+ eos_token_id=1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.d_kv = d_kv
+ self.d_ff = d_ff
+ self.num_layers = num_layers
+ # default = symmetry
+ self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
+ self.num_heads = num_heads
+ self.local_radius = local_radius
+ self.global_block_size = global_block_size
+ self.relative_attention_num_buckets = relative_attention_num_buckets
+ self.relative_attention_max_distance = relative_attention_max_distance
+ self.dropout_rate = dropout_rate
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_factor = initializer_factor
+ self.feed_forward_proj = feed_forward_proj
+ self.encoder_attention_type = encoder_attention_type
+ self.use_cache = use_cache
+
+ act_info = self.feed_forward_proj.split("-")
+ self.dense_act_fn = act_info[-1]
+ self.is_gated_act = act_info[0] == "gated"
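+        # e.g. feed_forward_proj="gated-gelu" -> is_gated_act=True, dense_act_fn="gelu" (remapped to
+        # "gelu_new" below for backwards compatibility); feed_forward_proj="relu" -> is_gated_act=False, dense_act_fn="relu"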
+
+ if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
+ raise ValueError(
+ f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
+ "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
+ "'gated-gelu' or 'relu'"
+ )
+
+ # for backwards compatibility
+ if feed_forward_proj == "gated-gelu":
+ self.dense_act_fn = "gelu_new"
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ **kwargs,
+ )
+
+
+class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ common_inputs = {
+ "input_ids": {0: "batch", 1: "encoder_sequence"},
+ "attention_mask": {0: "batch", 1: "encoder_sequence"},
+ }
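+        # the integer keys are axis indices mapped to symbolic dynamic-dimension names for ONNX export;
+        # with `use_past`, the decoder sequence axis is typically static because cached generation feeds
+        # the decoder a single new token per step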
+ if self.use_past:
+ common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+
+ return common_inputs
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 13
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py b/venv/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a1394c719d2d836ebc59693755671b936291be5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
@@ -0,0 +1,215 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert T5/LongT5X checkpoints from the original repository to JAX/FLAX model. This script is an extension of
+'src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py'.
+"""
+
+import argparse
+
+from t5x import checkpoints
+
+from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
+
+
+def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
+ config = AutoConfig.from_pretrained(config_name)
+ flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
+ t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
+
+ split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]
+
+ if config.model_type == "t5":
+ encoder_attn_name = "SelfAttention"
+ if config.model_type == "longt5" and config.encoder_attention_type == "local":
+ encoder_attn_name = "LocalSelfAttention"
+ elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ encoder_attn_name = "TransientGlobalSelfAttention"
+ else:
+ raise ValueError(
+ "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
+ " attribute with a value from ['local', 'transient-global]."
+ )
+
+ # Encoder
+ for layer_index in range(config.num_layers):
+ layer_name = f"layers_{str(layer_index)}"
+
+ # Self-Attention
+ t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
+ t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
+ t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
+ t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
+
+ # Global input layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
+
+ # Layer Normalization
+ t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
+
+ if split_mlp_wi:
+ t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+ t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+ else:
+ t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+ t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+ # Layer Normalization
+ t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+ # Assigning
+ flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
+
+ flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm
+
+ # Global input layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
+ "weight"
+ ] = t5x_global_layer_norm
+
+ if split_mlp_wi:
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+ else:
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+ flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm
+
+ flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
+
+ # Only for layer 0:
+ t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
+ flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
+ "embedding"
+ ] = t5x_encoder_rel_embedding
+
+ # Side/global relative position_bias + layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
+ flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
+ "embedding"
+ ] = t5x_encoder_global_rel_embedding
+
+ # Assigning
+ t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
+ flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
+
+ # Decoder
+ for layer_index in range(config.num_layers):
+ layer_name = f"layers_{str(layer_index)}"
+
+ # Self-Attention
+ t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
+ t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
+ t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
+ t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
+
+ # Layer Normalization
+ t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
+ "scale"
+ ]
+
+ # Encoder-Decoder-Attention
+ t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
+ t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
+ t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
+ t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
+ t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]
+
+ # Layer Normalization
+ t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
+
+ # MLP
+ if split_mlp_wi:
+ t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+ t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+ else:
+ t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+ t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+ # Layer Normalization
+ tx5_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+ # Assigning
+ flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
+
+ flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm
+
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
+
+ flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm
+
+ if split_mlp_wi:
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+ else:
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+
+ flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tx5_mlp_layer_norm
+
+ flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
+
+ # Decoder Normalization
+ tx5_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
+ flax_model.params["decoder"]["final_layer_norm"]["weight"] = tx5_decoder_norm
+
+ # Only for layer 0:
+ t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
+ flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
+ "embedding"
+ ] = t5x_decoder_rel_embedding
+
+ # Token Embeddings
+ tx5_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
+ flax_model.params["shared"]["embedding"] = tx5_token_embeddings
+
+ # LM Head (only in v1.1 and LongT5 checkpoints)
+ if "logits_dense" in t5x_model["target"]["decoder"]:
+ flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]
+
+ flax_model.save_pretrained(flax_dump_folder_path)
+ print("T5X Model was sucessfully converted!")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
+ )
+ parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
+ parser.add_argument(
+ "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
+ )
+ args = parser.parse_args()
+ convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py b/venv/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..d47f644ba37da0383732874ca3634ec9088cd6ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py
@@ -0,0 +1,2447 @@
+# coding=utf-8
+# Copyright 2022 LongT5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Flax LongT5 model."""
+
+
+import copy
+from typing import Any, Callable, List, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen import partitioning as nn_partitioning
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxSeq2SeqLMOutput,
+ FlaxSeq2SeqModelOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_longt5 import LongT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
+_CONFIG_FOR_DOC = "LongT5Config"
+
+remat = nn_partitioning.remat
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
+def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = jnp.zeros_like(input_ids)
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
+
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
+ return shifted_input_ids
+
+
+def _pad_to_multiple(x: jnp.ndarray, block_len: int, axis: int, pad_value: int = 0) -> jnp.ndarray:
+ """Pad an array so that a sequence length will be a multiple of `block_len`"""
+ pad_len = -x.shape[axis] % block_len
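+    # e.g. a length of 10 with block_len=4 gives pad_len = -10 % 4 = 2, i.e. a padded length of 12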
+ pad = [(0, 0)] * x.ndim
+ pad[axis] = (0, pad_len)
+ x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value)
+ return x
+
+
+def _split_into_blocks(x: jnp.ndarray, block_len: int, axis: int) -> jnp.ndarray:
+ """Split an input array into blocks of a given `block_len` along the given `axis`. If the dimension length
+ is not a multiple of `block_len`, it will be padded first with selected `pad_value`.
+ """
+ # pad tensor to multiple of block_len
+ if x.shape[axis] % block_len != 0:
+ x = _pad_to_multiple(x, block_len, axis, pad_value=0)
+ num_blocks = x.shape[axis] // block_len
+ output_shape = x.shape[:axis] + (num_blocks, block_len) + x.shape[(axis + 1) :]
+ return x.reshape(output_shape)
+
+
+def _concatenate_3_blocks(x: jnp.ndarray, block_axis: int, sequence_axis: int, pad_value: int = 0) -> jnp.ndarray:
+ """Concatenate three consecutive blocks for each input block for local attentiont.
+ For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
+ """
+ num_blocks = x.shape[block_axis]
+
+ pad = [(0, 0)] * x.ndim
+ pad[block_axis] = (1, 1)
+ # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
+ x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value)
+
+ blocks_list: List[np.array] = []
+ for i in range(3):
+ # We use indexing approach here:
+ # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
+ indices = [slice(0, None)] * x.ndim
+ indices[block_axis] = slice(i, i + num_blocks)
+ indices = tuple(indices)
+ blocks_list.append(x[indices])
+ return jnp.concatenate(blocks_list, axis=sequence_axis) # [batch_size, num_blocks, 3 * block_len, ...]
+
+
+def _make_3block_relative_position_ids(block_len: int) -> jnp.ndarray:
+ """Makes 3-blocked relative position ids for local attention."""
+ position_ids = jnp.arange(3 * block_len, dtype=jnp.int32)
+ center_position_ids = position_ids[block_len:-block_len]
+ relative_position_ids = position_ids[None, :] - center_position_ids[:, None] # [block_len, 3 * block_len]
+ return relative_position_ids
+
+
+def _mask_local_attention_mask(local_attention_mask: np.ndarray, block_len: int) -> jnp.ndarray:
+ """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius."""
+ relative_position_ids = _make_3block_relative_position_ids(block_len)
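+    # positions whose absolute relative distance is >= block_len fall outside the allowed local window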
+ locality_mask = jnp.abs(relative_position_ids) < block_len
+ locality_mask = locality_mask[None, None, :, :]
+ return jnp.logical_and(local_attention_mask, locality_mask)
+
+
+def _get_local_attention_mask(attention_mask: np.ndarray, block_len: int) -> jnp.ndarray:
+ """Prepare attention mask to be applied for a local attention."""
+ # [batch_size, num_blocks, block_len]
+ _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, axis=1)
+ # [batch_size, num_block, 3 * block_len]
+ _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_axis=1, sequence_axis=2)
+
+ _blocked_attention_mask = _blocked_attention_mask[..., None]
+ _3blocked_attention_mask = _3blocked_attention_mask[..., None, :]
+ # [batch_size, num_block, block_len, 3 * block_len]
+ local_attention_mask = jnp.logical_and(_blocked_attention_mask, _3blocked_attention_mask)
+ local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
+ # [batch_size, 1, num_block, block_len, 3 * block_len]
+ return local_attention_mask[:, None, ...]
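+
+# Illustrative note: since the attention modules below set block_len = local_radius + 1, the
+# `abs(relative_position) < block_len` condition in _mask_local_attention_mask is equivalent to
+# `abs(relative_position) <= local_radius`, so each token may only attend to tokens within `local_radius`
+# positions on either side. The returned mask has shape (batch_size, 1, num_blocks, block_len, 3 * block_len).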
+
+
+def _make_global_fixed_block_ids(attention_mask: np.ndarray, global_block_size: int) -> Tuple[jnp.ndarray, np.ndarray]:
+ """Obtain the "fixed block" global id corresponding to each input token.
+
+ This implementation is a simplified version of the original Flaxformer implementation, adapted from:
+ https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
+
+ In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens that do not
+ fill a whole fixed block, are assigned to the preceding block.
+
+ Padding tokens from the original sequence are represented by -1.
+ """
+ batch_size, seq_len = attention_mask.shape[:2]
+
+ def handle_orphan_tokens(block_ids: np.ndarray) -> jnp.ndarray:
+ block_ends = (jnp.arange(seq_len) % global_block_size) == global_block_size - 1
+ true_block_ends = jnp.logical_and(block_ends, block_ids >= 0)
+ full_blocks = true_block_ends.sum(-1)[..., None]
+ block_ids = jnp.minimum(block_ids, full_blocks - 1)
+ return block_ids
+
+ fixed_block_mask = jnp.ones_like(attention_mask) / global_block_size
+ fixed_block_mask = jnp.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
+ mask = jnp.where(attention_mask != 0.0, 1.0, -1000.0)
+ global_block_ids = jnp.maximum(
+ jnp.floor(mask + fixed_block_mask - 1.0), jnp.array(-1.0, dtype=attention_mask.dtype)
+ )
+ # set padding tokens to -1
+ global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
+ # [batch_size, seq_len]
+ global_block_ids = handle_orphan_tokens(global_block_ids)
+ num_globals = seq_len // global_block_size
+
+ # [batch_size, seq_len // global_block_size]
+ if num_globals > 0:
+ _sequence_block_ids_max = jnp.repeat(global_block_ids.max(axis=-1)[:, None], repeats=num_globals, axis=1)
+ else:
+ _sequence_block_ids_max = jnp.zeros((batch_size, 0), dtype=global_block_ids.dtype)
+ global_segment_ids = jnp.cumsum(jnp.ones((batch_size, num_globals)), axis=-1) - 1
+ global_segment_ids = jnp.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
+ return global_block_ids, global_segment_ids
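+
+# Illustrative note (assumed inputs): for an all-ones attention_mask of shape (1, 10) and global_block_size=4,
+# the raw block ids are [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]; handle_orphan_tokens folds the last two (orphan) tokens
+# into the preceding block, giving block_ids = [[0, 0, 0, 0, 1, 1, 1, 1, 1, 1]] and
+# global_segment_ids = [[1, 1]] (10 // 4 = 2 global blocks).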
+
+
+def _make_side_relative_position_ids(attention_mask: np.ndarray, global_block_size: int) -> np.ndarray:
+ """Create the relative position tensor for local -> global attention."""
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
+ global_seq_len = global_segment_ids.shape[-1]
+ global_positions = jnp.arange(global_seq_len)
+ side_relative_position = global_positions - block_ids[..., None]
+ return side_relative_position
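+
+# Illustrative note: continuing the example above (block_ids = [[0, 0, 0, 0, 1, 1, 1, 1, 1, 1]], two global
+# blocks), a token assigned to block 0 gets side relative positions [0, 1] and a token assigned to block 1
+# gets [-1, 0]; the output has shape (batch_size, seq_len, global_seq_len).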
+
+
+def _create_global_aggregates(hidden_states: np.ndarray, block_ids: np.ndarray, global_seq_len: int) -> np.ndarray:
+ """Compute individual block aggregates by summing over individual blocks."""
+ # (batch..., seq_len, global_seq_len)
+ one_hot_block_ids = jax.nn.one_hot(block_ids, global_seq_len)
+ return jnp.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids)
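+
+# Illustrative note: jax.nn.one_hot maps each block id to a one-hot row of length global_seq_len (and padding
+# ids of -1 to an all-zero row), so the einsum "...nd,...ng->...gd" sums the hidden states of the tokens
+# belonging to each global block, yielding a (batch..., global_seq_len, hidden) tensor.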
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerNorm with T5->LongT5
+class FlaxLongT5LayerNorm(nn.Module):
+ hidden_size: int
+ dtype: jnp.dtype = jnp.float32
+ eps: float = 1e-6
+ weight_init: Callable[..., np.ndarray] = jax.nn.initializers.ones
+
+ def setup(self):
+ self.weight = self.param("weight", self.weight_init, (self.hidden_size,))
+
+ def __call__(self, hidden_states):
+ """
+ Construct a layernorm module in the LongT5 style; no bias and no subtraction of the mean.
+ """
+ # layer norm should always be calculated in float32
+ variance = jnp.power(hidden_states.astype("f4"), 2).mean(axis=-1, keepdims=True)
+ hidden_states = hidden_states / jnp.sqrt(variance + self.eps)
+
+ return self.weight * hidden_states
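+
+ # Note: the layer norm above is an RMS-style norm, y = weight * x / sqrt(mean(x**2, axis=-1) + eps),
+ # with the variance accumulated in float32 and no mean subtraction or bias.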
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseActDense with T5->LongT5
+class FlaxLongT5DenseActDense(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5)
+ wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5)
+
+ self.wi = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wo = nn.Dense(
+ self.config.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wo_init_std),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+ self.act = ACT2FN[self.config.dense_act_fn]
+
+ def __call__(self, hidden_states, deterministic=True):
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseGatedActDense with T5->LongT5
+class FlaxLongT5DenseGatedActDense(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5)
+ wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5)
+
+ self.wi_0 = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wi_1 = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wo = nn.Dense(
+ self.config.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wo_init_std),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+ self.act = ACT2FN[self.config.dense_act_fn]
+
+ def __call__(self, hidden_states, deterministic):
+ hidden_gelu = self.act(self.wi_0(hidden_states))
+ hidden_linear = self.wi_1(hidden_states)
+ hidden_states = hidden_gelu * hidden_linear
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerFF with T5->LongT5
+class FlaxLongT5LayerFF(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.is_gated_act:
+ self.DenseReluDense = FlaxLongT5DenseGatedActDense(self.config, dtype=self.dtype)
+ else:
+ self.DenseReluDense = FlaxLongT5DenseActDense(self.config, dtype=self.dtype)
+
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(self, hidden_states, deterministic=True):
+ forwarded_states = self.layer_norm(hidden_states)
+ forwarded_states = self.DenseReluDense(forwarded_states, deterministic=deterministic)
+ hidden_states = hidden_states + self.dropout(forwarded_states, deterministic=deterministic)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention with T5->LongT5
+class FlaxLongT5Attention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ causal: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
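+
+ # Illustrative example (not from the upstream docstring): with the defaults bidirectional=True,
+ # num_buckets=32 and max_distance=128, the effective per-direction bucket count is 16 and max_exact is 8,
+ # so a relative position of -3 falls into bucket 3 (exact range), while +10 falls into bucket 24
+ # (an offset of 16 for the positive direction plus a logarithmically spaced bucket).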
+
+ def compute_bias(self, query_length, key_length):
+ """Compute binned relative position bias"""
+ context_position = jnp.arange(query_length, dtype="i4")[:, None]
+ memory_position = jnp.arange(key_length, dtype="i4")[None, :]
+
+ relative_position = memory_position - context_position
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=(not self.causal),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, :, :, :]
+ return values
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.inner_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = jax.lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = jax.lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions
+ # that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def _create_position_bias(
+ self, key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
+ ):
+ cache_is_filled = self.causal and self.has_variable("cache", "cached_key") and (not init_cache)
+ key_length = key_states.shape[1]
+ query_length = key_length if cache_is_filled else query_states.shape[1]
+
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(query_length, key_length)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, self.n_heads, query_length, key_length), dtype=self.dtype)
+
+ # if key and values are already calculated, only the last query position bias should be taken
+ if cache_is_filled:
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ position_bias = jax.lax.dynamic_slice(
+ position_bias,
+ (0, 0, causal_attention_mask_shift, 0),
+ (1, self.n_heads, seq_length, max_decoder_length),
+ )
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ use_cache=False,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # q, k, v projections
+ query_states = self.q(hidden_states)  # (batch_size, seq_length, n_heads * dim_per_head)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # counteract scaling in dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ # for fast decoding causal attention mask should be shifted
+ causal_attention_mask_shift = (
+ self.variables["cache"]["cache_index"] if (self.has_variable("cache", "cached_key") and self.causal) else 0
+ )
+ # create causal attention_mask; attention_mask has to be defined when model is causal
+ if self.causal:
+ causal_attention_mask = make_causal_mask(attention_mask, dtype="bool")
+
+ # fast decoding for generate requires special attention_mask
+ if self.has_variable("cache", "cached_key"):
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_attention_mask = jax.lax.dynamic_slice(
+ causal_attention_mask,
+ (0, 0, causal_attention_mask_shift, 0),
+ (1, 1, seq_length, max_decoder_length),
+ )
+
+ # broadcast causal attention mask & attention mask to fit for merge
+ causal_attention_mask = jnp.broadcast_to(
+ causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:]
+ )
+ attention_mask = jnp.broadcast_to(
+ jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape
+ )
+ attention_mask = combine_masks(attention_mask, causal_attention_mask)
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # replace masked positions with the smallest finite value of the computation dtype
+ if attention_mask is not None:
+ mask_value = jnp.finfo(self.dtype).min
+ attention_mask = jax.lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, mask_value).astype(self.dtype),
+ )
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(
+ key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
+ )
+
+ if attention_mask is not None:
+ position_bias = position_bias + attention_mask
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5LocalAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.local_radius = self.config.local_radius
+ self.block_len = self.local_radius + 1
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ memory_position = jnp.arange(3 * block_length, dtype="i4")
+ context_position = memory_position[block_length:-block_length]
+
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, None, :, :, :]
+ return values
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim)
+
+ def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(block_len)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype)
+
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # q, k, v projections
+ query_states = self.q(hidden_states)  # (batch_size, seq_length, n_heads * dim_per_head)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim)
+ query_states = _split_into_blocks(query_states, self.block_len, axis=1)
+ key_states = _split_into_blocks(key_states, self.block_len, axis=1)
+ value_states = _split_into_blocks(value_states, self.block_len, axis=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2)
+ value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2)
+
+ # counteract scaling in dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ if attention_mask is not None:
+ attention_mask = _get_local_attention_mask(attention_mask, self.block_len)
+
+ # replace masked positions with a large negative bias (-1e10)
+ attention_mask = jax.lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, -1e10).astype(self.dtype),
+ )
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(self.block_len, attention_mask)
+
+ if attention_mask is not None:
+ position_bias = position_bias + attention_mask.swapaxes(1, 2)
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = attn_output[:, :seq_length, :]
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5TransientGlobalAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.local_radius = self.config.local_radius
+ self.block_len = self.local_radius + 1
+ self.global_block_size = self.config.global_block_size
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+
+ # Relative attention bias & layer norm for global attention
+ if self.has_relative_attention_bias:
+ self.global_relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+ self.global_input_layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ memory_position = jnp.arange(3 * block_length, dtype="i4")
+ context_position = memory_position[block_length:-block_length]
+
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, None, :, :, :]
+ return values
+
+ def compute_side_bias(self, attention_mask: np.ndarray, global_segment_ids: np.ndarray) -> np.ndarray:
+ # side_attention_mask: (batch_size, 1, seq_len, global_seq_len)
+ side_attention_mask = jnp.equal(attention_mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
+ attention_side_bias = jax.lax.select(
+ side_attention_mask > 0,
+ jnp.full(side_attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(side_attention_mask.shape, -1e10).astype(self.dtype),
+ )
+ # (batch_size, seq_len, global_seq_len)
+ side_relative_position = _make_side_relative_position_ids(attention_mask, self.global_block_size)
+ side_relative_position_bucket = self._relative_position_bucket(
+ side_relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (batch_size, seq_len, global_seq_len, num_heads)
+ side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
+
+ # side_bias: (batch_size, num_heads, seq_len, global_seq_len)
+ side_bias = jnp.transpose(side_bias, (0, 3, 1, 2))
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ attention_side_bias = attention_side_bias + side_bias
+ return attention_side_bias
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim)
+
+ def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(block_len)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype)
+
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # Prepare components for transient-global attention
+ # Obtain block_ids and global_segment_ids
+ # global_seq_len := seq_len // self.global_block_size
+ # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(
+ attention_mask if attention_mask is not None else jnp.ones((batch_size, seq_length)),
+ self.global_block_size,
+ )
+ # Create global inputs
+ _global_seq_len = global_segment_ids.shape[-1]
+ global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
+ global_inputs = self.global_input_layer_norm(global_inputs)
+
+ # q, k, v projections
+ query_states = self.q(hidden_states)  # (batch_size, seq_length, n_heads * dim_per_head)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # Get global/side key/value_states
+ side_key_states = self.k(global_inputs)
+ side_value_states = self.v(global_inputs)
+
+ # reshape to (batch_size, global_seq_len, n_heads, head_dim)
+ side_key_states = self._split_heads(side_key_states)
+ side_value_states = self._split_heads(side_value_states)
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim)
+ query_states = _split_into_blocks(query_states, self.block_len, axis=1)
+ key_states = _split_into_blocks(key_states, self.block_len, axis=1)
+ value_states = _split_into_blocks(value_states, self.block_len, axis=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2)
+ value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2)
+
+ # Tile side inputs across local key/value blocks
+ # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
+ reps = [1] * (side_key_states.ndim + 1)
+ reps[1] = key_states.shape[1]
+ side_key_states = jnp.tile(side_key_states[:, None, ...], reps)
+ side_value_states = jnp.tile(side_value_states[:, None, ...], reps)
+
+ # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
+ # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
+ key_states = jnp.concatenate((key_states, side_key_states), axis=2)
+ value_states = jnp.concatenate((value_states, side_value_states), axis=2)
+
+ # counteract scaling in dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ if attention_mask is not None:
+ local_attention_mask = _get_local_attention_mask(attention_mask, self.block_len)
+ local_attention_mask = jax.lax.select(
+ local_attention_mask > 0,
+ jnp.full(local_attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(local_attention_mask.shape, -1e10).astype(self.dtype),
+ )
+ else:
+ local_attention_mask = None
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(self.block_len, attention_mask)
+ if local_attention_mask is not None:
+ position_bias = position_bias + local_attention_mask.swapaxes(1, 2)
+
+ # Calculate global/side bias - shape: (batch_size, num_heads, seq_len, global_seq_len)
+ if attention_mask is None:
+ attention_mask = jnp.ones((batch_size, seq_length))
+ side_position_bias = self.compute_side_bias(attention_mask, global_segment_ids)
+ side_position_bias = _split_into_blocks(side_position_bias, self.block_len, axis=-2)
+ side_position_bias = jnp.swapaxes(side_position_bias, 1, 2)
+ position_bias = jnp.concatenate((position_bias, side_position_bias), axis=-1)
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = attn_output[:, :seq_length, :]
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5LayerLocalSelfAttention(nn.Module):
+ """Local self attention used in encoder"""
+
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.LocalSelfAttention = FlaxLongT5LocalAttention(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ **kwargs: Any, # to accept init_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.LocalSelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class FlaxLongT5LayerTransientGlobalSelfAttention(nn.Module):
+ """Transient-Global self attention used in encoder"""
+
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.TransientGlobalSelfAttention = FlaxLongT5TransientGlobalAttention(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ **kwargs: Any, # to accept init_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.TransientGlobalSelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerSelfAttention with T5->LongT5
+class FlaxLongT5LayerSelfAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.SelfAttention = FlaxLongT5Attention(
+ self.config,
+ has_relative_attention_bias=self.has_relative_attention_bias,
+ causal=self.config.causal,
+ dtype=self.dtype,
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.SelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCrossAttention with T5->LongT5
+class FlaxLongT5LayerCrossAttention(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.EncDecAttention = FlaxLongT5Attention(
+ self.config, has_relative_attention_bias=False, causal=False, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ key_value_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.EncDecAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ key_value_states=key_value_states,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class FlaxLongT5Block(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.causal = self.config.causal
+ if self.causal:
+ attention_layer = FlaxLongT5LayerSelfAttention
+ elif self.config.encoder_attention_type == "local":
+ attention_layer = FlaxLongT5LayerLocalSelfAttention
+ elif self.config.encoder_attention_type == "transient-global":
+ attention_layer = FlaxLongT5LayerTransientGlobalSelfAttention
+ else:
+ raise ValueError(
+ "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
+ f"but got {self.config.encoder_attention_type}."
+ )
+ self.layer = (
+ attention_layer(
+ self.config,
+ has_relative_attention_bias=self.has_relative_attention_bias,
+ name=str(0),
+ dtype=self.dtype,
+ ),
+ )
+ feed_forward_index = 1
+ if self.causal:
+ self.layer += (FlaxLongT5LayerCrossAttention(self.config, name=str(1), dtype=self.dtype),)
+ feed_forward_index += 1
+
+ self.layer += (FlaxLongT5LayerFF(self.config, name=str(feed_forward_index), dtype=self.dtype),)
+
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Block.__call__ with T5->LongT5
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ output_attentions=False,
+ return_dict=True,
+ deterministic=True,
+ init_cache=False,
+ ):
+ self_attention_outputs = self.layer[0](
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+ hidden_states = self_attention_outputs[0]
+ attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights
+
+ do_cross_attention = self.causal and encoder_hidden_states is not None
+ if do_cross_attention:
+ cross_attention_outputs = self.layer[1](
+ hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ position_bias=encoder_decoder_position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = cross_attention_outputs[0]
+
+ # Keep cross-attention outputs and relative position weights
+ attention_outputs = attention_outputs + cross_attention_outputs[1:]
+
+ # Apply Feed Forward layer
+ hidden_states = self.layer[-1](hidden_states, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ outputs = outputs + attention_outputs
+
+ # returns hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCollection with T5->LongT5
+class FlaxLongT5LayerCollection(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxLongT5Block(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ return self.layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5BlockCollection with T5->LongT5
+class FlaxLongT5BlockCollection(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.causal = self.config.causal
+ if self.gradient_checkpointing:
+ FlaxLongT5CheckpointLayer = remat(FlaxLongT5LayerCollection, static_argnums=(6, 7, 8))
+ self.blocks = [
+ FlaxLongT5CheckpointLayer(
+ self.config,
+ has_relative_attention_bias=(i == 0),
+ dtype=self.dtype,
+ name=str(i),
+ )
+ for i in range(self.config.num_layers)
+ ]
+ else:
+ self.blocks = [
+ FlaxLongT5LayerCollection(
+ self.config,
+ has_relative_attention_bias=(i == 0),
+ dtype=self.dtype,
+ name=str(i),
+ )
+ for i in range(self.config.num_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ ):
+ # Prepare head mask if needed
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and self.causal) else None
+ position_bias = None
+ encoder_decoder_position_bias = None
+
+ for i, layer_module in enumerate(self.blocks):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ position_bias,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ encoder_decoder_position_bias,
+ output_attentions,
+ deterministic,
+ init_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ # We share the position biases between the layers - the first layer stores them
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ position_bias = layer_outputs[1]
+
+ if self.causal and encoder_hidden_states is not None:
+ encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[2],)
+ if self.causal:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[4],)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Stack with T5->LongT5
+class FlaxLongT5Stack(nn.Module):
+ config: LongT5Config
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.causal = self.config.causal
+
+ self.block = FlaxLongT5BlockCollection(
+ self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+ self.final_layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ ):
+ hidden_states = self.embed_tokens(input_ids)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+
+ outputs = self.block(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+
+ hidden_states = outputs[0]
+
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+
+ # Add last layer
+ all_hidden_states = None
+
+ if output_hidden_states:
+ all_hidden_states = outputs.hidden_states
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ if output_hidden_states:
+ return (
+ hidden_states,
+ all_hidden_states,
+ ) + outputs[2:]
+ return (hidden_states,) + outputs[1:]
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+LONGT5_ENCODE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+LONGT5_DECODE_INPUTS_DOCSTRING = r"""
+ Args:
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For training, `decoder_input_ids` should be provided.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
+ Tuple consisting of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+LONGT5_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ To learn more about how to prepare `decoder_input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(jnp.ndarray))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class FlaxLongT5PreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongT5Config
+ base_model_prefix = "transformer"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: LongT5Config,
+ input_shape: Tuple[int] = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def enable_gradient_checkpointing(self):
+ self._module = self.module_class(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=True,
+ )
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+
+ attention_mask = jnp.ones_like(input_ids)
+ decoder_input_ids = jnp.ones_like(input_ids)
+ decoder_attention_mask = jnp.ones_like(input_ids)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: jnp.ndarray = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if decoder_input_ids is None:
+ raise ValueError(
+ "Make sure to provide both `input_ids` and `decoder_input_ids`. `decoder_input_ids` is not passed"
+ " here."
+ )
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # prepare decoder inputs
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ )
+
+ def init_cache(self, batch_size, max_length, encoder_outputs):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*,
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
+ cross-attention of the decoder.
+ """
+ # init input variables to retrieve cache
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0),
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ init_cache=True,
+ method=_decoder_forward, # we only need to call the decoder to init the cache
+ )
+ return unfreeze(init_variables["cache"])
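+
+ # Illustrative sketch (not part of the upstream module): the cache returned above is meant to be
+ # threaded through `decode` during auto-regressive generation, e.g.
+ #
+ # past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)
+ # outputs = model.decode(
+ # decoder_input_ids,
+ # encoder_outputs,
+ # past_key_values=past_key_values,
+ # decoder_attention_mask=decoder_attention_mask,
+ # )
+ # past_key_values = outputs.past_key_values # updated cache for the next step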
+
+ @add_start_docstrings(LONGT5_ENCODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=LongT5Config)
+ def encode(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ def _encoder_forward(module, input_ids, attention_mask, **kwargs):
+ encode_module = module._get_encoder_module()
+ return encode_module(input_ids, attention_mask, **kwargs)
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ method=_encoder_forward,
+ )
+
+ @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=LongT5Config)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+ >>> import jax.numpy as jnp
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to
+ # be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can
+ # be changed by the FlaxLongT5Attention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past = outputs
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past = outputs
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+
+LONGT5_START_DOCSTRING = r"""
+ The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
+ Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo
+ Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising
+ generative setting. LongT5 is an extension of the T5 model, and it enables using one of two different
+ efficient attention mechanisms: (1) Local attention, or (2) Transient-Global attention.
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified, all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
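+
+# Illustrative note (not part of the upstream module): the `dtype` argument described above only
+# selects the computation precision. A sketch of half-precision usage, assuming the checkpoint
+# name used elsewhere in this file, could look like:
+#
+#     model = FlaxLongT5ForConditionalGeneration.from_pretrained(
+#         "google/long-t5-local-base", dtype=jnp.bfloat16
+#     )
+#     model.params = model.to_bf16(model.params)  # optionally cast the weights as well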
+
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Module with T5->LongT5
+class FlaxLongT5Module(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def setup(self):
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0),
+ dtype=self.dtype,
+ )
+
+ encoder_config = copy.deepcopy(self.config)
+ encoder_config.causal = False
+ self.encoder = FlaxLongT5Stack(
+ encoder_config,
+ embed_tokens=self.shared,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ decoder_config = copy.deepcopy(self.config)
+ decoder_config.causal = True
+ decoder_config.num_layers = self.config.num_decoder_layers
+ self.decoder = FlaxLongT5Stack(
+ decoder_config,
+ embed_tokens=self.shared,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ encoder_outputs=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ deterministic: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Encode if needed (training, first prediction pass)
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return FlaxSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Model with T5->LongT5
+class FlaxLongT5Model(FlaxLongT5PreTrainedModel):
+ module_class = FlaxLongT5Module
+
+
+append_call_sample_docstring(FlaxLongT5Model, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
+
+FLAX_LONGT5_MODEL_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5Model
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5Model.from_pretrained("google/long-t5-local-base")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="np"
+ ... ).input_ids
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```
+"""
+
+
+overwrite_call_docstring(FlaxLongT5Model, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_MODEL_DOCSTRING)
+append_replace_return_docstrings(FlaxLongT5Model, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+
+
+@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5ForConditionalGenerationModule with T5->LongT5
+class FlaxLongT5ForConditionalGenerationModule(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def setup(self):
+ self.model_dim = self.config.d_model
+
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.initializer_factor),
+ dtype=self.dtype,
+ )
+
+ encoder_config = copy.deepcopy(self.config)
+ encoder_config.causal = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = FlaxLongT5Stack(
+ encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+
+ decoder_config = copy.deepcopy(self.config)
+ decoder_config.causal = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = self.config.num_decoder_layers
+ self.decoder = FlaxLongT5Stack(
+ decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+
+ self.lm_head = nn.Dense(
+ self.config.vocab_size,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_factor),
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ encoder_outputs=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ deterministic: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Encode
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.model_dim**-0.5)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.shared.variables["params"]["embedding"]
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output)
+ else:
+ lm_logits = self.lm_head(sequence_output)
+
+ if not return_dict:
+ return (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+
+ return FlaxSeq2SeqLMOutput(
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+class FlaxLongT5ForConditionalGeneration(FlaxLongT5PreTrainedModel):
+ module_class = FlaxLongT5ForConditionalGenerationModule
+
+ @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=LongT5Config)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+ >>> import jax.numpy as jnp
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "summarize: My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to
+ # be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can
+ # be changed by the FlaxLongT5Attention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ decoder_outputs = decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.config.d_model**-0.5)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = module.shared.variables["params"]["embedding"]
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output)
+ else:
+ lm_logits = module.lm_head(sequence_output)
+
+ return lm_logits, decoder_outputs
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ if past_key_values is None:
+ lm_logits, decoder_outputs = outputs
+ else:
+ (lm_logits, decoder_outputs), past = outputs
+
+ if return_dict:
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
+ logits=lm_logits,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+ else:
+ outputs = (lm_logits,) + decoder_outputs[1:]
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ max_length,
+ attention_mask: Optional[jax.Array] = None,
+ decoder_attention_mask: Optional[jax.Array] = None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # initializing the cache
+ batch_size, seq_length = decoder_input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since the decoder uses a causal mask, those positions are masked anyway.
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if decoder_attention_mask is not None:
+ extended_attention_mask = jax.lax.dynamic_update_slice(
+ extended_attention_mask, decoder_attention_mask, (0, 0)
+ )
+
+ return {
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "encoder_attention_mask": attention_mask,
+ "decoder_attention_mask": extended_attention_mask,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ return model_kwargs
+
+
+FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors="np")
+
+ >>> # Generate Summary
+ >>> summary_ids = model.generate(inputs["input_ids"]).sequences
+ >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False))
+ ```
+"""
+
+
+overwrite_call_docstring(
+ FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING
+)
+append_replace_return_docstrings(
+ FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
+)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py b/venv/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..e16e0951208f774e17b951bc7d83120b7c68404f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py
@@ -0,0 +1,2236 @@
+# coding=utf-8
+# Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch LongT5 model."""
+
+
+import copy
+import math
+import warnings
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ DUMMY_INPUTS,
+ DUMMY_MASK,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_torch_fx_proxy,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_longt5 import LongT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "LongT5Config"
+_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
+
+# TODO: Update before the merge
+
+from ..deprecated._archive_maps import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor:
+ """Pad a tensor so that its sequence length along `dim` is a multiple of `block_len`."""
+ pad_len = -x.shape[dim] % block_len
+ # Handle cases when an empty input sequence is given
+ if not all(x.shape):
+ new_shape = list(x.shape)
+ new_shape[dim] += pad_len
+ return torch.zeros(new_shape, dtype=x.dtype)
+
+ pad = [(0, 0)] * x.ndim
+ pad[dim] = (0, pad_len)
+ pad = sum(pad[::-1], ())
+ x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
+ return x
+
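+# Illustrative trace (not part of the upstream module): with `block_len=4` and a tensor of shape
+# (2, 10, 64) padded along `dim=1`, `pad_len = -10 % 4 = 2`, so `_pad_to_multiple` returns a
+# tensor of shape (2, 12, 64) whose last two positions along dim 1 hold `pad_value`.
+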
+
+def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor:
+ """Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length
+ is not a multiple of `block_len`, it will be zero-padded first.
+ """
+ # pad tensor to multiple of block_len
+ if x.shape[dim] % block_len != 0:
+ x = _pad_to_multiple(x, block_len, dim, pad_value=0)
+ num_blocks = x.shape[dim] // block_len
+ output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :]
+ # If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion
+ if 0 in output_shape:
+ return torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ return x.reshape(output_shape)
+
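+# Illustrative trace (not part of the upstream module): for `x` of shape (2, 7, 64) with
+# `block_len=4` and `dim=1`, the sequence is first padded to length 8 and then reshaped to
+# (2, 2, 4, 64), i.e. (batch_size, num_blocks, block_len, hidden_dim).
+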
+
+def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor:
+ """Concatenate three consecutive blocks for each input block for local attention.
+
+ For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
+ """
+ num_blocks = x.shape[block_dim]
+
+ pad = [(0, 0)] * x.ndim
+ pad[block_dim] = (1, 1)
+ pad = sum(pad[::-1], ())
+ # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
+ x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
+
+ blocks_list: List[torch.Tensor] = []
+ for i in range(3):
+ # We use indexing approach here:
+ # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
+ indices = [slice(0, None)] * x.ndim
+ indices[block_dim] = slice(i, i + num_blocks)
+ indices = tuple(indices)
+ blocks_list.append(x[indices])
+ # [batch_size, num_blocks, 3 * block_len, ...]
+ return torch.cat(blocks_list, dim=sequence_dim)
+
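+# Illustrative trace (not part of the upstream module): for blocked keys of shape
+# (batch_size, num_blocks, block_len, n_heads, dim_per_head) with `block_dim=1` and
+# `sequence_dim=2`, the result has shape (batch_size, num_blocks, 3 * block_len, n_heads,
+# dim_per_head); output block `i` is the concatenation of blocks `i - 1`, `i` and `i + 1`,
+# with zero-padding at both edges of the sequence.
+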
+
+def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor:
+ """Makes 3-blocked relative position ids for local attention."""
+ position_ids = torch.arange(3 * block_len, dtype=torch.int32)
+ center_position_ids = position_ids[block_len:-block_len]
+ # [block_len, 3 * block_len]
+ relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1)
+ return relative_position_ids
+
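+# Illustrative trace (not part of the upstream module): for `block_len=2` the center positions are
+# [2, 3] within the 3-block window [0, ..., 5], so the returned ids are
+# [[-2, -1, 0, 1, 2, 3], [-3, -2, -1, 0, 1, 2]] with shape (block_len, 3 * block_len).
+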
+
+def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor:
+ """Mask local attention mask to enforce that tokens are not allowed to attend to tokens farther than `local_radius`."""
+ relative_position_ids = _make_3block_relative_position_ids(block_len)
+ locality_mask = torch.abs(relative_position_ids) < block_len
+ locality_mask = locality_mask[None, None, :, :]
+ locality_mask = locality_mask.to(local_attention_mask.device)
+ return torch.logical_and(local_attention_mask, locality_mask)
+
+
+def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor:
+ """Prepare an attention mask to be applied for local attention."""
+ # [batch_size, num_blocks, block_len]
+ _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1)
+ # [batch_size, num_block, 3 * block_len]
+ _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2)
+
+ _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1)
+ _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2)
+ # [batch_size, num_block, block_len, 3 * block_len]
+ local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask)
+ local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
+ # [batch_size, 1, num_block, block_len, 3 * block_len]
+ return local_attention_mask.unsqueeze(1).to(device)
+
+
+def _make_global_fixed_block_ids(
+ attention_mask: torch.Tensor, global_block_size: int
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Obtain the "fixed block" global id corresponding to each input token.
+
+ This implementation is a simplified version of the original Flaxformer implementation adapted from:
+ https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
+
+ In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make
+ up a whole fixed block, are assigned to the preceding block.
+
+ Padding tokens from the original sequence are represented by -1.
+ """
+ batch_size, seq_len = attention_mask.shape[:2]
+
+ def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor:
+ block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1
+ block_ends = block_ends.to(block_ids.device)
+ true_block_ends = torch.logical_and(block_ends, block_ids >= 0)
+ full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1
+ block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks)
+ return block_ids
+
+ fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size
+ fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
+ mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype)
+ global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype)
+ _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device)
+ global_block_ids = torch.where(
+ global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound
+ )
+ # set padding tokens to -1
+ global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
+ # [batch_size, seq_len]
+ global_block_ids = handle_orphan_tokens(global_block_ids)
+ num_globals = seq_len // global_block_size
+ # [batch_size, seq_len // global_block_size]
+ if num_globals > 0:
+ _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1)
+ else:
+ _sequence_block_ids_max = torch.zeros(
+ batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device
+ )
+ global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1
+ global_segment_ids = global_segment_ids.to(attention_mask.device)
+ global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
+ return global_block_ids.type(torch.int), global_segment_ids.type(torch.int)
+
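+# Illustrative trace (not part of the upstream module): for an all-ones attention mask of shape
+# (1, 8) and `global_block_size=3`, the returned block ids are [[0, 0, 0, 1, 1, 1, 1, 1]] (the two
+# orphan tokens are folded into the preceding block) and the global segment ids are [[1, 1]],
+# one entry per global block.
+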
+
+def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor:
+ """Create the relative position tensor for local -> global attention."""
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
+ global_seq_len = global_segment_ids.shape[-1]
+ global_positions = torch.arange(global_seq_len, device=block_ids.device)
+ side_relative_position = global_positions - block_ids[..., None]
+ return side_relative_position.type(torch.int64)
+
+
+def _create_global_aggregates(
+ hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int
+) -> torch.Tensor:
+ """Compute individual block aggregates by summing over individual blocks."""
+ # (batch..., seq_len, global_seq_len)
+ block_ids = block_ids.where(
+ block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device)
+ )
+ one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1]
+ return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype))
+
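+# Illustrative trace (not part of the upstream module): for `hidden_states` of shape
+# (batch_size, seq_len, d) and `block_ids` of shape (batch_size, seq_len), the einsum above sums
+# the hidden states of all tokens sharing a block id into a (batch_size, global_seq_len, d) tensor;
+# padding tokens (block id -1) are routed to an extra one-hot slot that is sliced away.
+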
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5
+class LongT5LayerNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean.
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ # LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
+ # Square Layer Normalization (https://arxiv.org/abs/1910.07467); thus the variance is calculated
+ # without the mean and there is no bias. Additionally we want to make sure that the accumulation for
+ # half-precision inputs is done in fp32.
+
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ # convert into half-precision if necessary
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
+ hidden_states = hidden_states.to(self.weight.dtype)
+
+ return self.weight * hidden_states
+
+
+try:
+ from apex.normalization import FusedRMSNorm
+
+ LongT5LayerNorm = FusedRMSNorm # noqa
+
+ logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm")
+except ImportError:
+ # using the normal LongT5LayerNorm
+ pass
+except Exception:
+ logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm")
+ pass
+
+ALL_LAYERNORM_LAYERS.append(LongT5LayerNorm)
+
+
+# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5
+class LongT5DenseActDense(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ if (
+ isinstance(self.wo.weight, torch.Tensor)
+ and hidden_states.dtype != self.wo.weight.dtype
+ and self.wo.weight.dtype != torch.int8
+ ):
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+class LongT5DenseGatedActDense(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_gelu = self.act(self.wi_0(hidden_states))
+ hidden_linear = self.wi_1(hidden_states)
+ hidden_states = hidden_gelu * hidden_linear
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5
+class LongT5LayerFF(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ if config.is_gated_act:
+ self.DenseReluDense = LongT5DenseGatedActDense(config)
+ else:
+ self.DenseReluDense = LongT5DenseActDense(config)
+
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(self, hidden_states):
+ forwarded_states = self.layer_norm(hidden_states)
+ forwarded_states = self.DenseReluDense(forwarded_states)
+ hidden_states = hidden_states + self.dropout(forwarded_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5
+class LongT5Attention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias=False):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+ self.gradient_checkpointing = False
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
+ def compute_bias(self, query_length, key_length, device=None):
+ """Compute binned relative position bias"""
+ if device is None:
+ device = self.relative_attention_bias.weight.device
+ context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
+ memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
+ relative_position = memory_position - context_position # shape (query_length, key_length)
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # shape (query_length, key_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
+ values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
+ return values
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ key_value_states=None,
+ position_bias=None,
+ past_key_value=None,
+ layer_head_mask=None,
+ query_length=None,
+ use_cache=False,
+ output_attentions=False,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ # Input is (batch_size, seq_length, dim)
+ # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
+ # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ real_seq_length = seq_length
+
+ if past_key_value is not None:
+ if len(past_key_value) != 2:
+ raise ValueError(
+ f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states"
+ )
+ real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
+
+ key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+ def unshape(states):
+ """reshape"""
+ return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
+
+ def project(hidden_states, proj_layer, key_value_states, past_key_value):
+ """projects hidden states correctly to key/query states"""
+ if key_value_states is None:
+ # self-attn
+ # (batch_size, n_heads, seq_length, dim_per_head)
+ hidden_states = shape(proj_layer(hidden_states))
+ elif past_key_value is None:
+ # cross-attn
+ # (batch_size, n_heads, seq_length, dim_per_head)
+ hidden_states = shape(proj_layer(key_value_states))
+
+ if past_key_value is not None:
+ if key_value_states is None:
+ # self-attn
+ # (batch_size, n_heads, key_length, dim_per_head)
+ hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
+ elif past_key_value.shape[2] != key_value_states.shape[1]:
+ # checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ # cross-attn
+ # (batch_size, n_heads, seq_length, dim_per_head)
+ hidden_states = shape(proj_layer(key_value_states))
+ else:
+ # cross-attn
+ hidden_states = past_key_value
+ return hidden_states
+
+ # get query states
+ query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
+
+ # get key/value states
+ key_states = project(
+ hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
+ )
+ value_states = project(
+ hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
+ )
+
+ # compute scores
+ scores = torch.matmul(
+ query_states, key_states.transpose(3, 2)
+ ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
+
+ if position_bias is None:
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
+
+ # if key and values are already calculated
+ # we want only the last query position bias
+ if past_key_value is not None:
+ position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
+
+ if mask is not None:
+ position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
+
+ if self.pruned_heads:
+ mask = torch.ones(position_bias.shape[1])
+ mask[list(self.pruned_heads)] = 0
+ position_bias_masked = position_bias[:, mask.bool()]
+ else:
+ position_bias_masked = position_bias
+
+ scores += position_bias_masked
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
+ scores
+ ) # (batch_size, n_heads, seq_length, key_length)
+ attn_weights = nn.functional.dropout(
+ attn_weights, p=self.dropout, training=self.training
+ ) # (batch_size, n_heads, seq_length, key_length)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+
+ attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
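+# Illustrative note (not part of the upstream module): with the defaults used by
+# `_relative_position_bucket` for a bidirectional encoder (`num_buckets=32`, `max_distance=128`),
+# the 32 buckets split into 16 for keys at or before the query and 16 for keys after it; within
+# each half, relative distances 0-7 get their own bucket, distances 8-127 share 8 logarithmically
+# spaced buckets, and any distance >= 128 falls into the last bucket.
+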
+
+class LongT5LocalAttention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+ self.gradient_checkpointing = False
+
+ # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
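+
+ # A quick numeric sanity check of the bucketing above (illustrative only, not part of the model code):
+ # with the defaults bidirectional=True, num_buckets=32, max_distance=128, the buckets split into a
+ # "backward" half (0-15) and a "forward" half (16-31), each with 8 exact and 8 log-spaced buckets, e.g.
+ #   _relative_position_bucket(torch.tensor(-3))   -> 3   (exact, backward half)
+ #   _relative_position_bucket(torch.tensor(3))    -> 19  (exact, forward half: 16 + 3)
+ #   _relative_position_bucket(torch.tensor(100))  -> 31  (log-spaced, clipped to the last forward bucket)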
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ target_device = (
+ self.relative_attention_bias.weight.device
+ if self.relative_attention_bias.weight.device.type != "meta"
+ else None
+ )
+ memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device)
+ context_position = memory_position[block_length:-block_length]
+
+ # (block_length, 3 * block_length)
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # (block_length, 3 * block_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (block_length, 3 * block_length, num_heads)
+ values = self.relative_attention_bias(relative_position_bucket)
+ # (1, 1, num_heads, block_length, 3 * block_length)
+ values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
+ return values
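+
+ # Shape sketch for compute_bias (illustrative only): with block_length=3, memory_position is
+ # [0, ..., 8] and context_position is [3, 4, 5], so relative_position covers [-5, ..., 5] and the
+ # returned bias has shape (1, 1, n_heads, 3, 9), i.e. one bias slice per head for the middle block.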
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ ):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
+
+ def unshape(states):
+ """reshape"""
+ return states.contiguous().view(batch_size, -1, self.inner_dim)
+
+ # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)
+ query_states = shape(self.q(hidden_states))
+ key_states = shape(self.k(hidden_states))
+ value_states = shape(self.v(hidden_states))
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
+ query_states = _split_into_blocks(query_states, self.block_len, dim=1)
+ key_states = _split_into_blocks(key_states, self.block_len, dim=1)
+ value_states = _split_into_blocks(value_states, self.block_len, dim=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
+ value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
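+ # Shape example (a sketch, assuming local_radius=127 -> block_len=128 and seq_length=1024):
+ #   query_states:     (batch_size, 8, 128, n_heads, dim_per_head)
+ #   key/value_states: (batch_size, 8, 384, n_heads, dim_per_head)
+ # i.e. every token attends to its own block plus one block on each side.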
+
+ # Compute scores
+ scores = torch.einsum(
+ "...qhd,...khd->...hqk", query_states, key_states
+ ) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
+
+ if position_bias is None:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(self.block_len)
+
+ if mask is not None:
+ # Replace masked positions with -1e10 (according to the original implementation)
+ mask = torch.where(mask > 0, 0.0, -1e10)
+ # Adjust the mask shape so it can be summed with the position bias
+ position_bias = position_bias + mask.transpose(1, 2)
+
+ scores += position_bias
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+ attn_weights = attn_weights.type(value_states.dtype)
+ attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
+ attn_output = attn_output[:, :seq_length, :]
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
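+ # Minimal usage sketch for the local attention block above (illustrative only; assumes a default
+ # `LongT5Config`, i.e. d_model=512 and local_radius=127 -> block_len=128):
+ #
+ #   config = LongT5Config()
+ #   attn = LongT5LocalAttention(config, has_relative_attention_bias=True)
+ #   hidden = torch.randn(2, 512, config.d_model)
+ #   attn_output, present, position_bias = attn(hidden)[:3]
+ #   attn_output.shape  # -> (2, 512, config.d_model); position_bias is reused by subsequent layers
+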
+
+class LongT5TransientGlobalAttention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+ self.global_block_size = config.global_block_size
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+
+ # Relative attention bias & layer norm for global attention
+ if self.has_relative_attention_bias:
+ self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.gradient_checkpointing = False
+
+ # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ target_device = (
+ self.relative_attention_bias.weight.device
+ if self.relative_attention_bias.weight.device.type != "meta"
+ else None
+ )
+ memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device)
+ context_position = memory_position[block_length:-block_length]
+
+ # (block_length, 3 * block_length)
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # (block_length, 3 * block_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (block_length, 3 * block_length, num_heads)
+ values = self.relative_attention_bias(relative_position_bucket)
+ # (1, 1, num_heads, block_length, 3 * block_length)
+ values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
+ return values
+
+ def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor:
+ # (batch_size, 1, seq_len, global_seq_len)
+ side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
+ attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10)
+ # (batch_size, seq_len, global_seq_len)
+ side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size)
+ side_relative_position_bucket = self._relative_position_bucket(
+ side_relative_position,
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (batch_size, seq_len, global_seq_len, num_heads)
+ side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
+
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ side_bias = side_bias.permute([0, 3, 1, 2])
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ attention_side_bias = attention_side_bias + side_bias
+ return attention_side_bias
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ ):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
+
+ def unshape(states):
+ """reshape"""
+ return states.contiguous().view(batch_size, -1, self.inner_dim)
+
+ # Prepare components for transient-global attention
+ # Obtain block_ids and global_segment_ids
+ # global_seq_len := seq_len // self.global_block_size
+ # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(
+ mask if mask is not None else torch.ones(hidden_states.shape[:-1], device=hidden_states.device),
+ self.global_block_size,
+ )
+ # Create global inputs
+ _global_seq_len = global_segment_ids.shape[-1]
+ global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
+ global_inputs = self.global_input_layer_norm(global_inputs)
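+ # Shape sketch (illustrative only, assuming global_block_size=16 and seq_len=1024):
+ # block_ids: (batch_size, 1024), global_segment_ids: (batch_size, 64), so each group of 16 tokens
+ # is aggregated into one of 64 global "side" tokens that every position can additionally attend to.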
+
+ # Get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)
+ query_states = shape(self.q(hidden_states))
+ key_states = shape(self.k(hidden_states))
+ value_states = shape(self.v(hidden_states))
+ # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head)
+ side_key_states = shape(self.k(global_inputs))
+ side_value_states = shape(self.v(global_inputs))
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
+ query_states = _split_into_blocks(query_states, self.block_len, dim=1)
+ key_states = _split_into_blocks(key_states, self.block_len, dim=1)
+ value_states = _split_into_blocks(value_states, self.block_len, dim=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
+ value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
+
+ # Tile side inputs across local key/value blocks
+ # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
+ reps = [1] * (side_key_states.ndim + 1)
+ reps[1] = key_states.shape[1]
+ side_key_states = side_key_states.unsqueeze(1).repeat(reps)
+ side_value_states = side_value_states.unsqueeze(1).repeat(reps)
+
+ # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
+ # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
+ key_states = torch.cat([key_states, side_key_states], dim=2)
+ value_states = torch.cat([value_states, side_value_states], dim=2)
+
+ # Compute scores -> (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
+ scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states)
+
+ if mask is not None:
+ # Build the local (blocked) attention mask so it can later be summed with the position bias
+ local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device)
+ # Replace masked positions with -1e10 (according to the original implementation)
+ local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10)
+ else:
+ local_attention_mask = None
+
+ if position_bias is None:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, 1, self.n_heads, self.block_len, 3 * self.block_len),
+ device=scores.device,
+ dtype=scores.dtype,
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(self.block_len)
+
+ if local_attention_mask is not None:
+ # (batch_size, 1, n_heads, block_len, 3 * block_len)
+ position_bias = position_bias + local_attention_mask.transpose(1, 2)
+ position_bias = position_bias.type(scores.dtype)
+
+ # Calculate global/side bias - shape: (batch_size, num_heads, seq_len, global_seq_len)
+ if mask is None:
+ mask = torch.ones(batch_size, seq_length, device=hidden_states.device)
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ side_position_bias = self.compute_side_bias(mask, global_segment_ids)
+ # (batch_size, num_blocks, num_heads, block_len, global_seq_len)
+ side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2)
+ side_position_bias = side_position_bias.type(scores.dtype).to(scores.device)
+ # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len)
+ position_bias = torch.cat([position_bias, side_position_bias], dim=-1)
+
+ scores += position_bias
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+ attn_weights = attn_weights.type(value_states.dtype)
+ attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
+ attn_output = attn_output[:, :seq_length, :]
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5
+class LongT5LayerSelfAttention(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.SelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5LayerLocalSelfAttention(nn.Module):
+ """Local self attention used in encoder"""
+
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ **kwargs: Any, # to accept past_key_value and use_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.LocalSelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5LayerTransientGlobalSelfAttention(nn.Module):
+ """Transient-Global self attention used in encoder"""
+
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(
+ config, has_relative_attention_bias=has_relative_attention_bias
+ )
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ **kwargs: Any, # to accept past_key_value and use_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.TransientGlobalSelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5
+class LongT5LayerCrossAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ query_length=None,
+ output_attentions=False,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.EncDecAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ key_value_states=key_value_states,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ query_length=query_length,
+ output_attentions=output_attentions,
+ )
+ layer_output = hidden_states + self.dropout(attention_output[0])
+ outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5Block(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ if config.is_decoder:
+ attention_layer = LongT5LayerSelfAttention
+ elif config.encoder_attention_type == "local":
+ attention_layer = LongT5LayerLocalSelfAttention
+ elif config.encoder_attention_type == "transient-global":
+ attention_layer = LongT5LayerTransientGlobalSelfAttention
+ else:
+ raise ValueError(
+ "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
+ f"but got {config.encoder_attention_type}."
+ )
+ self.layer = nn.ModuleList()
+ self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias))
+ if self.is_decoder:
+ self.layer.append(LongT5LayerCrossAttention(config))
+
+ self.layer.append(LongT5LayerFF(config))
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ return_dict=True,
+ ):
+ if past_key_value is not None:
+ if not self.is_decoder:
+ logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.")
+ expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
+
+ if len(past_key_value) != expected_num_past_key_values:
+ raise ValueError(
+ f"There should be {expected_num_past_key_values} past states. "
+ f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
+ f"Got {len(past_key_value)} past key / value states"
+ )
+
+ self_attn_past_key_value = past_key_value[:2]
+ cross_attn_past_key_value = past_key_value[2:]
+ else:
+ self_attn_past_key_value, cross_attn_past_key_value = None, None
+
+ self_attention_outputs = self.layer[0](
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=self_attn_past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states, present_key_value_state = self_attention_outputs[:2]
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ do_cross_attention = self.is_decoder and encoder_hidden_states is not None
+ if do_cross_attention:
+ # the actual query length is unknown for cross attention
+ # if using past key value states. Need to inject it here
+ if present_key_value_state is not None:
+ query_length = present_key_value_state[0].shape[2]
+ else:
+ query_length = None
+
+ cross_attention_outputs = self.layer[1](
+ hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ position_bias=encoder_decoder_position_bias,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ query_length=query_length,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states = cross_attention_outputs[0]
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ # Combine self attn and cross attn key value states
+ if present_key_value_state is not None:
+ present_key_value_state = present_key_value_state + cross_attention_outputs[1]
+
+ # Keep cross-attention outputs and relative position weights
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
+
+ # Apply Feed Forward layer
+ hidden_states = self.layer[-1](hidden_states)
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if use_cache:
+ outputs = outputs + (present_key_value_state,) + attention_outputs
+ else:
+ outputs = outputs + attention_outputs
+
+ return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+
+
+class LongT5PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongT5Config
+ base_model_prefix = "transformer"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["LongT5Block"]
+
+ @property
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs
+ def dummy_inputs(self):
+ input_ids = torch.tensor(DUMMY_INPUTS)
+ input_mask = torch.tensor(DUMMY_MASK)
+ dummy_inputs = {
+ "decoder_input_ids": input_ids,
+ "input_ids": input_ids,
+ "decoder_attention_mask": input_mask,
+ }
+ return dummy_inputs
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor # Used for testing weights initialization
+ if isinstance(module, LongT5LayerNorm):
+ module.weight.data.fill_(factor * 1.0)
+ elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)):
+ # Mesh TensorFlow embeddings initialization
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
+ module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ elif isinstance(module, LongT5DenseActDense):
+ # Mesh TensorFlow FF initialization
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
+ module.wi.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, LongT5DenseGatedActDense):
+ module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
+ module.wi_0.bias.data.zero_()
+ module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
+ module.wi_1.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)):
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
+ d_model = self.config.d_model
+ key_value_proj_dim = self.config.d_kv
+ n_heads = self.config.num_heads
+ module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
+ module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
+ if module.has_relative_attention_bias:
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
+ if isinstance(module, LongT5TransientGlobalAttention):
+ module.global_relative_attention_bias.weight.data.normal_(
+ mean=0.0, std=factor * ((d_model) ** -0.5)
+ )
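+
+ # Rough magnitudes of the initializer std values above (a sketch, assuming factor=1.0, d_model=512,
+ # d_kv=64, num_heads=8): q gets std (512 * 64) ** -0.5 ~= 0.0055, while k, v and o get ~512 ** -0.5 ~= 0.044,
+ # which bakes the usual 1/sqrt(d_kv) attention scaling into the weights instead of the softmax input.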
+
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ if decoder_start_token_id is None:
+ raise ValueError(
+ "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. "
+ "See LongT5 docs for more information."
+ )
+
+ # shift inputs to the right
+ if is_torch_fx_proxy(input_ids):
+ # Item assignment is not supported natively for proxies.
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
+ else:
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
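+
+ # Illustrative example (assuming pad_token_id=0 and decoder_start_token_id=0, as in the standard
+ # T5/LongT5 checkpoints): labels [[5, 7, -100]] become decoder inputs [[0, 5, 7]]: the sequence is
+ # shifted right, the start token is prepended, and any remaining -100 is replaced by pad_token_id.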
+
+
+class LongT5Stack(LongT5PreTrainedModel):
+ def __init__(self, config, embed_tokens=None):
+ super().__init__(config)
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+ self.is_decoder = config.is_decoder
+
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+
+ self.block = nn.ModuleList(
+ [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
+ )
+ self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings
+ def set_input_embeddings(self, new_embeddings):
+ self.embed_tokens = new_embeddings
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ inputs_embeds=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(
+ f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
+
+ if inputs_embeds is None:
+ assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ batch_size, seq_length = input_shape
+
+ # required mask seq length can be calculated via length of past
+ mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
+
+ if use_cache is True:
+ assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder"
+
+ if attention_mask is None:
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
+
+ # initialize past_key_values with `None` if past does not exist
+ if past_key_values is None:
+ past_key_values = [None] * len(self.block)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used
+ if self.is_decoder:
+ extended_attention_mask = self.get_extended_attention_mask(
+ attention_mask, input_shape, inputs_embeds.device
+ )
+ elif self.config.encoder_attention_type == "local":
+ extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device)
+ else: # we need to use both local attention mask and standard extended mask for transient-global attention
+ extended_attention_mask = attention_mask
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
+ present_key_value_states = () if use_cache else None
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and self.is_decoder) else None
+ position_bias = None
+ encoder_decoder_position_bias = None
+
+ hidden_states = self.dropout(inputs_embeds)
+
+ for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
+ layer_head_mask = head_mask[i]
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.forward,
+ hidden_states,
+ extended_attention_mask,
+ position_bias,
+ encoder_hidden_states,
+ encoder_extended_attention_mask,
+ encoder_decoder_position_bias,
+ layer_head_mask,
+ cross_attn_layer_head_mask,
+ None, # past_key_value is always None with gradient checkpointing
+ use_cache,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ position_bias=position_bias,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
+ layer_head_mask=layer_head_mask,
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ # layer_outputs is a tuple with:
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+ if use_cache is False:
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
+
+ hidden_states, present_key_value_state = layer_outputs[:2]
+
+ # We share the position biases between the layers - the first layer stores them
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ position_bias = layer_outputs[2]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
+ # append next layer key value states
+ if use_cache:
+ present_key_value_states = present_key_value_states + (present_key_value_state,)
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[3],)
+ if self.is_decoder:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
+
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ present_key_value_states,
+ all_hidden_states,
+ all_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_value_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+LONGT5_START_DOCSTRING = r"""
+
+ The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
+ Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo
+ Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising
+ generative setting. The LongT5 model is an extension of the T5 model, and it enables using one of two different
+ efficient attention mechanisms: (1) Local attention, or (2) Transient-Global attention.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LONGT5_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ To learn more about how to prepare `decoder_input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+ `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+LONGT5_ENCODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+__HEAD_MASK_WARNING_MSG = """
+The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
+`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
+If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
+num_heads)`.
+"""
+
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+class LongT5Model(LongT5PreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [
+ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
+ ]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = LongT5Stack(decoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongT5Model
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
+ >>> model = LongT5Model.from_pretrained("google/long-t5-local-base")
+
+ >>> # Let's try a very long encoder input.
+ >>> input_ids = tokenizer(
+ ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
+class LongT5ForConditionalGeneration(LongT5PreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [
+ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
+ ]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.model_dim = config.d_model
+
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = LongT5Stack(decoder_config, self.shared)
+
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
+ labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
+ >>> model = LongT5ForConditionalGeneration.from_pretrained(
+ ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps"
+ ... )
+
+ >>> # Let's try a very long input.
+ >>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt")
+ >>> input_ids = inputs.input_ids
+
+ >>> outputs = model.generate(input_ids)
+ >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ abstractthe aim of this article is to provide an overview of the literature on the role of dog
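+
+ >>> # A minimal training sketch: passing target token ids as `labels` makes the forward pass also
+ >>> # return the language-modeling loss (the target text below is purely illustrative).
+ >>> labels = tokenizer("dog ownership is associated with better health", return_tensors="pt").input_ids
+ >>> loss = model(input_ids=input_ids, labels=labels).loss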
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ # Convert encoder inputs into embeddings if needed
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.model_dim**-0.5)
+
+ lm_logits = self.lm_head(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
+
+ labels = labels.to(lm_logits.device)
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+ # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
+
+ if not return_dict:
+ output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {
+ "decoder_input_ids": input_ids,
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache,
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ # if decoder past is not included in output
+ # speedy decoding is disabled and no need to reorder
+ if past_key_values is None:
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
+ return past_key_values
+
+ reordered_decoder_past = ()
+ for layer_past_states in past_key_values:
+ # select the correct batch indices from the cached states;
+ # the batch dimension of each cached key/value tensor is at position 0
+ reordered_layer_past_states = ()
+ for layer_past_state in layer_past_states:
+ # need to set correct `past` for each of the four key / value states
+ reordered_layer_past_states = reordered_layer_past_states + (
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
+ )
+
+ assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
+ assert len(reordered_layer_past_states) == len(layer_past_states)
+
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
+ return reordered_decoder_past
+
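+# A hedged note on the two generation hooks above: `generate()` calls `prepare_inputs_for_generation`
+# at every decoding step to trim `decoder_input_ids` down to the tokens not yet covered by
+# `past_key_values`, and beam search calls `_reorder_cache` whenever hypotheses are reordered so that
+# the cached key/value states are index-selected along the batch dimension with `beam_idx`.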
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+class LongT5EncoderModel(LongT5PreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight"]
+ _keys_to_ignore_on_load_unexpected = [r"decoder"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
+ >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base")
+ >>> input_ids = tokenizer(
+ ... 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
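+
+ >>> # Minimal sanity check: the encoder emits one hidden state per input token, each of size `config.d_model`
+ >>> last_hidden_states.shape[-1] == model.config.d_model
+ True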
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return encoder_outputs
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/splinter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..24355c01add73bfeb1c6aefb97c1d742d79e983c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/splinter/__init__.py
@@ -0,0 +1,79 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_splinter": ["SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SplinterConfig"],
+ "tokenization_splinter": ["SplinterTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_splinter_fast"] = ["SplinterTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_splinter"] = [
+ "SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "SplinterForQuestionAnswering",
+ "SplinterForPreTraining",
+ "SplinterLayer",
+ "SplinterModel",
+ "SplinterPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_splinter import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, SplinterConfig
+ from .tokenization_splinter import SplinterTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_splinter_fast import SplinterTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_splinter import (
+ SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ SplinterForPreTraining,
+ SplinterForQuestionAnswering,
+ SplinterLayer,
+ SplinterModel,
+ SplinterPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9067fc34b9c87eaff77ff5edb279a8e296c5c69
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/configuration_splinter.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/configuration_splinter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c245a75bb7a657131f20d81b86b9764009c9e378
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/configuration_splinter.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/modeling_splinter.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/modeling_splinter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9d15140603c46b1fced9b34a12aefe962d96942
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/modeling_splinter.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..543d8c654168033ee1b43cde7423edd1912eeea9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a03df5bd537dd11e8debfaa8ca5db853cbcc463d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/configuration_splinter.py b/venv/lib/python3.10/site-packages/transformers/models/splinter/configuration_splinter.py
new file mode 100644
index 0000000000000000000000000000000000000000..5248c74c1a3efc5a26b03ad17d81c022f750880a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/splinter/configuration_splinter.py
@@ -0,0 +1,123 @@
+# coding=utf-8
+# Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Splinter model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class SplinterConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`SplinterModel`]. It is used to instantiate a
+ Splinter model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Splinter
+ [tau/splinter-base](https://huggingface.co/tau/splinter-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the Splinter model. Defines the number of different tokens that can be represented by
+ the `input_ids` passed when calling [`SplinterModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimension of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`SplinterModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ question_token_id (`int`, *optional*, defaults to 104):
+ The id of the `[QUESTION]` token.
+
+ Example:
+
+ ```python
+ >>> from transformers import SplinterModel, SplinterConfig
+
+ >>> # Initializing a Splinter tau/splinter-base style configuration
+ >>> configuration = SplinterConfig()
+
+ >>> # Initializing a model from the tau/splinter-base style configuration
+ >>> model = SplinterModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
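+
+ >>> # Minimal sketch: any of the arguments documented above can be overridden at construction time
+ >>> small_config = SplinterConfig(hidden_size=256, num_hidden_layers=4)
+ >>> small_config.num_hidden_layers
+ 4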
+ ```"""
+
+ model_type = "splinter"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ use_cache=True,
+ pad_token_id=0,
+ question_token_id=104,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.type_vocab_size = type_vocab_size
+ self.layer_norm_eps = layer_norm_eps
+ self.use_cache = use_cache
+ self.question_token_id = question_token_id
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/modeling_splinter.py b/venv/lib/python3.10/site-packages/transformers/models/splinter/modeling_splinter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b643601d0ebd49f3dd909df1db63e70da7e7627e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/splinter/modeling_splinter.py
@@ -0,0 +1,1104 @@
+# coding=utf-8
+# Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Splinter model."""
+
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, QuestionAnsweringModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_splinter import SplinterConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "tau/splinter-base"
+_CONFIG_FOR_DOC = "SplinterConfig"
+
+
+from ..deprecated._archive_maps import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class SplinterEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: Optional[int] = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Splinter
+class SplinterSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the SplinterModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter
+class SplinterSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Splinter
+class SplinterAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = SplinterSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = SplinterSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Splinter
+class SplinterIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter
+class SplinterOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Splinter
+class SplinterLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = SplinterAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = SplinterAttention(config, position_embedding_type="absolute")
+ self.intermediate = SplinterIntermediate(config)
+ self.output = SplinterOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Splinter
+class SplinterEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([SplinterLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class SplinterPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SplinterConfig
+ base_model_prefix = "splinter"
+ supports_gradient_checkpointing = True
+
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+SPLINTER_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`SplinterConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SPLINTER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `{0}`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Splinter Model transformer outputting raw hidden-states without any specific head on top.",
+ SPLINTER_START_DOCSTRING,
+)
+class SplinterModel(SplinterPreTrainedModel):
+ """
+ The model is an encoder (with only self-attention) following the architecture described in [Attention is all you
+ need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
+ Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = SplinterEmbeddings(config)
+ self.encoder = SplinterEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+class SplinterFullyConnectedLayer(nn.Module):
+ def __init__(self, input_dim, output_dim, hidden_act="gelu"):
+ super().__init__()
+
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+
+ self.dense = nn.Linear(self.input_dim, self.output_dim)
+ self.act_fn = ACT2FN[hidden_act]
+ self.LayerNorm = nn.LayerNorm(self.output_dim)
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(inputs)
+ hidden_states = self.act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+class QuestionAwareSpanSelectionHead(nn.Module):
+ """
+ Implementation of the Question-Aware Span Selection (QASS) head described in the Splinter paper,
+ *Few-Shot Question Answering by Pretraining Span Selection* (Ram et al., 2021).
+
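+ Example (a minimal sketch using random tensors; the small `hidden_size` here is chosen arbitrarily):
+
+ ```python
+ >>> import torch
+ >>> from transformers import SplinterConfig
+ >>> from transformers.models.splinter.modeling_splinter import QuestionAwareSpanSelectionHead
+
+ >>> head = QuestionAwareSpanSelectionHead(SplinterConfig(hidden_size=32))
+ >>> inputs = torch.randn(2, 10, 32)  # (batch_size, seq_length, hidden_size)
+ >>> positions = torch.zeros(2, 1, dtype=torch.long)  # one question position per example
+ >>> start_logits, end_logits = head(inputs, positions)
+ >>> start_logits.shape  # (batch_size, num_positions, seq_length)
+ torch.Size([2, 1, 10])
+ ```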
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
+ self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
+ self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
+ self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
+
+ self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
+ self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
+
+ def forward(self, inputs, positions):
+ _, _, dim = inputs.size()
+ index = positions.unsqueeze(-1).repeat(1, 1, dim) # [batch_size, num_positions, dim]
+ gathered_reps = torch.gather(inputs, dim=1, index=index) # [batch_size, num_positions, dim]
+
+ query_start_reps = self.query_start_transform(gathered_reps) # [batch_size, num_positions, dim]
+ query_end_reps = self.query_end_transform(gathered_reps) # [batch_size, num_positions, dim]
+ start_reps = self.start_transform(inputs) # [batch_size, seq_length, dim]
+ end_reps = self.end_transform(inputs) # [batch_size, seq_length, dim]
+
+ hidden_states = self.start_classifier(query_start_reps) # [batch_size, num_positions, dim]
+ start_reps = start_reps.permute(0, 2, 1) # [batch_size, dim, seq_length]
+ start_logits = torch.matmul(hidden_states, start_reps)
+
+ hidden_states = self.end_classifier(query_end_reps)
+ end_reps = end_reps.permute(0, 2, 1)
+ end_logits = torch.matmul(hidden_states, end_reps)
+
+ return start_logits, end_logits
+
+
+@add_start_docstrings(
+ """
+ Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ SPLINTER_START_DOCSTRING,
+)
+class SplinterForQuestionAnswering(SplinterPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.splinter = SplinterModel(config)
+ self.splinter_qass = QuestionAwareSpanSelectionHead(config)
+ self.question_token_id = config.question_token_id
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ question_positions: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
+ The positions of all question tokens. If given, `start_logits` and `end_logits` will be of shape `(batch_size,
+ num_questions, sequence_length)`. If `None`, only the first question token in each sequence of the batch is
+ used, and `start_logits` and `end_logits` will be of shape `(batch_size, sequence_length)`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ question_positions_were_none = False
+ if question_positions is None:
+ if input_ids is not None:
+ question_position_for_each_example = torch.argmax(
+ (torch.eq(input_ids, self.question_token_id)).int(), dim=-1
+ )
+ else:
+ question_position_for_each_example = torch.zeros(
+ inputs_embeds.size(0), dtype=torch.long, layout=inputs_embeds.layout, device=inputs_embeds.device
+ )
+ question_positions = question_position_for_each_example.unsqueeze(-1)
+ question_positions_were_none = True
+
+ outputs = self.splinter(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ start_logits, end_logits = self.splinter_qass(sequence_output, question_positions)
+
+ if question_positions_were_none:
+ start_logits, end_logits = start_logits.squeeze(1), end_logits.squeeze(1)
+
+ if attention_mask is not None:
+ start_logits = start_logits + (1 - attention_mask) * torch.finfo(start_logits.dtype).min
+ end_logits = end_logits + (1 - attention_mask) * torch.finfo(end_logits.dtype).min
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, splitting adds an extra dimension, so squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions.clamp_(0, ignored_index)
+ end_positions.clamp_(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
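+
+ # Illustrative usage sketch (not part of the upstream file); the checkpoint name below is an
+ # assumption made for the example rather than a guaranteed identifier:
+ #
+ # from transformers import AutoTokenizer
+ # tokenizer = AutoTokenizer.from_pretrained("tau/splinter-base-qa")
+ # model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qa")
+ # question, context = "Who wrote Hamlet?", "Hamlet is a tragedy written by William Shakespeare."
+ # inputs = tokenizer(question, context, return_tensors="pt")
+ # outputs = model(**inputs)
+ # # with question_positions=None the logits have shape [batch_size, sequence_length]
+ # start = int(outputs.start_logits.argmax(-1))
+ # end = int(outputs.end_logits.argmax(-1))
+ # answer = tokenizer.decode(inputs.input_ids[0, start : end + 1])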
+
+
+@dataclass
+class SplinterForPreTrainingOutput(ModelOutput):
+ """
+ Class for outputs of Splinter as a span selection model.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided):
+ Total span extraction loss is the average of the cross-entropy losses for the start and end positions.
+ start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_logits: torch.FloatTensor = None
+ end_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@add_start_docstrings(
+ """
+ Splinter Model for the recurring span selection task as done during the pretraining. The difference from the QA
+ task is that there is no explicit question; instead, multiple question tokens replace the occurrences of the
+ recurring spans.
+ """,
+ SPLINTER_START_DOCSTRING,
+)
+class SplinterForPreTraining(SplinterPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.splinter = SplinterModel(config)
+ self.splinter_qass = QuestionAwareSpanSelectionHead(config)
+ self.question_token_id = config.question_token_id
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(
+ SPLINTER_INPUTS_DOCSTRING.format("batch_size, num_questions, sequence_length")
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ question_positions: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, SplinterForPreTrainingOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
+ The positions of all question tokens. If given, `start_logits` and `end_logits` will be of shape `(batch_size,
+ num_questions, sequence_length)`. If `None`, only the first question token in each sequence of the batch is
+ used, and `start_logits` and `end_logits` will be of shape `(batch_size, sequence_length)`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if question_positions is None and start_positions is not None and end_positions is not None:
+ raise TypeError("question_positions must be specified in order to calculate the loss")
+
+ elif question_positions is None and input_ids is None:
+ raise TypeError("question_positions must be specified when input_embeds is used")
+
+ elif question_positions is None:
+ question_positions = self._prepare_question_positions(input_ids)
+
+ outputs = self.splinter(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ batch_size, sequence_length, dim = sequence_output.size()
+ # [batch_size, num_questions, sequence_length]
+ start_logits, end_logits = self.splinter_qass(sequence_output, question_positions)
+
+ num_questions = question_positions.size(1)
+ if attention_mask is not None:
+ attention_mask_for_each_question = attention_mask.unsqueeze(1).expand(
+ batch_size, num_questions, sequence_length
+ )
+ start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min
+ end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min
+
+ total_loss = None
+ # [batch_size, num_questions, sequence_length]
+ if start_positions is not None and end_positions is not None:
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
+ start_positions.clamp_(0, max(0, sequence_length - 1))
+ end_positions.clamp_(0, max(0, sequence_length - 1))
+
+ # Ignore zero positions in the loss. Splinter never predicts zero
+ # during pretraining and zero is used for padding question
+ # tokens as well as for start and end positions of padded
+ # question tokens.
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id)
+ start_loss = loss_fct(
+ start_logits.view(batch_size * num_questions, sequence_length),
+ start_positions.view(batch_size * num_questions),
+ )
+ end_loss = loss_fct(
+ end_logits.view(batch_size * num_questions, sequence_length),
+ end_positions.view(batch_size * num_questions),
+ )
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return SplinterForPreTrainingOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor:
+ rows, flat_positions = torch.where(input_ids == self.config.question_token_id)
+ num_questions = torch.bincount(rows)
+ positions = torch.full(
+ (input_ids.size(0), num_questions.max()),
+ self.config.pad_token_id,
+ dtype=torch.long,
+ device=input_ids.device,
+ )
+ cols = torch.cat([torch.arange(n) for n in num_questions])
+ positions[rows, cols] = flat_positions
+ return positions
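+
+
+if __name__ == "__main__":
+ # Illustrative sketch only (not part of the upstream file): replicates the logic of
+ # `_prepare_question_positions` on a toy batch. The ids used here (question_token_id=104,
+ # pad_token_id=0) are assumptions for the example, not values read from a real config.
+ demo_input_ids = torch.tensor(
+ [
+ [101, 104, 7, 104, 102], # two question tokens, at positions 1 and 3
+ [101, 104, 8, 9, 102], # one question token, at position 1
+ ]
+ )
+ rows, flat_positions = torch.where(demo_input_ids == 104)
+ num_questions = torch.bincount(rows)
+ positions = torch.full((demo_input_ids.size(0), int(num_questions.max())), 0, dtype=torch.long)
+ cols = torch.cat([torch.arange(n) for n in num_questions])
+ positions[rows, cols] = flat_positions
+ print(positions) # expected: tensor([[1, 3], [1, 0]]); the second row is padded with pad_token_id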
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter.py b/venv/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee82e19c6cb9b316bb9c7681cd2561a0dac7b4ff
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter.py
@@ -0,0 +1,503 @@
+# coding=utf-8
+# Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for Splinter."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class SplinterTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a Splinter tokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ question_token (`str`, *optional*, defaults to `"[QUESTION]"`):
+ The token used for constructing question representations.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ question_token="[QUESTION]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+ self.question_token = question_token
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ def question_token_id(self):
+ """
+ `Optional[int]`: Id of the question token in the vocabulary, used to condition the answer on a question
+ representation.
+ """
+ return self.convert_tokens_to_ids(self.question_token)
+
+ @property
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _tokenize(self, text):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a pair of sequences for question answering tasks by concatenating and adding special
+ tokens. A Splinter sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences for question answering: `[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ The question token IDs if `padding_side == "right"`, else the context token IDs
+ token_ids_1 (`List[int]`, *optional*):
+ The context token IDs if `padding_side == "right"`, else the question token IDs
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
+ if self.padding_side == "right":
+ # Input is question-then-context
+ return cls + token_ids_0 + question_suffix + sep + token_ids_1 + sep
+ else:
+ # Input is context-then-question
+ return cls + token_ids_0 + sep + token_ids_1 + question_suffix + sep
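+
+ # Illustrative sketch (not part of the upstream file) of the two layouts built above, where
+ # q1..qn are question token ids and c1..cm are context token ids:
+ #
+ # padding_side == "right": [CLS] q1 ... qn [QUESTION] . [SEP] c1 ... cm [SEP]
+ # padding_side == "left": [CLS] c1 ... cm [SEP] q1 ... qn [QUESTION] . [SEP]
+ #
+ # `create_token_type_ids_from_sequences` below assigns token type 0 to everything up to and
+ # including the first [SEP] and token type 1 to the remaining tokens.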
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create the token type IDs corresponding to the sequences passed. [What are token type
+ IDs?](../glossary#token-type-ids)
+
+ Should be overridden in a subclass if the model has a special way of building those.
+
+ Args:
+ token_ids_0 (`List[int]`): The first tokenized sequence.
+ token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
+
+ Returns:
+ `List[int]`: The token type ids.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+
+ if self.padding_side == "right":
+ # Input is question-then-context
+ return len(cls + token_ids_0 + question_suffix + sep) * [0] + len(token_ids_1 + sep) * [1]
+ else:
+ # Input is context-then-question
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + question_suffix + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ """
+
+ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic tokenization of a piece of text. Splits on whitespace only; for sub-word tokenization, see
+ WordpieceTokenizer.
+
+ Args:
+ **never_split**: (*optional*) list of str
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set containing the elements of both sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because the English Wikipedia does contain
+ # some Chinese words).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ orig_tokens = whitespace_tokenize(text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
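+
+ # Illustrative sketch (not part of the upstream file): with the defaults (do_lower_case=True and
+ # strip_accents left unset, so accents are stripped), punctuation becomes its own token:
+ #
+ # BasicTokenizer().tokenize("Héllo, World!") # -> ['hello', ',', 'world', '!']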
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if never_split is not None and text in never_split:
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as are Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and are handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF)
+ or (cp >= 0x20000 and cp <= 0x2A6DF)
+ or (cp >= 0x2A700 and cp <= 0x2B73F)
+ or (cp >= 0x2B740 and cp <= 0x2B81F)
+ or (cp >= 0x2B820 and cp <= 0x2CEAF)
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F)
+ ):
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
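+
+
+if __name__ == "__main__":
+ # Illustrative sketch only (not part of the upstream file): demonstrates the greedy
+ # longest-match-first behaviour documented in WordpieceTokenizer.tokenize, using a tiny
+ # hand-built vocabulary whose entries are assumptions made for the example.
+ demo_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
+ demo_wordpiece = WordpieceTokenizer(vocab=demo_vocab, unk_token="[UNK]")
+ print(demo_wordpiece.tokenize("unaffable")) # -> ['un', '##aff', '##able']
+ print(demo_wordpiece.tokenize("runner")) # no matching prefix in the vocab -> ['[UNK]']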
diff --git a/venv/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter_fast.py b/venv/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..0371fdf2828eb289350ce1b69e13110d8b8c8b22
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter_fast.py
@@ -0,0 +1,190 @@
+# coding=utf-8
+# Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization classes for Splinter."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_splinter import SplinterTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+class SplinterTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" Splinter tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ question_token (`str`, *optional*, defaults to `"[QUESTION]"`):
+ The token used for constructing question representations.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespace characters with a classic space.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = SplinterTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ question_token="[QUESTION]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ additional_special_tokens=(question_token,),
+ **kwargs,
+ )
+
+ pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
+ or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
+ ):
+ pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
+ pre_tok_state["lowercase"] = do_lower_case
+ pre_tok_state["strip_accents"] = strip_accents
+ self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
+
+ self.do_lower_case = do_lower_case
+
+ @property
+ def question_token_id(self):
+ """
+ `Optional[int]`: Id of the question token in the vocabulary, used to condition the answer on a question
+ representation.
+ """
+ return self.convert_tokens_to_ids(self.question_token)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a pair of sequences for question answering tasks by concatenating and adding special
+ tokens. A Splinter sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences for question answering: `[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ The question token IDs if `padding_side == "right"`, else the context token IDs
+ token_ids_1 (`List[int]`, *optional*):
+ The context token IDs if `padding_side == "right"`, else the question token IDs
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
+ if self.padding_side == "right":
+ # Input is question-then-context
+ return cls + token_ids_0 + question_suffix + sep + token_ids_1 + sep
+ else:
+ # Input is context-then-question
+ return cls + token_ids_0 + sep + token_ids_1 + question_suffix + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create the token type IDs corresponding to the sequences passed. [What are token type
+ IDs?](../glossary#token-type-ids)
+
+ Should be overridden in a subclass if the model has a special way of building those.
+
+ Args:
+ token_ids_0 (`List[int]`): The first tokenized sequence.
+ token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
+
+ Returns:
+ `List[int]`: The token type ids.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+
+ if self.padding_side == "right":
+ # Input is question-then-context
+ return len(cls + token_ids_0 + question_suffix + sep) * [0] + len(token_ids_1 + sep) * [1]
+ else:
+ # Input is context-then-question
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + question_suffix + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..102217dffb29d680555ab13759f74f98ed049ccb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_flax_vision_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_flax_vision_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f056cc79d53a25dfab5754068703af4860ea4f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_flax_vision_encoder_decoder.cpython-310.pyc differ