diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..96a159133cc0050a1493a40b9aa14c8c29bec46d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__init__.py
@@ -0,0 +1,403 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "auto_factory": ["get_values"],
+ "configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"],
+ "feature_extraction_auto": ["FEATURE_EXTRACTOR_MAPPING", "AutoFeatureExtractor"],
+ "image_processing_auto": ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"],
+ "processing_auto": ["PROCESSOR_MAPPING", "AutoProcessor"],
+ "tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_auto"] = [
+ "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_AUDIO_XVECTOR_MAPPING",
+ "MODEL_FOR_BACKBONE_MAPPING",
+ "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING",
+ "MODEL_FOR_CAUSAL_LM_MAPPING",
+ "MODEL_FOR_CTC_MAPPING",
+ "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
+ "MODEL_FOR_DEPTH_ESTIMATION_MAPPING",
+ "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_IMAGE_MAPPING",
+ "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
+ "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING",
+ "MODEL_FOR_KEYPOINT_DETECTION_MAPPING",
+ "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
+ "MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
+ "MODEL_FOR_MASKED_LM_MAPPING",
+ "MODEL_FOR_MASK_GENERATION_MAPPING",
+ "MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
+ "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
+ "MODEL_FOR_OBJECT_DETECTION_MAPPING",
+ "MODEL_FOR_PRETRAINING_MAPPING",
+ "MODEL_FOR_QUESTION_ANSWERING_MAPPING",
+ "MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
+ "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
+ "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
+ "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
+ "MODEL_FOR_TEXT_ENCODING_MAPPING",
+ "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING",
+ "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING",
+ "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING",
+ "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_VISION_2_SEQ_MAPPING",
+ "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING",
+ "MODEL_MAPPING",
+ "MODEL_WITH_LM_HEAD_MAPPING",
+ "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
+ "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING",
+ "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING",
+ "AutoModel",
+ "AutoBackbone",
+ "AutoModelForAudioClassification",
+ "AutoModelForAudioFrameClassification",
+ "AutoModelForAudioXVector",
+ "AutoModelForCausalLM",
+ "AutoModelForCTC",
+ "AutoModelForDepthEstimation",
+ "AutoModelForImageClassification",
+ "AutoModelForImageSegmentation",
+ "AutoModelForImageToImage",
+ "AutoModelForInstanceSegmentation",
+ "AutoModelForKeypointDetection",
+ "AutoModelForMaskGeneration",
+ "AutoModelForTextEncoding",
+ "AutoModelForMaskedImageModeling",
+ "AutoModelForMaskedLM",
+ "AutoModelForMultipleChoice",
+ "AutoModelForNextSentencePrediction",
+ "AutoModelForObjectDetection",
+ "AutoModelForPreTraining",
+ "AutoModelForQuestionAnswering",
+ "AutoModelForSemanticSegmentation",
+ "AutoModelForSeq2SeqLM",
+ "AutoModelForSequenceClassification",
+ "AutoModelForSpeechSeq2Seq",
+ "AutoModelForTableQuestionAnswering",
+ "AutoModelForTextToSpectrogram",
+ "AutoModelForTextToWaveform",
+ "AutoModelForTokenClassification",
+ "AutoModelForUniversalSegmentation",
+ "AutoModelForVideoClassification",
+ "AutoModelForVision2Seq",
+ "AutoModelForVisualQuestionAnswering",
+ "AutoModelForDocumentQuestionAnswering",
+ "AutoModelWithLMHead",
+ "AutoModelForZeroShotImageClassification",
+ "AutoModelForZeroShotObjectDetection",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_auto"] = [
+ "TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
+ "TF_MODEL_FOR_CAUSAL_LM_MAPPING",
+ "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
+ "TF_MODEL_FOR_MASK_GENERATION_MAPPING",
+ "TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
+ "TF_MODEL_FOR_MASKED_LM_MAPPING",
+ "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
+ "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
+ "TF_MODEL_FOR_PRETRAINING_MAPPING",
+ "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
+ "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
+ "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
+ "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
+ "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
+ "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
+ "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
+ "TF_MODEL_FOR_TEXT_ENCODING_MAPPING",
+ "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
+ "TF_MODEL_FOR_VISION_2_SEQ_MAPPING",
+ "TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
+ "TF_MODEL_MAPPING",
+ "TF_MODEL_WITH_LM_HEAD_MAPPING",
+ "TFAutoModel",
+ "TFAutoModelForAudioClassification",
+ "TFAutoModelForCausalLM",
+ "TFAutoModelForImageClassification",
+ "TFAutoModelForMaskedImageModeling",
+ "TFAutoModelForMaskedLM",
+ "TFAutoModelForMaskGeneration",
+ "TFAutoModelForMultipleChoice",
+ "TFAutoModelForNextSentencePrediction",
+ "TFAutoModelForPreTraining",
+ "TFAutoModelForDocumentQuestionAnswering",
+ "TFAutoModelForQuestionAnswering",
+ "TFAutoModelForSemanticSegmentation",
+ "TFAutoModelForSeq2SeqLM",
+ "TFAutoModelForSequenceClassification",
+ "TFAutoModelForSpeechSeq2Seq",
+ "TFAutoModelForTableQuestionAnswering",
+ "TFAutoModelForTextEncoding",
+ "TFAutoModelForTokenClassification",
+ "TFAutoModelForVision2Seq",
+ "TFAutoModelForZeroShotImageClassification",
+ "TFAutoModelWithLMHead",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_auto"] = [
+ "FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
+ "FLAX_MODEL_FOR_CAUSAL_LM_MAPPING",
+ "FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
+ "FLAX_MODEL_FOR_MASKED_LM_MAPPING",
+ "FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
+ "FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
+ "FLAX_MODEL_FOR_PRETRAINING_MAPPING",
+ "FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
+ "FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
+ "FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
+ "FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
+ "FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
+ "FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING",
+ "FLAX_MODEL_MAPPING",
+ "FlaxAutoModel",
+ "FlaxAutoModelForCausalLM",
+ "FlaxAutoModelForImageClassification",
+ "FlaxAutoModelForMaskedLM",
+ "FlaxAutoModelForMultipleChoice",
+ "FlaxAutoModelForNextSentencePrediction",
+ "FlaxAutoModelForPreTraining",
+ "FlaxAutoModelForQuestionAnswering",
+ "FlaxAutoModelForSeq2SeqLM",
+ "FlaxAutoModelForSequenceClassification",
+ "FlaxAutoModelForSpeechSeq2Seq",
+ "FlaxAutoModelForTokenClassification",
+ "FlaxAutoModelForVision2Seq",
+ ]
+
+
+if TYPE_CHECKING:
+ from .auto_factory import get_values
+ from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig
+ from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
+ from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
+ from .processing_auto import PROCESSOR_MAPPING, AutoProcessor
+ from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_auto import (
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
+ MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING,
+ MODEL_FOR_AUDIO_XVECTOR_MAPPING,
+ MODEL_FOR_BACKBONE_MAPPING,
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
+ MODEL_FOR_CAUSAL_LM_MAPPING,
+ MODEL_FOR_CTC_MAPPING,
+ MODEL_FOR_DEPTH_ESTIMATION_MAPPING,
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
+ MODEL_FOR_IMAGE_MAPPING,
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
+ MODEL_FOR_IMAGE_TO_IMAGE_MAPPING,
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
+ MODEL_FOR_KEYPOINT_DETECTION_MAPPING,
+ MODEL_FOR_MASK_GENERATION_MAPPING,
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
+ MODEL_FOR_MASKED_LM_MAPPING,
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
+ MODEL_FOR_OBJECT_DETECTION_MAPPING,
+ MODEL_FOR_PRETRAINING_MAPPING,
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
+ MODEL_FOR_TEXT_ENCODING_MAPPING,
+ MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING,
+ MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING,
+ MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING,
+ MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING,
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING,
+ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
+ MODEL_FOR_VISION_2_SEQ_MAPPING,
+ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
+ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING,
+ MODEL_MAPPING,
+ MODEL_WITH_LM_HEAD_MAPPING,
+ AutoBackbone,
+ AutoModel,
+ AutoModelForAudioClassification,
+ AutoModelForAudioFrameClassification,
+ AutoModelForAudioXVector,
+ AutoModelForCausalLM,
+ AutoModelForCTC,
+ AutoModelForDepthEstimation,
+ AutoModelForDocumentQuestionAnswering,
+ AutoModelForImageClassification,
+ AutoModelForImageSegmentation,
+ AutoModelForImageToImage,
+ AutoModelForInstanceSegmentation,
+ AutoModelForKeypointDetection,
+ AutoModelForMaskedImageModeling,
+ AutoModelForMaskedLM,
+ AutoModelForMaskGeneration,
+ AutoModelForMultipleChoice,
+ AutoModelForNextSentencePrediction,
+ AutoModelForObjectDetection,
+ AutoModelForPreTraining,
+ AutoModelForQuestionAnswering,
+ AutoModelForSemanticSegmentation,
+ AutoModelForSeq2SeqLM,
+ AutoModelForSequenceClassification,
+ AutoModelForSpeechSeq2Seq,
+ AutoModelForTableQuestionAnswering,
+ AutoModelForTextEncoding,
+ AutoModelForTextToSpectrogram,
+ AutoModelForTextToWaveform,
+ AutoModelForTokenClassification,
+ AutoModelForUniversalSegmentation,
+ AutoModelForVideoClassification,
+ AutoModelForVision2Seq,
+ AutoModelForVisualQuestionAnswering,
+ AutoModelForZeroShotImageClassification,
+ AutoModelForZeroShotObjectDetection,
+ AutoModelWithLMHead,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_auto import (
+ TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
+ TF_MODEL_FOR_MASK_GENERATION_MAPPING,
+ TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
+ TF_MODEL_FOR_MASKED_LM_MAPPING,
+ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
+ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
+ TF_MODEL_FOR_PRETRAINING_MAPPING,
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+ TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
+ TF_MODEL_FOR_TEXT_ENCODING_MAPPING,
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
+ TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
+ TF_MODEL_MAPPING,
+ TF_MODEL_WITH_LM_HEAD_MAPPING,
+ TFAutoModel,
+ TFAutoModelForAudioClassification,
+ TFAutoModelForCausalLM,
+ TFAutoModelForDocumentQuestionAnswering,
+ TFAutoModelForImageClassification,
+ TFAutoModelForMaskedImageModeling,
+ TFAutoModelForMaskedLM,
+ TFAutoModelForMaskGeneration,
+ TFAutoModelForMultipleChoice,
+ TFAutoModelForNextSentencePrediction,
+ TFAutoModelForPreTraining,
+ TFAutoModelForQuestionAnswering,
+ TFAutoModelForSemanticSegmentation,
+ TFAutoModelForSeq2SeqLM,
+ TFAutoModelForSequenceClassification,
+ TFAutoModelForSpeechSeq2Seq,
+ TFAutoModelForTableQuestionAnswering,
+ TFAutoModelForTextEncoding,
+ TFAutoModelForTokenClassification,
+ TFAutoModelForVision2Seq,
+ TFAutoModelForZeroShotImageClassification,
+ TFAutoModelWithLMHead,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_auto import (
+ FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
+ FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING,
+ FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
+ FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
+ FLAX_MODEL_FOR_PRETRAINING_MAPPING,
+ FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+ FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
+ FLAX_MODEL_MAPPING,
+ FlaxAutoModel,
+ FlaxAutoModelForCausalLM,
+ FlaxAutoModelForImageClassification,
+ FlaxAutoModelForMaskedLM,
+ FlaxAutoModelForMultipleChoice,
+ FlaxAutoModelForNextSentencePrediction,
+ FlaxAutoModelForPreTraining,
+ FlaxAutoModelForQuestionAnswering,
+ FlaxAutoModelForSeq2SeqLM,
+ FlaxAutoModelForSequenceClassification,
+ FlaxAutoModelForSpeechSeq2Seq,
+ FlaxAutoModelForTokenClassification,
+ FlaxAutoModelForVision2Seq,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
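
The `__init__.py` above never imports the heavy submodules eagerly: `_import_structure` maps each submodule to the public names it provides, the `TYPE_CHECKING` branch exists only so static analyzers and IDEs see real imports, and at runtime the package module is replaced by a `_LazyModule` that imports a submodule the first time one of its attributes is accessed. The snippet below is a minimal, self-contained sketch of the same idea for a hypothetical `lazy_pkg` package, built on PEP 562 module `__getattr__`; it is an illustration of the pattern, not transformers' actual `_LazyModule` implementation.

```python
# lazy_pkg/__init__.py -- simplified illustration of the lazy-import pattern
# (hypothetical package; not transformers' actual _LazyModule implementation).
import importlib
from typing import TYPE_CHECKING

# Map each submodule to the public names it defines, mirroring _import_structure.
_import_structure = {
    "tokenization": ["Tokenizer"],
    "modeling": ["Model"],
}

# Reverse map: public name -> defining submodule.
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

if TYPE_CHECKING:
    # Static type checkers and IDEs see ordinary imports; nothing is imported at runtime here.
    from .modeling import Model
    from .tokenization import Tokenizer
else:

    def __getattr__(name):
        # PEP 562 module-level __getattr__: called only when `name` is not already defined,
        # so the submodule is imported on first access and cached for later lookups.
        if name in _name_to_module:
            module = importlib.import_module(f".{_name_to_module[name]}", __name__)
            value = getattr(module, name)
            globals()[name] = value
            return value
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```
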
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4fc2770fc7287aa1680351db338f37bfe93192bd
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/auto_factory.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/auto_factory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..126622c6218af749d032de8758e30c62e048e5fa
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/auto_factory.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/configuration_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/configuration_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbe5ffc69300e74889a10de30763ec94c60b6b7d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/configuration_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/feature_extraction_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/feature_extraction_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b4ab0252ff72715c342ed66018aeb23fafbad70
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/feature_extraction_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/image_processing_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/image_processing_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b566f43654640bbc47d92cea4239f3cc969b8526
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/image_processing_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae239c669bc1ba5f13cf8574a271d61064ed6721
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_flax_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_flax_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..612fecebbdf10b81b85532910b7a2c8cbb9a9528
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_flax_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_tf_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_tf_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..147521a1e8c259fb73f522473d4255f0199e9d03
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_tf_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/processing_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/processing_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..626ed9b9e4bf9a29ef1d473bcda7c9363b88a5f4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/processing_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/tokenization_auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/tokenization_auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9945885600a8fe2cef5987acfe4ba234774040d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/tokenization_auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..e53dcab379bb06852530465d064272821a8a1e24
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py
@@ -0,0 +1,806 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Factory function to build auto-model classes."""
+import copy
+import importlib
+import json
+import os
+import warnings
+from collections import OrderedDict
+
+from ...configuration_utils import PretrainedConfig
+from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
+from ...utils import (
+ CONFIG_NAME,
+ cached_file,
+ copy_func,
+ extract_commit_hash,
+ find_adapter_config_file,
+ is_peft_available,
+ logging,
+ requires_backends,
+)
+from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
+
+
+logger = logging.get_logger(__name__)
+
+
+CLASS_DOCSTRING = """
+ This is a generic model class that will be instantiated as one of the model classes of the library when created
+ with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class
+ method.
+
+ This class cannot be instantiated directly using `__init__()` (throws an error).
+"""
+
+FROM_CONFIG_DOCSTRING = """
+ Instantiates one of the model classes of the library from a configuration.
+
+ Note:
+ Loading a model from its configuration file does **not** load the model weights. It only affects the
+ model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.
+
+ Args:
+ config ([`PretrainedConfig`]):
+ The model class to instantiate is selected based on the configuration class:
+
+ List options
+ attn_implementation (`str`, *optional*):
+ The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoConfig, BaseAutoModelClass
+
+ >>> # Download configuration from huggingface.co and cache.
+ >>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
+ >>> model = BaseAutoModelClass.from_config(config)
+ ```
+"""
+
+FROM_PRETRAINED_TORCH_DOCSTRING = """
+ Instantiate one of the model classes of the library from a pretrained model.
+
+ The model class to instantiate is selected based on the `model_type` property of the config object (either
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+ List options
+
+ The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are
+ deactivated). To train the model, you should first set it back in training mode with `model.train()`
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                    - A path or URL to a *TensorFlow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
+                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint to a
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+ model_args (additional positional arguments, *optional*):
+ Will be passed along to the underlying model `__init__()` method.
+ config ([`PretrainedConfig`], *optional*):
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
+ be automatically loaded when:
+
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
+ model).
+ - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
+ save directory.
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
+ configuration JSON file named *config.json* is found in the directory.
+ state_dict (*Dict[str, torch.Tensor]*, *optional*):
+ A state dictionary to use instead of a state dictionary loaded from saved weights file.
+
+ This option can be used if you want to create a model from a pretrained configuration but load your own
+ weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
+ [`~PreTrainedModel.from_pretrained`] is not a simpler option.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
+ standard cache should not be used.
+ from_tf (`bool`, *optional*, defaults to `False`):
+ Load the model weights from a TensorFlow checkpoint save file (see docstring of
+ `pretrained_model_name_or_path` argument).
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+ cached versions if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+ file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ output_loading_info(`bool`, *optional*, defaults to `False`):
+                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
+ local_files_only(`bool`, *optional*, defaults to `False`):
+ Whether or not to only look at local files (e.g., not try downloading the model).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ code_revision (`str`, *optional*, defaults to `"main"`):
+                The specific revision to use for the code on the Hub, if the code lives in a different repository than
+ the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
+ system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
+ allowed by git.
+ kwargs (additional keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g.,
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
+ automatically loaded:
+
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
+ already been done)
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
+ corresponds to a configuration attribute will be used to override said attribute with the
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
+ will be passed to the underlying model's `__init__` function.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoConfig, BaseAutoModelClass
+
+ >>> # Download model and configuration from huggingface.co and cache.
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
+
+ >>> # Update configuration during loading
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
+ >>> model.config.output_attentions
+ True
+
+ >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
+ >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")
+ >>> model = BaseAutoModelClass.from_pretrained(
+ ... "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config
+ ... )
+ ```
+"""
+
+FROM_PRETRAINED_TF_DOCSTRING = """
+ Instantiate one of the model classes of the library from a pretrained model.
+
+ The model class to instantiate is selected based on the `model_type` property of the config object (either
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+ List options
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                    - A path or URL to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
+ case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
+                      argument. This loading path is slower than converting the PyTorch model to a TensorFlow model
+ using the provided conversion scripts and loading the TensorFlow model afterwards.
+ model_args (additional positional arguments, *optional*):
+ Will be passed along to the underlying model `__init__()` method.
+ config ([`PretrainedConfig`], *optional*):
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
+ be automatically loaded when:
+
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
+ model).
+ - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
+ save directory.
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
+ configuration JSON file named *config.json* is found in the directory.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
+ standard cache should not be used.
+ from_pt (`bool`, *optional*, defaults to `False`):
+ Load the model weights from a PyTorch checkpoint save file (see docstring of
+ `pretrained_model_name_or_path` argument).
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+ cached versions if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+ file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ output_loading_info(`bool`, *optional*, defaults to `False`):
+                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
+ local_files_only(`bool`, *optional*, defaults to `False`):
+ Whether or not to only look at local files (e.g., not try downloading the model).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ code_revision (`str`, *optional*, defaults to `"main"`):
+                The specific revision to use for the code on the Hub, if the code lives in a different repository than
+ the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
+ system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
+ allowed by git.
+ kwargs (additional keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g.,
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
+ automatically loaded:
+
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
+ already been done)
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
+ corresponds to a configuration attribute will be used to override said attribute with the
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
+ will be passed to the underlying model's `__init__` function.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoConfig, BaseAutoModelClass
+
+ >>> # Download model and configuration from huggingface.co and cache.
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
+
+ >>> # Update configuration during loading
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
+ >>> model.config.output_attentions
+ True
+
+ >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
+ >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
+ >>> model = BaseAutoModelClass.from_pretrained(
+ ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
+ ... )
+ ```
+"""
+
+FROM_PRETRAINED_FLAX_DOCSTRING = """
+ Instantiate one of the model classes of the library from a pretrained model.
+
+ The model class to instantiate is selected based on the `model_type` property of the config object (either
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+ List options
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                    - A path or URL to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
+ case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
+                      argument. This loading path is slower than converting the PyTorch model to a Flax model using
+                      the provided conversion scripts and loading the Flax model afterwards.
+ model_args (additional positional arguments, *optional*):
+ Will be passed along to the underlying model `__init__()` method.
+ config ([`PretrainedConfig`], *optional*):
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
+ be automatically loaded when:
+
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
+ model).
+ - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
+ save directory.
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
+ configuration JSON file named *config.json* is found in the directory.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
+ standard cache should not be used.
+ from_pt (`bool`, *optional*, defaults to `False`):
+ Load the model weights from a PyTorch checkpoint save file (see docstring of
+ `pretrained_model_name_or_path` argument).
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+ cached versions if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+ file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ output_loading_info(`bool`, *optional*, defaults to `False`):
+                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
+ local_files_only(`bool`, *optional*, defaults to `False`):
+ Whether or not to only look at local files (e.g., not try downloading the model).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ code_revision (`str`, *optional*, defaults to `"main"`):
+                The specific revision to use for the code on the Hub, if the code lives in a different repository than
+ the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
+ system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
+ allowed by git.
+ kwargs (additional keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and instantiate the model (e.g.,
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
+ automatically loaded:
+
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
+ already been done)
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
+ corresponds to a configuration attribute will be used to override said attribute with the
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
+ will be passed to the underlying model's `__init__` function.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoConfig, BaseAutoModelClass
+
+ >>> # Download model and configuration from huggingface.co and cache.
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
+
+ >>> # Update configuration during loading
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
+ >>> model.config.output_attentions
+ True
+
+    >>> # Loading from a PyTorch checkpoint file instead of a Flax model (slower)
+ >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
+ >>> model = BaseAutoModelClass.from_pretrained(
+ ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
+ ... )
+ ```
+"""
+
+
+def _get_model_class(config, model_mapping):
+ supported_models = model_mapping[type(config)]
+ if not isinstance(supported_models, (list, tuple)):
+ return supported_models
+
+ name_to_model = {model.__name__: model for model in supported_models}
+ architectures = getattr(config, "architectures", [])
+ for arch in architectures:
+ if arch in name_to_model:
+ return name_to_model[arch]
+ elif f"TF{arch}" in name_to_model:
+ return name_to_model[f"TF{arch}"]
+ elif f"Flax{arch}" in name_to_model:
+ return name_to_model[f"Flax{arch}"]
+
+    # If no architecture is set in the config, or none of them matches the supported models, the first element of the
+    # tuple is the default.
+ return supported_models[0]
+
+
+class _BaseAutoModelClass:
+ # Base class for auto models.
+ _model_mapping = None
+
+ def __init__(self, *args, **kwargs):
+ raise EnvironmentError(
+ f"{self.__class__.__name__} is designed to be instantiated "
+ f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
+ f"`{self.__class__.__name__}.from_config(config)` methods."
+ )
+
+ @classmethod
+ def from_config(cls, config, **kwargs):
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
+ has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
+ has_local_code = type(config) in cls._model_mapping.keys()
+ trust_remote_code = resolve_trust_remote_code(
+ trust_remote_code, config._name_or_path, has_local_code, has_remote_code
+ )
+
+ if has_remote_code and trust_remote_code:
+ class_ref = config.auto_map[cls.__name__]
+ if "--" in class_ref:
+ repo_id, class_ref = class_ref.split("--")
+ else:
+ repo_id = config.name_or_path
+ model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
+ if os.path.isdir(config._name_or_path):
+ model_class.register_for_auto_class(cls.__name__)
+ else:
+ cls.register(config.__class__, model_class, exist_ok=True)
+ _ = kwargs.pop("code_revision", None)
+ return model_class._from_config(config, **kwargs)
+ elif type(config) in cls._model_mapping.keys():
+ model_class = _get_model_class(config, cls._model_mapping)
+ return model_class._from_config(config, **kwargs)
+
+ raise ValueError(
+ f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
+ f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
+ )
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+ config = kwargs.pop("config", None)
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
+ kwargs["_from_auto"] = True
+ hub_kwargs_names = [
+ "cache_dir",
+ "force_download",
+ "local_files_only",
+ "proxies",
+ "resume_download",
+ "revision",
+ "subfolder",
+ "use_auth_token",
+ "token",
+ ]
+ hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs}
+ code_revision = kwargs.pop("code_revision", None)
+ commit_hash = kwargs.pop("_commit_hash", None)
+ adapter_kwargs = kwargs.pop("adapter_kwargs", None)
+
+ token = hub_kwargs.pop("token", None)
+ use_auth_token = hub_kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if token is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ token = use_auth_token
+
+ if token is not None:
+ hub_kwargs["token"] = token
+
+ if commit_hash is None:
+ if not isinstance(config, PretrainedConfig):
+ # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible
+ resolved_config_file = cached_file(
+ pretrained_model_name_or_path,
+ CONFIG_NAME,
+ _raise_exceptions_for_gated_repo=False,
+ _raise_exceptions_for_missing_entries=False,
+ _raise_exceptions_for_connection_errors=False,
+ **hub_kwargs,
+ )
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
+ else:
+ commit_hash = getattr(config, "_commit_hash", None)
+
+ if is_peft_available():
+ if adapter_kwargs is None:
+ adapter_kwargs = {}
+ if token is not None:
+ adapter_kwargs["token"] = token
+
+ maybe_adapter_path = find_adapter_config_file(
+ pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs
+ )
+
+ if maybe_adapter_path is not None:
+ with open(maybe_adapter_path, "r", encoding="utf-8") as f:
+ adapter_config = json.load(f)
+
+ adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path
+ pretrained_model_name_or_path = adapter_config["base_model_name_or_path"]
+
+ if not isinstance(config, PretrainedConfig):
+ kwargs_orig = copy.deepcopy(kwargs)
+ # ensure not to pollute the config object with torch_dtype="auto" - since it's
+ # meaningless in the context of the config object - torch.dtype values are acceptable
+ if kwargs.get("torch_dtype", None) == "auto":
+ _ = kwargs.pop("torch_dtype")
+ # to not overwrite the quantization_config if config has a quantization_config
+ if kwargs.get("quantization_config", None) is not None:
+ _ = kwargs.pop("quantization_config")
+
+ config, kwargs = AutoConfig.from_pretrained(
+ pretrained_model_name_or_path,
+ return_unused_kwargs=True,
+ trust_remote_code=trust_remote_code,
+ code_revision=code_revision,
+ _commit_hash=commit_hash,
+ **hub_kwargs,
+ **kwargs,
+ )
+
+ # if torch_dtype=auto was passed here, ensure to pass it on
+ if kwargs_orig.get("torch_dtype", None) == "auto":
+ kwargs["torch_dtype"] = "auto"
+ if kwargs_orig.get("quantization_config", None) is not None:
+ kwargs["quantization_config"] = kwargs_orig["quantization_config"]
+
+ has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
+ has_local_code = type(config) in cls._model_mapping.keys()
+ trust_remote_code = resolve_trust_remote_code(
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
+ )
+
+ # Set the adapter kwargs
+ kwargs["adapter_kwargs"] = adapter_kwargs
+
+ if has_remote_code and trust_remote_code:
+ class_ref = config.auto_map[cls.__name__]
+ model_class = get_class_from_dynamic_module(
+ class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
+ )
+ _ = hub_kwargs.pop("code_revision", None)
+ if os.path.isdir(pretrained_model_name_or_path):
+ model_class.register_for_auto_class(cls.__name__)
+ else:
+ cls.register(config.__class__, model_class, exist_ok=True)
+ return model_class.from_pretrained(
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+ )
+ elif type(config) in cls._model_mapping.keys():
+ model_class = _get_model_class(config, cls._model_mapping)
+ return model_class.from_pretrained(
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+ )
+ raise ValueError(
+ f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
+ f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
+ )
+
+ @classmethod
+ def register(cls, config_class, model_class, exist_ok=False):
+ """
+ Register a new model for this class.
+
+ Args:
+ config_class ([`PretrainedConfig`]):
+ The configuration corresponding to the model to register.
+ model_class ([`PreTrainedModel`]):
+ The model to register.
+ """
+ if hasattr(model_class, "config_class") and str(model_class.config_class) != str(config_class):
+ raise ValueError(
+ "The model class you are passing has a `config_class` attribute that is not consistent with the "
+                f"config class you passed (model has {model_class.config_class} and you passed {config_class}). Fix "
+ "one of those so they match!"
+ )
+ cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)
+
+
+class _BaseAutoBackboneClass(_BaseAutoModelClass):
+ # Base class for auto backbone models.
+ _model_mapping = None
+
+ @classmethod
+ def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+ requires_backends(cls, ["vision", "timm"])
+ from ...models.timm_backbone import TimmBackboneConfig
+
+ config = kwargs.pop("config", TimmBackboneConfig())
+
+ if kwargs.get("out_features", None) is not None:
+ raise ValueError("Cannot specify `out_features` for timm backbones")
+
+ if kwargs.get("output_loading_info", False):
+ raise ValueError("Cannot specify `output_loading_info=True` when loading from timm")
+
+ num_channels = kwargs.pop("num_channels", config.num_channels)
+ features_only = kwargs.pop("features_only", config.features_only)
+ use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
+ out_indices = kwargs.pop("out_indices", config.out_indices)
+ config = TimmBackboneConfig(
+ backbone=pretrained_model_name_or_path,
+ num_channels=num_channels,
+ features_only=features_only,
+ use_pretrained_backbone=use_pretrained_backbone,
+ out_indices=out_indices,
+ )
+ return super().from_config(config, **kwargs)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+ use_timm_backbone = kwargs.pop("use_timm_backbone", False)
+ if use_timm_backbone:
+ return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+
+def insert_head_doc(docstring, head_doc=""):
+ if len(head_doc) > 0:
+ return docstring.replace(
+ "one of the model classes of the library ",
+ f"one of the model classes of the library (with a {head_doc} head) ",
+ )
+ return docstring.replace(
+ "one of the model classes of the library ", "one of the base model classes of the library "
+ )
+
+
+def auto_class_update(cls, checkpoint_for_example="google-bert/bert-base-cased", head_doc=""):
+ # Create a new class with the right name from the base class
+ model_mapping = cls._model_mapping
+ name = cls.__name__
+ class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)
+ cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name)
+
+ # Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't
+    # have specific docstrings for them.
+ from_config = copy_func(_BaseAutoModelClass.from_config)
+ from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
+ from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name)
+ from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
+ from_config.__doc__ = from_config_docstring
+ from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config)
+ cls.from_config = classmethod(from_config)
+
+ if name.startswith("TF"):
+ from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING
+ elif name.startswith("Flax"):
+ from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING
+ else:
+ from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING
+ from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained)
+ from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc)
+ from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name)
+ from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
+ shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
+ from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut)
+ from_pretrained.__doc__ = from_pretrained_docstring
+ from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained)
+ cls.from_pretrained = classmethod(from_pretrained)
+ return cls
+
+
+def get_values(model_mapping):
+ result = []
+ for model in model_mapping.values():
+ if isinstance(model, (list, tuple)):
+ result += list(model)
+ else:
+ result.append(model)
+
+ return result
+
+
+def getattribute_from_module(module, attr):
+ if attr is None:
+ return None
+ if isinstance(attr, tuple):
+ return tuple(getattribute_from_module(module, a) for a in attr)
+ if hasattr(module, attr):
+ return getattr(module, attr)
+ # Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
+ # object at the top level.
+ transformers_module = importlib.import_module("transformers")
+
+ if module != transformers_module:
+ try:
+ return getattribute_from_module(transformers_module, attr)
+ except ValueError:
+            raise ValueError(f"Could not find {attr} in either {module} or {transformers_module}!")
+ else:
+ raise ValueError(f"Could not find {attr} in {transformers_module}!")
+
+
+class _LazyAutoMapping(OrderedDict):
+ """
+    A mapping from config classes to objects (model or tokenizer classes, for instance) that loads its keys and
+    values lazily when they are accessed.
+
+ Args:
+        - config_mapping: The mapping from model type to config class
+        - model_mapping: The mapping from model type to model (or tokenizer) class
+ """
+
+ def __init__(self, config_mapping, model_mapping):
+ self._config_mapping = config_mapping
+ self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
+ self._model_mapping = model_mapping
+ self._model_mapping._model_mapping = self
+ self._extra_content = {}
+ self._modules = {}
+
+ def __len__(self):
+ common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())
+ return len(common_keys) + len(self._extra_content)
+
+ def __getitem__(self, key):
+ if key in self._extra_content:
+ return self._extra_content[key]
+ model_type = self._reverse_config_mapping[key.__name__]
+ if model_type in self._model_mapping:
+ model_name = self._model_mapping[model_type]
+ return self._load_attr_from_module(model_type, model_name)
+
+        # Maybe there were several model types associated with this config.
+ model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]
+ for mtype in model_types:
+ if mtype in self._model_mapping:
+ model_name = self._model_mapping[mtype]
+ return self._load_attr_from_module(mtype, model_name)
+ raise KeyError(key)
+
+ def _load_attr_from_module(self, model_type, attr):
+ module_name = model_type_to_module_name(model_type)
+ if module_name not in self._modules:
+ self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
+ return getattribute_from_module(self._modules[module_name], attr)
+
+ def keys(self):
+ mapping_keys = [
+ self._load_attr_from_module(key, name)
+ for key, name in self._config_mapping.items()
+ if key in self._model_mapping.keys()
+ ]
+ return mapping_keys + list(self._extra_content.keys())
+
+ def get(self, key, default):
+ try:
+ return self.__getitem__(key)
+ except KeyError:
+ return default
+
+ def __bool__(self):
+ return bool(self.keys())
+
+ def values(self):
+ mapping_values = [
+ self._load_attr_from_module(key, name)
+ for key, name in self._model_mapping.items()
+ if key in self._config_mapping.keys()
+ ]
+ return mapping_values + list(self._extra_content.values())
+
+ def items(self):
+ mapping_items = [
+ (
+ self._load_attr_from_module(key, self._config_mapping[key]),
+ self._load_attr_from_module(key, self._model_mapping[key]),
+ )
+ for key in self._model_mapping.keys()
+ if key in self._config_mapping.keys()
+ ]
+ return mapping_items + list(self._extra_content.items())
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def __contains__(self, item):
+ if item in self._extra_content:
+ return True
+ if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
+ return False
+ model_type = self._reverse_config_mapping[item.__name__]
+ return model_type in self._model_mapping
+
+ def register(self, key, value, exist_ok=False):
+ """
+ Register a new model in this mapping.
+ """
+ if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
+ model_type = self._reverse_config_mapping[key.__name__]
+ if model_type in self._model_mapping.keys() and not exist_ok:
+ raise ValueError(f"'{key}' is already used by a Transformers model.")
+
+ self._extra_content[key] = value
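
For orientation, here is a minimal sketch of how a `_LazyAutoMapping` behaves once built; it assumes the `FEATURE_EXTRACTOR_MAPPING` instantiated later in this patch (in `feature_extraction_auto.py`), and the value class is only imported from its model module on first access:

```python
# Sketch only: querying a _LazyAutoMapping keyed by config classes.
# FEATURE_EXTRACTOR_MAPPING is built later in this patch from CONFIG_MAPPING_NAMES
# and FEATURE_EXTRACTOR_MAPPING_NAMES.
from transformers import Wav2Vec2Config
from transformers.models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING

# The key is a config class; the corresponding feature extractor class is
# imported from its model module only on this first lookup.
extractor_cls = FEATURE_EXTRACTOR_MAPPING[Wav2Vec2Config]
print(extractor_cls.__name__)  # Wav2Vec2FeatureExtractor
```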
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..29a52ba755f023698aca4d9b329e731b15f8a0ab
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py
@@ -0,0 +1,984 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Auto Config class."""
+import importlib
+import os
+import re
+import warnings
+from collections import OrderedDict
+from typing import List, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
+from ...utils import CONFIG_NAME, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import CONFIG_ARCHIVE_MAP_MAPPING_NAMES # noqa: F401, E402
+
+
+CONFIG_MAPPING_NAMES = OrderedDict(
+ [
+ # Add configs here
+ ("albert", "AlbertConfig"),
+ ("align", "AlignConfig"),
+ ("altclip", "AltCLIPConfig"),
+ ("audio-spectrogram-transformer", "ASTConfig"),
+ ("autoformer", "AutoformerConfig"),
+ ("bark", "BarkConfig"),
+ ("bart", "BartConfig"),
+ ("beit", "BeitConfig"),
+ ("bert", "BertConfig"),
+ ("bert-generation", "BertGenerationConfig"),
+ ("big_bird", "BigBirdConfig"),
+ ("bigbird_pegasus", "BigBirdPegasusConfig"),
+ ("biogpt", "BioGptConfig"),
+ ("bit", "BitConfig"),
+ ("blenderbot", "BlenderbotConfig"),
+ ("blenderbot-small", "BlenderbotSmallConfig"),
+ ("blip", "BlipConfig"),
+ ("blip-2", "Blip2Config"),
+ ("bloom", "BloomConfig"),
+ ("bridgetower", "BridgeTowerConfig"),
+ ("bros", "BrosConfig"),
+ ("camembert", "CamembertConfig"),
+ ("canine", "CanineConfig"),
+ ("chinese_clip", "ChineseCLIPConfig"),
+ ("chinese_clip_vision_model", "ChineseCLIPVisionConfig"),
+ ("clap", "ClapConfig"),
+ ("clip", "CLIPConfig"),
+ ("clip_vision_model", "CLIPVisionConfig"),
+ ("clipseg", "CLIPSegConfig"),
+ ("clvp", "ClvpConfig"),
+ ("code_llama", "LlamaConfig"),
+ ("codegen", "CodeGenConfig"),
+ ("cohere", "CohereConfig"),
+ ("conditional_detr", "ConditionalDetrConfig"),
+ ("convbert", "ConvBertConfig"),
+ ("convnext", "ConvNextConfig"),
+ ("convnextv2", "ConvNextV2Config"),
+ ("cpmant", "CpmAntConfig"),
+ ("ctrl", "CTRLConfig"),
+ ("cvt", "CvtConfig"),
+ ("data2vec-audio", "Data2VecAudioConfig"),
+ ("data2vec-text", "Data2VecTextConfig"),
+ ("data2vec-vision", "Data2VecVisionConfig"),
+ ("dbrx", "DbrxConfig"),
+ ("deberta", "DebertaConfig"),
+ ("deberta-v2", "DebertaV2Config"),
+ ("decision_transformer", "DecisionTransformerConfig"),
+ ("deformable_detr", "DeformableDetrConfig"),
+ ("deit", "DeiTConfig"),
+ ("depth_anything", "DepthAnythingConfig"),
+ ("deta", "DetaConfig"),
+ ("detr", "DetrConfig"),
+ ("dinat", "DinatConfig"),
+ ("dinov2", "Dinov2Config"),
+ ("distilbert", "DistilBertConfig"),
+ ("donut-swin", "DonutSwinConfig"),
+ ("dpr", "DPRConfig"),
+ ("dpt", "DPTConfig"),
+ ("efficientformer", "EfficientFormerConfig"),
+ ("efficientnet", "EfficientNetConfig"),
+ ("electra", "ElectraConfig"),
+ ("encodec", "EncodecConfig"),
+ ("encoder-decoder", "EncoderDecoderConfig"),
+ ("ernie", "ErnieConfig"),
+ ("ernie_m", "ErnieMConfig"),
+ ("esm", "EsmConfig"),
+ ("falcon", "FalconConfig"),
+ ("fastspeech2_conformer", "FastSpeech2ConformerConfig"),
+ ("flaubert", "FlaubertConfig"),
+ ("flava", "FlavaConfig"),
+ ("fnet", "FNetConfig"),
+ ("focalnet", "FocalNetConfig"),
+ ("fsmt", "FSMTConfig"),
+ ("funnel", "FunnelConfig"),
+ ("fuyu", "FuyuConfig"),
+ ("gemma", "GemmaConfig"),
+ ("git", "GitConfig"),
+ ("glpn", "GLPNConfig"),
+ ("gpt-sw3", "GPT2Config"),
+ ("gpt2", "GPT2Config"),
+ ("gpt_bigcode", "GPTBigCodeConfig"),
+ ("gpt_neo", "GPTNeoConfig"),
+ ("gpt_neox", "GPTNeoXConfig"),
+ ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"),
+ ("gptj", "GPTJConfig"),
+ ("gptsan-japanese", "GPTSanJapaneseConfig"),
+ ("graphormer", "GraphormerConfig"),
+ ("grounding-dino", "GroundingDinoConfig"),
+ ("groupvit", "GroupViTConfig"),
+ ("hubert", "HubertConfig"),
+ ("ibert", "IBertConfig"),
+ ("idefics", "IdeficsConfig"),
+ ("idefics2", "Idefics2Config"),
+ ("imagegpt", "ImageGPTConfig"),
+ ("informer", "InformerConfig"),
+ ("instructblip", "InstructBlipConfig"),
+ ("jamba", "JambaConfig"),
+ ("jukebox", "JukeboxConfig"),
+ ("kosmos-2", "Kosmos2Config"),
+ ("layoutlm", "LayoutLMConfig"),
+ ("layoutlmv2", "LayoutLMv2Config"),
+ ("layoutlmv3", "LayoutLMv3Config"),
+ ("led", "LEDConfig"),
+ ("levit", "LevitConfig"),
+ ("lilt", "LiltConfig"),
+ ("llama", "LlamaConfig"),
+ ("llava", "LlavaConfig"),
+ ("llava_next", "LlavaNextConfig"),
+ ("longformer", "LongformerConfig"),
+ ("longt5", "LongT5Config"),
+ ("luke", "LukeConfig"),
+ ("lxmert", "LxmertConfig"),
+ ("m2m_100", "M2M100Config"),
+ ("mamba", "MambaConfig"),
+ ("marian", "MarianConfig"),
+ ("markuplm", "MarkupLMConfig"),
+ ("mask2former", "Mask2FormerConfig"),
+ ("maskformer", "MaskFormerConfig"),
+ ("maskformer-swin", "MaskFormerSwinConfig"),
+ ("mbart", "MBartConfig"),
+ ("mctct", "MCTCTConfig"),
+ ("mega", "MegaConfig"),
+ ("megatron-bert", "MegatronBertConfig"),
+ ("mgp-str", "MgpstrConfig"),
+ ("mistral", "MistralConfig"),
+ ("mixtral", "MixtralConfig"),
+ ("mobilebert", "MobileBertConfig"),
+ ("mobilenet_v1", "MobileNetV1Config"),
+ ("mobilenet_v2", "MobileNetV2Config"),
+ ("mobilevit", "MobileViTConfig"),
+ ("mobilevitv2", "MobileViTV2Config"),
+ ("mpnet", "MPNetConfig"),
+ ("mpt", "MptConfig"),
+ ("mra", "MraConfig"),
+ ("mt5", "MT5Config"),
+ ("musicgen", "MusicgenConfig"),
+ ("musicgen_melody", "MusicgenMelodyConfig"),
+ ("mvp", "MvpConfig"),
+ ("nat", "NatConfig"),
+ ("nezha", "NezhaConfig"),
+ ("nllb-moe", "NllbMoeConfig"),
+ ("nougat", "VisionEncoderDecoderConfig"),
+ ("nystromformer", "NystromformerConfig"),
+ ("olmo", "OlmoConfig"),
+ ("oneformer", "OneFormerConfig"),
+ ("open-llama", "OpenLlamaConfig"),
+ ("openai-gpt", "OpenAIGPTConfig"),
+ ("opt", "OPTConfig"),
+ ("owlv2", "Owlv2Config"),
+ ("owlvit", "OwlViTConfig"),
+ ("patchtsmixer", "PatchTSMixerConfig"),
+ ("patchtst", "PatchTSTConfig"),
+ ("pegasus", "PegasusConfig"),
+ ("pegasus_x", "PegasusXConfig"),
+ ("perceiver", "PerceiverConfig"),
+ ("persimmon", "PersimmonConfig"),
+ ("phi", "PhiConfig"),
+ ("pix2struct", "Pix2StructConfig"),
+ ("plbart", "PLBartConfig"),
+ ("poolformer", "PoolFormerConfig"),
+ ("pop2piano", "Pop2PianoConfig"),
+ ("prophetnet", "ProphetNetConfig"),
+ ("pvt", "PvtConfig"),
+ ("pvt_v2", "PvtV2Config"),
+ ("qdqbert", "QDQBertConfig"),
+ ("qwen2", "Qwen2Config"),
+ ("qwen2_moe", "Qwen2MoeConfig"),
+ ("rag", "RagConfig"),
+ ("realm", "RealmConfig"),
+ ("recurrent_gemma", "RecurrentGemmaConfig"),
+ ("reformer", "ReformerConfig"),
+ ("regnet", "RegNetConfig"),
+ ("rembert", "RemBertConfig"),
+ ("resnet", "ResNetConfig"),
+ ("retribert", "RetriBertConfig"),
+ ("roberta", "RobertaConfig"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormConfig"),
+ ("roc_bert", "RoCBertConfig"),
+ ("roformer", "RoFormerConfig"),
+ ("rwkv", "RwkvConfig"),
+ ("sam", "SamConfig"),
+ ("seamless_m4t", "SeamlessM4TConfig"),
+ ("seamless_m4t_v2", "SeamlessM4Tv2Config"),
+ ("segformer", "SegformerConfig"),
+ ("seggpt", "SegGptConfig"),
+ ("sew", "SEWConfig"),
+ ("sew-d", "SEWDConfig"),
+ ("siglip", "SiglipConfig"),
+ ("siglip_vision_model", "SiglipVisionConfig"),
+ ("speech-encoder-decoder", "SpeechEncoderDecoderConfig"),
+ ("speech_to_text", "Speech2TextConfig"),
+ ("speech_to_text_2", "Speech2Text2Config"),
+ ("speecht5", "SpeechT5Config"),
+ ("splinter", "SplinterConfig"),
+ ("squeezebert", "SqueezeBertConfig"),
+ ("stablelm", "StableLmConfig"),
+ ("starcoder2", "Starcoder2Config"),
+ ("superpoint", "SuperPointConfig"),
+ ("swiftformer", "SwiftFormerConfig"),
+ ("swin", "SwinConfig"),
+ ("swin2sr", "Swin2SRConfig"),
+ ("swinv2", "Swinv2Config"),
+ ("switch_transformers", "SwitchTransformersConfig"),
+ ("t5", "T5Config"),
+ ("table-transformer", "TableTransformerConfig"),
+ ("tapas", "TapasConfig"),
+ ("time_series_transformer", "TimeSeriesTransformerConfig"),
+ ("timesformer", "TimesformerConfig"),
+ ("timm_backbone", "TimmBackboneConfig"),
+ ("trajectory_transformer", "TrajectoryTransformerConfig"),
+ ("transfo-xl", "TransfoXLConfig"),
+ ("trocr", "TrOCRConfig"),
+ ("tvlt", "TvltConfig"),
+ ("tvp", "TvpConfig"),
+ ("udop", "UdopConfig"),
+ ("umt5", "UMT5Config"),
+ ("unispeech", "UniSpeechConfig"),
+ ("unispeech-sat", "UniSpeechSatConfig"),
+ ("univnet", "UnivNetConfig"),
+ ("upernet", "UperNetConfig"),
+ ("van", "VanConfig"),
+ ("videomae", "VideoMAEConfig"),
+ ("vilt", "ViltConfig"),
+ ("vipllava", "VipLlavaConfig"),
+ ("vision-encoder-decoder", "VisionEncoderDecoderConfig"),
+ ("vision-text-dual-encoder", "VisionTextDualEncoderConfig"),
+ ("visual_bert", "VisualBertConfig"),
+ ("vit", "ViTConfig"),
+ ("vit_hybrid", "ViTHybridConfig"),
+ ("vit_mae", "ViTMAEConfig"),
+ ("vit_msn", "ViTMSNConfig"),
+ ("vitdet", "VitDetConfig"),
+ ("vitmatte", "VitMatteConfig"),
+ ("vits", "VitsConfig"),
+ ("vivit", "VivitConfig"),
+ ("wav2vec2", "Wav2Vec2Config"),
+ ("wav2vec2-bert", "Wav2Vec2BertConfig"),
+ ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"),
+ ("wavlm", "WavLMConfig"),
+ ("whisper", "WhisperConfig"),
+ ("xclip", "XCLIPConfig"),
+ ("xglm", "XGLMConfig"),
+ ("xlm", "XLMConfig"),
+ ("xlm-prophetnet", "XLMProphetNetConfig"),
+ ("xlm-roberta", "XLMRobertaConfig"),
+ ("xlm-roberta-xl", "XLMRobertaXLConfig"),
+ ("xlnet", "XLNetConfig"),
+ ("xmod", "XmodConfig"),
+ ("yolos", "YolosConfig"),
+ ("yoso", "YosoConfig"),
+ ]
+)
+
+
+MODEL_NAMES_MAPPING = OrderedDict(
+ [
+ # Add full (and cased) model names here
+ ("albert", "ALBERT"),
+ ("align", "ALIGN"),
+ ("altclip", "AltCLIP"),
+ ("audio-spectrogram-transformer", "Audio Spectrogram Transformer"),
+ ("autoformer", "Autoformer"),
+ ("bark", "Bark"),
+ ("bart", "BART"),
+ ("barthez", "BARThez"),
+ ("bartpho", "BARTpho"),
+ ("beit", "BEiT"),
+ ("bert", "BERT"),
+ ("bert-generation", "Bert Generation"),
+ ("bert-japanese", "BertJapanese"),
+ ("bertweet", "BERTweet"),
+ ("big_bird", "BigBird"),
+ ("bigbird_pegasus", "BigBird-Pegasus"),
+ ("biogpt", "BioGpt"),
+ ("bit", "BiT"),
+ ("blenderbot", "Blenderbot"),
+ ("blenderbot-small", "BlenderbotSmall"),
+ ("blip", "BLIP"),
+ ("blip-2", "BLIP-2"),
+ ("bloom", "BLOOM"),
+ ("bort", "BORT"),
+ ("bridgetower", "BridgeTower"),
+ ("bros", "BROS"),
+ ("byt5", "ByT5"),
+ ("camembert", "CamemBERT"),
+ ("canine", "CANINE"),
+ ("chinese_clip", "Chinese-CLIP"),
+ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"),
+ ("clap", "CLAP"),
+ ("clip", "CLIP"),
+ ("clip_vision_model", "CLIPVisionModel"),
+ ("clipseg", "CLIPSeg"),
+ ("clvp", "CLVP"),
+ ("code_llama", "CodeLlama"),
+ ("codegen", "CodeGen"),
+ ("cohere", "Cohere"),
+ ("conditional_detr", "Conditional DETR"),
+ ("convbert", "ConvBERT"),
+ ("convnext", "ConvNeXT"),
+ ("convnextv2", "ConvNeXTV2"),
+ ("cpm", "CPM"),
+ ("cpmant", "CPM-Ant"),
+ ("ctrl", "CTRL"),
+ ("cvt", "CvT"),
+ ("data2vec-audio", "Data2VecAudio"),
+ ("data2vec-text", "Data2VecText"),
+ ("data2vec-vision", "Data2VecVision"),
+ ("dbrx", "DBRX"),
+ ("deberta", "DeBERTa"),
+ ("deberta-v2", "DeBERTa-v2"),
+ ("decision_transformer", "Decision Transformer"),
+ ("deformable_detr", "Deformable DETR"),
+ ("deit", "DeiT"),
+ ("deplot", "DePlot"),
+ ("depth_anything", "Depth Anything"),
+ ("deta", "DETA"),
+ ("detr", "DETR"),
+ ("dialogpt", "DialoGPT"),
+ ("dinat", "DiNAT"),
+ ("dinov2", "DINOv2"),
+ ("distilbert", "DistilBERT"),
+ ("dit", "DiT"),
+ ("donut-swin", "DonutSwin"),
+ ("dpr", "DPR"),
+ ("dpt", "DPT"),
+ ("efficientformer", "EfficientFormer"),
+ ("efficientnet", "EfficientNet"),
+ ("electra", "ELECTRA"),
+ ("encodec", "EnCodec"),
+ ("encoder-decoder", "Encoder decoder"),
+ ("ernie", "ERNIE"),
+ ("ernie_m", "ErnieM"),
+ ("esm", "ESM"),
+ ("falcon", "Falcon"),
+ ("fastspeech2_conformer", "FastSpeech2Conformer"),
+ ("flan-t5", "FLAN-T5"),
+ ("flan-ul2", "FLAN-UL2"),
+ ("flaubert", "FlauBERT"),
+ ("flava", "FLAVA"),
+ ("fnet", "FNet"),
+ ("focalnet", "FocalNet"),
+ ("fsmt", "FairSeq Machine-Translation"),
+ ("funnel", "Funnel Transformer"),
+ ("fuyu", "Fuyu"),
+ ("gemma", "Gemma"),
+ ("git", "GIT"),
+ ("glpn", "GLPN"),
+ ("gpt-sw3", "GPT-Sw3"),
+ ("gpt2", "OpenAI GPT-2"),
+ ("gpt_bigcode", "GPTBigCode"),
+ ("gpt_neo", "GPT Neo"),
+ ("gpt_neox", "GPT NeoX"),
+ ("gpt_neox_japanese", "GPT NeoX Japanese"),
+ ("gptj", "GPT-J"),
+ ("gptsan-japanese", "GPTSAN-japanese"),
+ ("graphormer", "Graphormer"),
+ ("grounding-dino", "Grounding DINO"),
+ ("groupvit", "GroupViT"),
+ ("herbert", "HerBERT"),
+ ("hubert", "Hubert"),
+ ("ibert", "I-BERT"),
+ ("idefics", "IDEFICS"),
+ ("idefics2", "Idefics2"),
+ ("imagegpt", "ImageGPT"),
+ ("informer", "Informer"),
+ ("instructblip", "InstructBLIP"),
+ ("jamba", "Jamba"),
+ ("jukebox", "Jukebox"),
+ ("kosmos-2", "KOSMOS-2"),
+ ("layoutlm", "LayoutLM"),
+ ("layoutlmv2", "LayoutLMv2"),
+ ("layoutlmv3", "LayoutLMv3"),
+ ("layoutxlm", "LayoutXLM"),
+ ("led", "LED"),
+ ("levit", "LeViT"),
+ ("lilt", "LiLT"),
+ ("llama", "LLaMA"),
+ ("llama2", "Llama2"),
+ ("llava", "LLaVa"),
+ ("llava_next", "LLaVA-NeXT"),
+ ("longformer", "Longformer"),
+ ("longt5", "LongT5"),
+ ("luke", "LUKE"),
+ ("lxmert", "LXMERT"),
+ ("m2m_100", "M2M100"),
+ ("madlad-400", "MADLAD-400"),
+ ("mamba", "Mamba"),
+ ("marian", "Marian"),
+ ("markuplm", "MarkupLM"),
+ ("mask2former", "Mask2Former"),
+ ("maskformer", "MaskFormer"),
+ ("maskformer-swin", "MaskFormerSwin"),
+ ("matcha", "MatCha"),
+ ("mbart", "mBART"),
+ ("mbart50", "mBART-50"),
+ ("mctct", "M-CTC-T"),
+ ("mega", "MEGA"),
+ ("megatron-bert", "Megatron-BERT"),
+ ("megatron_gpt2", "Megatron-GPT2"),
+ ("mgp-str", "MGP-STR"),
+ ("mistral", "Mistral"),
+ ("mixtral", "Mixtral"),
+ ("mluke", "mLUKE"),
+ ("mms", "MMS"),
+ ("mobilebert", "MobileBERT"),
+ ("mobilenet_v1", "MobileNetV1"),
+ ("mobilenet_v2", "MobileNetV2"),
+ ("mobilevit", "MobileViT"),
+ ("mobilevitv2", "MobileViTV2"),
+ ("mpnet", "MPNet"),
+ ("mpt", "MPT"),
+ ("mra", "MRA"),
+ ("mt5", "MT5"),
+ ("musicgen", "MusicGen"),
+ ("musicgen_melody", "MusicGen Melody"),
+ ("mvp", "MVP"),
+ ("nat", "NAT"),
+ ("nezha", "Nezha"),
+ ("nllb", "NLLB"),
+ ("nllb-moe", "NLLB-MOE"),
+ ("nougat", "Nougat"),
+ ("nystromformer", "Nyströmformer"),
+ ("olmo", "OLMo"),
+ ("oneformer", "OneFormer"),
+ ("open-llama", "OpenLlama"),
+ ("openai-gpt", "OpenAI GPT"),
+ ("opt", "OPT"),
+ ("owlv2", "OWLv2"),
+ ("owlvit", "OWL-ViT"),
+ ("patchtsmixer", "PatchTSMixer"),
+ ("patchtst", "PatchTST"),
+ ("pegasus", "Pegasus"),
+ ("pegasus_x", "PEGASUS-X"),
+ ("perceiver", "Perceiver"),
+ ("persimmon", "Persimmon"),
+ ("phi", "Phi"),
+ ("phobert", "PhoBERT"),
+ ("pix2struct", "Pix2Struct"),
+ ("plbart", "PLBart"),
+ ("poolformer", "PoolFormer"),
+ ("pop2piano", "Pop2Piano"),
+ ("prophetnet", "ProphetNet"),
+ ("pvt", "PVT"),
+ ("pvt_v2", "PVTv2"),
+ ("qdqbert", "QDQBert"),
+ ("qwen2", "Qwen2"),
+ ("qwen2_moe", "Qwen2MoE"),
+ ("rag", "RAG"),
+ ("realm", "REALM"),
+ ("recurrent_gemma", "RecurrentGemma"),
+ ("reformer", "Reformer"),
+ ("regnet", "RegNet"),
+ ("rembert", "RemBERT"),
+ ("resnet", "ResNet"),
+ ("retribert", "RetriBERT"),
+ ("roberta", "RoBERTa"),
+ ("roberta-prelayernorm", "RoBERTa-PreLayerNorm"),
+ ("roc_bert", "RoCBert"),
+ ("roformer", "RoFormer"),
+ ("rwkv", "RWKV"),
+ ("sam", "SAM"),
+ ("seamless_m4t", "SeamlessM4T"),
+ ("seamless_m4t_v2", "SeamlessM4Tv2"),
+ ("segformer", "SegFormer"),
+ ("seggpt", "SegGPT"),
+ ("sew", "SEW"),
+ ("sew-d", "SEW-D"),
+ ("siglip", "SigLIP"),
+ ("siglip_vision_model", "SiglipVisionModel"),
+ ("speech-encoder-decoder", "Speech Encoder decoder"),
+ ("speech_to_text", "Speech2Text"),
+ ("speech_to_text_2", "Speech2Text2"),
+ ("speecht5", "SpeechT5"),
+ ("splinter", "Splinter"),
+ ("squeezebert", "SqueezeBERT"),
+ ("stablelm", "StableLm"),
+ ("starcoder2", "Starcoder2"),
+ ("superpoint", "SuperPoint"),
+ ("swiftformer", "SwiftFormer"),
+ ("swin", "Swin Transformer"),
+ ("swin2sr", "Swin2SR"),
+ ("swinv2", "Swin Transformer V2"),
+ ("switch_transformers", "SwitchTransformers"),
+ ("t5", "T5"),
+ ("t5v1.1", "T5v1.1"),
+ ("table-transformer", "Table Transformer"),
+ ("tapas", "TAPAS"),
+ ("tapex", "TAPEX"),
+ ("time_series_transformer", "Time Series Transformer"),
+ ("timesformer", "TimeSformer"),
+ ("timm_backbone", "TimmBackbone"),
+ ("trajectory_transformer", "Trajectory Transformer"),
+ ("transfo-xl", "Transformer-XL"),
+ ("trocr", "TrOCR"),
+ ("tvlt", "TVLT"),
+ ("tvp", "TVP"),
+ ("udop", "UDOP"),
+ ("ul2", "UL2"),
+ ("umt5", "UMT5"),
+ ("unispeech", "UniSpeech"),
+ ("unispeech-sat", "UniSpeechSat"),
+ ("univnet", "UnivNet"),
+ ("upernet", "UPerNet"),
+ ("van", "VAN"),
+ ("videomae", "VideoMAE"),
+ ("vilt", "ViLT"),
+ ("vipllava", "VipLlava"),
+ ("vision-encoder-decoder", "Vision Encoder decoder"),
+ ("vision-text-dual-encoder", "VisionTextDualEncoder"),
+ ("visual_bert", "VisualBERT"),
+ ("vit", "ViT"),
+ ("vit_hybrid", "ViT Hybrid"),
+ ("vit_mae", "ViTMAE"),
+ ("vit_msn", "ViTMSN"),
+ ("vitdet", "VitDet"),
+ ("vitmatte", "ViTMatte"),
+ ("vits", "VITS"),
+ ("vivit", "ViViT"),
+ ("wav2vec2", "Wav2Vec2"),
+ ("wav2vec2-bert", "Wav2Vec2-BERT"),
+ ("wav2vec2-conformer", "Wav2Vec2-Conformer"),
+ ("wav2vec2_phoneme", "Wav2Vec2Phoneme"),
+ ("wavlm", "WavLM"),
+ ("whisper", "Whisper"),
+ ("xclip", "X-CLIP"),
+ ("xglm", "XGLM"),
+ ("xlm", "XLM"),
+ ("xlm-prophetnet", "XLM-ProphetNet"),
+ ("xlm-roberta", "XLM-RoBERTa"),
+ ("xlm-roberta-xl", "XLM-RoBERTa-XL"),
+ ("xlm-v", "XLM-V"),
+ ("xlnet", "XLNet"),
+ ("xls_r", "XLS-R"),
+ ("xlsr_wav2vec2", "XLSR-Wav2Vec2"),
+ ("xmod", "X-MOD"),
+ ("yolos", "YOLOS"),
+ ("yoso", "YOSO"),
+ ]
+)
+
+# This is tied to the processing `-` -> `_` in `model_type_to_module_name`. For example, instead of putting
+# `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`.
+DEPRECATED_MODELS = [
+ "bort",
+ "mctct",
+ "mmbt",
+ "open_llama",
+ "retribert",
+ "tapex",
+ "trajectory_transformer",
+ "transfo_xl",
+ "van",
+]
+
+SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict(
+ [
+ ("openai-gpt", "openai"),
+ ("data2vec-audio", "data2vec"),
+ ("data2vec-text", "data2vec"),
+ ("data2vec-vision", "data2vec"),
+ ("donut-swin", "donut"),
+ ("kosmos-2", "kosmos2"),
+ ("maskformer-swin", "maskformer"),
+ ("xclip", "x_clip"),
+ ("clip_vision_model", "clip"),
+ ("siglip_vision_model", "siglip"),
+ ("chinese_clip_vision_model", "chinese_clip"),
+ ]
+)
+
+
+def model_type_to_module_name(key):
+ """Converts a config key to the corresponding module."""
+ # Special treatment
+ if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME:
+ return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key]
+
+ key = key.replace("-", "_")
+ if key in DEPRECATED_MODELS:
+ key = f"deprecated.{key}"
+
+ return key
+
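
A few illustrative resolutions for the helper above (a sketch; the expected outputs follow directly from `SPECIAL_MODEL_TYPE_TO_MODULE_NAME` and `DEPRECATED_MODELS`):

```python
# Sketch: how model types map to module names under transformers.models.
from transformers.models.auto.configuration_auto import model_type_to_module_name

assert model_type_to_module_name("openai-gpt") == "openai"                  # special-cased
assert model_type_to_module_name("data2vec-audio") == "data2vec"            # special-cased
assert model_type_to_module_name("xlm-roberta") == "xlm_roberta"            # plain "-" -> "_"
assert model_type_to_module_name("transfo-xl") == "deprecated.transfo_xl"   # deprecated model
```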
+
+def config_class_to_model_type(config):
+ """Converts a config class name to the corresponding model type"""
+ for key, cls in CONFIG_MAPPING_NAMES.items():
+ if cls == config:
+ return key
+ # if key not found check in extra content
+ for key, cls in CONFIG_MAPPING._extra_content.items():
+ if cls.__name__ == config:
+ return key
+ return None
+
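
Note that `config_class_to_model_type` takes the class *name* as a string, not the class object; a small sketch:

```python
# Sketch: reverse lookup from a config class name to its model type.
from transformers.models.auto.configuration_auto import config_class_to_model_type

assert config_class_to_model_type("BertConfig") == "bert"
assert config_class_to_model_type("NotARealConfig") is None  # unknown names return None
```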
+
+class _LazyConfigMapping(OrderedDict):
+ """
+ A dictionary that lazily loads its values when they are requested.
+ """
+
+ def __init__(self, mapping):
+ self._mapping = mapping
+ self._extra_content = {}
+ self._modules = {}
+
+ def __getitem__(self, key):
+ if key in self._extra_content:
+ return self._extra_content[key]
+ if key not in self._mapping:
+ raise KeyError(key)
+ value = self._mapping[key]
+ module_name = model_type_to_module_name(key)
+ if module_name not in self._modules:
+ self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
+ if hasattr(self._modules[module_name], value):
+ return getattr(self._modules[module_name], value)
+
+ # Some of the mappings have entries model_type -> config of another model type. In that case we try to grab the
+ # object at the top level.
+ transformers_module = importlib.import_module("transformers")
+ return getattr(transformers_module, value)
+
+ def keys(self):
+ return list(self._mapping.keys()) + list(self._extra_content.keys())
+
+ def values(self):
+ return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values())
+
+ def items(self):
+ return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items())
+
+ def __iter__(self):
+ return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))
+
+ def __contains__(self, item):
+ return item in self._mapping or item in self._extra_content
+
+ def register(self, key, value, exist_ok=False):
+ """
+ Register a new configuration in this mapping.
+ """
+ if key in self._mapping.keys() and not exist_ok:
+ raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
+ self._extra_content[key] = value
+
+
+CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES)
+
+
+class _LazyLoadAllMappings(OrderedDict):
+ """
+ A mapping that will load all key/value pairs at the first access (either by indexing, requesting keys, values,
+ etc.)
+
+ Args:
+ mapping: The mapping to load.
+ """
+
+ def __init__(self, mapping):
+ self._mapping = mapping
+ self._initialized = False
+ self._data = {}
+
+ def _initialize(self):
+ if self._initialized:
+ return
+
+ for model_type, map_name in self._mapping.items():
+ module_name = model_type_to_module_name(model_type)
+ module = importlib.import_module(f".{module_name}", "transformers.models")
+ mapping = getattr(module, map_name)
+ self._data.update(mapping)
+
+ self._initialized = True
+
+ def __getitem__(self, key):
+ self._initialize()
+ return self._data[key]
+
+ def keys(self):
+ self._initialize()
+ return self._data.keys()
+
+ def values(self):
+ self._initialize()
+ return self._data.values()
+
+ def items(self):
+ self._initialize()
+ return self._data.items()
+
+ def __iter__(self):
+ self._initialize()
+ return iter(self._data)
+
+ def __contains__(self, item):
+ self._initialize()
+ return item in self._data
+
+
+def _get_class_name(model_class: Union[str, List[str]]):
+ if isinstance(model_class, (list, tuple)):
+ return " or ".join([f"[`{c}`]" for c in model_class if c is not None])
+ return f"[`{model_class}`]"
+
+
+def _list_model_options(indent, config_to_class=None, use_model_types=True):
+ if config_to_class is None and not use_model_types:
+ raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.")
+ if use_model_types:
+ if config_to_class is None:
+ model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()}
+ else:
+ model_type_to_name = {
+ model_type: _get_class_name(model_class)
+ for model_type, model_class in config_to_class.items()
+ if model_type in MODEL_NAMES_MAPPING
+ }
+ lines = [
+ f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)"
+ for model_type in sorted(model_type_to_name.keys())
+ ]
+ else:
+ config_to_name = {
+ CONFIG_MAPPING_NAMES[config]: _get_class_name(clas)
+ for config, clas in config_to_class.items()
+ if config in CONFIG_MAPPING_NAMES
+ }
+ config_to_model_name = {
+ config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items()
+ }
+ lines = [
+ f"{indent}- [`{config_name}`] configuration class:"
+ f" {config_to_name[config_name]} ({config_to_model_name[config_name]} model)"
+ for config_name in sorted(config_to_name.keys())
+ ]
+ return "\n".join(lines)
+
+
+def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True):
+ def docstring_decorator(fn):
+ docstrings = fn.__doc__
+ if docstrings is None:
+ # Docstrings can be stripped, e.g. when Python is run with -OO.
+ return fn
+ lines = docstrings.split("\n")
+ i = 0
+ while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None:
+ i += 1
+ if i < len(lines):
+ indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0]
+ if use_model_types:
+ indent = f"{indent} "
+ lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types)
+ docstrings = "\n".join(lines)
+ else:
+ raise ValueError(
+ f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current"
+ f" docstring is:\n{docstrings}"
+ )
+ fn.__doc__ = docstrings
+ return fn
+
+ return docstring_decorator
+
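
The decorator above looks for a line containing only `List options` in the wrapped function's docstring and replaces it with a generated bullet list; a minimal sketch with a hypothetical function:

```python
# Sketch: the "List options" placeholder is expanded into one bullet per model type.
from transformers.models.auto.configuration_auto import replace_list_option_in_docstrings

@replace_list_option_in_docstrings()
def describe_configs():
    """
    Configuration classes currently known to the Auto API.

    List options
    """

# The placeholder line is now a bullet list such as:
#     - **albert** -- [`AlbertConfig`] (ALBERT model)
print(describe_configs.__doc__.splitlines()[3])
```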
+
+class AutoConfig:
+ r"""
+ This is a generic configuration class that will be instantiated as one of the configuration classes of the library
+ when created with the [`~AutoConfig.from_pretrained`] class method.
+
+ This class cannot be instantiated directly using `__init__()` (throws an error).
+ """
+
+ def __init__(self):
+ raise EnvironmentError(
+ "AutoConfig is designed to be instantiated "
+ "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method."
+ )
+
+ @classmethod
+ def for_model(cls, model_type: str, *args, **kwargs):
+ if model_type in CONFIG_MAPPING:
+ config_class = CONFIG_MAPPING[model_type]
+ return config_class(*args, **kwargs)
+ raise ValueError(
+ f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}"
+ )
+
+ @classmethod
+ @replace_list_option_in_docstrings()
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ r"""
+ Instantiate one of the configuration classes of the library from a pretrained model configuration.
+
+ The configuration class to instantiate is selected based on the `model_type` property of the config object that
+ is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+ List options
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ Can be either:
+
+ - A string, the *model id* of a pretrained model configuration hosted inside a model repo on
+ huggingface.co.
+ - A path to a *directory* containing a configuration file saved using the
+ [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
+ e.g., `./my_model_directory/`.
+ - A path or url to a saved configuration JSON *file*, e.g.,
+ `./my_model_directory/configuration.json`.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
+ standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files and override the
+ cached versions if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+ file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+ If `False`, then this function returns just the final configuration object.
+
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ kwargs (additional keyword arguments, *optional*):
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
+ by the `return_unused_kwargs` keyword parameter.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoConfig
+
+ >>> # Download configuration from huggingface.co and cache.
+ >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
+
+ >>> # Download configuration from huggingface.co (user-uploaded) and cache.
+ >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased")
+
+ >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*).
+ >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/")
+
+ >>> # Load a specific configuration file.
+ >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json")
+
+ >>> # Change some config attributes when loading a pretrained config.
+ >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
+ >>> config.output_attentions
+ True
+
+ >>> config, unused_kwargs = AutoConfig.from_pretrained(
+ ... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
+ ... )
+ >>> config.output_attentions
+ True
+
+ >>> unused_kwargs
+ {'foo': False}
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if kwargs.get("token", None) is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ kwargs["token"] = use_auth_token
+
+ kwargs["_from_auto"] = True
+ kwargs["name_or_path"] = pretrained_model_name_or_path
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
+ code_revision = kwargs.pop("code_revision", None)
+
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+ has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]
+ has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING
+ trust_remote_code = resolve_trust_remote_code(
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
+ )
+
+ if has_remote_code and trust_remote_code:
+ class_ref = config_dict["auto_map"]["AutoConfig"]
+ config_class = get_class_from_dynamic_module(
+ class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs
+ )
+ if os.path.isdir(pretrained_model_name_or_path):
+ config_class.register_for_auto_class()
+ return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
+ elif "model_type" in config_dict:
+ try:
+ config_class = CONFIG_MAPPING[config_dict["model_type"]]
+ except KeyError:
+ raise ValueError(
+ f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` "
+ "but Transformers does not recognize this architecture. This could be because of an "
+ "issue with the checkpoint, or because your version of Transformers is out of date."
+ )
+ return config_class.from_dict(config_dict, **unused_kwargs)
+ else:
+ # Fallback: use pattern matching on the string.
+ # We go from longer names to shorter names to catch roberta before bert (for instance)
+ for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True):
+ if pattern in str(pretrained_model_name_or_path):
+ return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs)
+
+ raise ValueError(
+ f"Unrecognized model in {pretrained_model_name_or_path}. "
+ f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings "
+ f"in its name: {', '.join(CONFIG_MAPPING.keys())}"
+ )
+
+ @staticmethod
+ def register(model_type, config, exist_ok=False):
+ """
+ Register a new configuration for this class.
+
+ Args:
+ model_type (`str`): The model type like "bert" or "gpt".
+ config ([`PretrainedConfig`]): The config to register.
+ """
+ if issubclass(config, PretrainedConfig) and config.model_type != model_type:
+ raise ValueError(
+ "The config you are passing has a `model_type` attribute that is not consistent with the model type "
+ f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they "
+ "match!"
+ )
+ CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)
+
+
+ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES)
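
To complement the `from_pretrained` examples in the docstring above, a minimal sketch of the registration path (the `NewModelConfig` class is hypothetical, not part of the library):

```python
# Sketch: registering a hypothetical custom configuration with the Auto API.
from transformers import AutoConfig, PretrainedConfig

class NewModelConfig(PretrainedConfig):
    model_type = "new-model"  # must match the model_type passed to register()

AutoConfig.register("new-model", NewModelConfig)

# The custom model type now resolves through CONFIG_MAPPING like any built-in one.
config = AutoConfig.for_model("new-model", hidden_size=128)
assert isinstance(config, NewModelConfig)
```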
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8cb55091b02fdc055ab21c3df48062462f243b9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py
@@ -0,0 +1,396 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" AutoFeatureExtractor class."""
+import importlib
+import json
+import os
+import warnings
+from collections import OrderedDict
+from typing import Dict, Optional, Union
+
+# Build the list of all feature extractors
+from ...configuration_utils import PretrainedConfig
+from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
+from ...feature_extraction_utils import FeatureExtractionMixin
+from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
+from .auto_factory import _LazyAutoMapping
+from .configuration_auto import (
+ CONFIG_MAPPING_NAMES,
+ AutoConfig,
+ model_type_to_module_name,
+ replace_list_option_in_docstrings,
+)
+
+
+logger = logging.get_logger(__name__)
+
+FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
+ [
+ ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
+ ("beit", "BeitFeatureExtractor"),
+ ("chinese_clip", "ChineseCLIPFeatureExtractor"),
+ ("clap", "ClapFeatureExtractor"),
+ ("clip", "CLIPFeatureExtractor"),
+ ("clipseg", "ViTFeatureExtractor"),
+ ("clvp", "ClvpFeatureExtractor"),
+ ("conditional_detr", "ConditionalDetrFeatureExtractor"),
+ ("convnext", "ConvNextFeatureExtractor"),
+ ("cvt", "ConvNextFeatureExtractor"),
+ ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
+ ("data2vec-vision", "BeitFeatureExtractor"),
+ ("deformable_detr", "DeformableDetrFeatureExtractor"),
+ ("deit", "DeiTFeatureExtractor"),
+ ("detr", "DetrFeatureExtractor"),
+ ("dinat", "ViTFeatureExtractor"),
+ ("donut-swin", "DonutFeatureExtractor"),
+ ("dpt", "DPTFeatureExtractor"),
+ ("encodec", "EncodecFeatureExtractor"),
+ ("flava", "FlavaFeatureExtractor"),
+ ("glpn", "GLPNFeatureExtractor"),
+ ("groupvit", "CLIPFeatureExtractor"),
+ ("hubert", "Wav2Vec2FeatureExtractor"),
+ ("imagegpt", "ImageGPTFeatureExtractor"),
+ ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
+ ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
+ ("levit", "LevitFeatureExtractor"),
+ ("maskformer", "MaskFormerFeatureExtractor"),
+ ("mctct", "MCTCTFeatureExtractor"),
+ ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
+ ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
+ ("mobilevit", "MobileViTFeatureExtractor"),
+ ("nat", "ViTFeatureExtractor"),
+ ("owlvit", "OwlViTFeatureExtractor"),
+ ("perceiver", "PerceiverFeatureExtractor"),
+ ("poolformer", "PoolFormerFeatureExtractor"),
+ ("pop2piano", "Pop2PianoFeatureExtractor"),
+ ("regnet", "ConvNextFeatureExtractor"),
+ ("resnet", "ConvNextFeatureExtractor"),
+ ("seamless_m4t", "SeamlessM4TFeatureExtractor"),
+ ("seamless_m4t_v2", "SeamlessM4TFeatureExtractor"),
+ ("segformer", "SegformerFeatureExtractor"),
+ ("sew", "Wav2Vec2FeatureExtractor"),
+ ("sew-d", "Wav2Vec2FeatureExtractor"),
+ ("speech_to_text", "Speech2TextFeatureExtractor"),
+ ("speecht5", "SpeechT5FeatureExtractor"),
+ ("swiftformer", "ViTFeatureExtractor"),
+ ("swin", "ViTFeatureExtractor"),
+ ("swinv2", "ViTFeatureExtractor"),
+ ("table-transformer", "DetrFeatureExtractor"),
+ ("timesformer", "VideoMAEFeatureExtractor"),
+ ("tvlt", "TvltFeatureExtractor"),
+ ("unispeech", "Wav2Vec2FeatureExtractor"),
+ ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
+ ("univnet", "UnivNetFeatureExtractor"),
+ ("van", "ConvNextFeatureExtractor"),
+ ("videomae", "VideoMAEFeatureExtractor"),
+ ("vilt", "ViltFeatureExtractor"),
+ ("vit", "ViTFeatureExtractor"),
+ ("vit_mae", "ViTFeatureExtractor"),
+ ("vit_msn", "ViTFeatureExtractor"),
+ ("wav2vec2", "Wav2Vec2FeatureExtractor"),
+ ("wav2vec2-bert", "Wav2Vec2FeatureExtractor"),
+ ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
+ ("wavlm", "Wav2Vec2FeatureExtractor"),
+ ("whisper", "WhisperFeatureExtractor"),
+ ("xclip", "CLIPFeatureExtractor"),
+ ("yolos", "YolosFeatureExtractor"),
+ ]
+)
+
+FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
+
+
+def feature_extractor_class_from_name(class_name: str):
+ for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
+ if class_name in extractors:
+ module_name = model_type_to_module_name(module_name)
+
+ module = importlib.import_module(f".{module_name}", "transformers.models")
+ try:
+ return getattr(module, class_name)
+ except AttributeError:
+ continue
+
+ for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
+ if getattr(extractor, "__name__", None) == class_name:
+ return extractor
+
+ # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
+ # init and we return the proper dummy to get an appropriate error message.
+ main_module = importlib.import_module("transformers")
+ if hasattr(main_module, class_name):
+ return getattr(main_module, class_name)
+
+ return None
+
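
A small sketch of the name-based resolution above; the class name is looked up against `FEATURE_EXTRACTOR_MAPPING_NAMES` first, then against runtime-registered extractors, and finally against the top-level `transformers` module:

```python
# Sketch: resolving a feature extractor class from its class name.
from transformers.models.auto.feature_extraction_auto import feature_extractor_class_from_name

cls = feature_extractor_class_from_name("Wav2Vec2FeatureExtractor")
assert cls is not None and cls.__name__ == "Wav2Vec2FeatureExtractor"
assert feature_extractor_class_from_name("NotARealFeatureExtractor") is None
```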
+
+def get_feature_extractor_config(
+ pretrained_model_name_or_path: Union[str, os.PathLike],
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
+ force_download: bool = False,
+ resume_download: bool = False,
+ proxies: Optional[Dict[str, str]] = None,
+ token: Optional[Union[bool, str]] = None,
+ revision: Optional[str] = None,
+ local_files_only: bool = False,
+ **kwargs,
+):
+ """
+ Loads the feature extractor configuration from a pretrained model feature extractor configuration.
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ This can be either:
+
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+ huggingface.co.
+ - a path to a *directory* containing a configuration file saved using the
+ [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., `./my_model_directory/`.
+
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+ cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the configuration files and override the cached versions if they
+ exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ If `True`, will only try to load the feature extractor configuration from local files.
+
+
+
+ Passing `token=True` is required when you want to use a private model.
+
+
+
+ Returns:
+ `Dict`: The configuration of the feature extractor.
+
+ Examples:
+
+ ```python
+ # Download configuration from huggingface.co and cache.
+ feature_extractor_config = get_feature_extractor_config("facebook/wav2vec2-base-960h")
+ # This model does not have a feature extractor config so the result will be an empty dict.
+ feature_extractor_config = get_feature_extractor_config("FacebookAI/xlm-roberta-base")
+
+ # Save a pretrained feature extractor locally and you can reload its config
+ from transformers import AutoFeatureExtractor
+
+ feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
+ feature_extractor.save_pretrained("feature-extractor-test")
+ feature_extractor_config = get_feature_extractor_config("feature-extractor-test")
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if token is not None:
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+ token = use_auth_token
+
+ resolved_config_file = get_file_from_repo(
+ pretrained_model_name_or_path,
+ FEATURE_EXTRACTOR_NAME,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ token=token,
+ revision=revision,
+ local_files_only=local_files_only,
+ )
+ if resolved_config_file is None:
+ logger.info(
+ "Could not locate the feature extractor configuration file, will try to use the model config instead."
+ )
+ return {}
+
+ with open(resolved_config_file, encoding="utf-8") as reader:
+ return json.load(reader)
+
+
+class AutoFeatureExtractor:
+ r"""
+ This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the
+ library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.
+
+ This class cannot be instantiated directly using `__init__()` (throws an error).
+ """
+
+ def __init__(self):
+ raise EnvironmentError(
+ "AutoFeatureExtractor is designed to be instantiated "
+ "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
+ )
+
+ @classmethod
+ @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ r"""
+ Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary.
+
+ The feature extractor class to instantiate is selected based on the `model_type` property of the config object
+ (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
+ missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+ List options
+
+ Params:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ This can be either:
+
+ - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
+ huggingface.co.
+ - a path to a *directory* containing a feature extractor file saved using the
+ [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
+ `./my_model_directory/`.
+ - a path or url to a saved feature extractor JSON *file*, e.g.,
+ `./my_model_directory/preprocessor_config.json`.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
+ standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the feature extractor files and override the cached versions
+ if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
+ exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+ If `False`, then this function returns just the final feature extractor object. If `True`, then this
+ function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
+ consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
+ `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ kwargs (`Dict[str, Any]`, *optional*):
+ The values in kwargs of any keys which are feature extractor attributes will be used to override the
+ loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
+ controlled by the `return_unused_kwargs` keyword parameter.
+
+
+
+ Passing `token=True` is required when you want to use a private model.
+
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoFeatureExtractor
+
+ >>> # Download feature extractor from huggingface.co and cache.
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
+
+ >>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*)
+ >>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/")
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if kwargs.get("token", None) is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ kwargs["token"] = use_auth_token
+
+ config = kwargs.pop("config", None)
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
+ kwargs["_from_auto"] = True
+
+ config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
+ feature_extractor_class = config_dict.get("feature_extractor_type", None)
+ feature_extractor_auto_map = None
+ if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
+ feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
+
+ # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
+ if feature_extractor_class is None and feature_extractor_auto_map is None:
+ if not isinstance(config, PretrainedConfig):
+ config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
+ # It could be in `config.feature_extractor_type`
+ feature_extractor_class = getattr(config, "feature_extractor_type", None)
+ if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
+ feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
+
+ if feature_extractor_class is not None:
+ feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
+
+ has_remote_code = feature_extractor_auto_map is not None
+ has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
+ trust_remote_code = resolve_trust_remote_code(
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
+ )
+
+ if has_remote_code and trust_remote_code:
+ feature_extractor_class = get_class_from_dynamic_module(
+ feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
+ )
+ _ = kwargs.pop("code_revision", None)
+ if os.path.isdir(pretrained_model_name_or_path):
+ feature_extractor_class.register_for_auto_class()
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
+ elif feature_extractor_class is not None:
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
+ # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
+ elif type(config) in FEATURE_EXTRACTOR_MAPPING:
+ feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
+
+ raise ValueError(
+ f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
+ f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
+ f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
+ )
+
+ @staticmethod
+ def register(config_class, feature_extractor_class, exist_ok=False):
+ """
+ Register a new feature extractor for this class.
+
+ Args:
+ config_class ([`PretrainedConfig`]):
+ The configuration corresponding to the model to register.
+ feature_extractor_class ([`FeatureExtractionMixin`]): The feature extractor to register.
+ """
+ FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok)
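
A minimal sketch of the registration hook above, wiring a hypothetical feature extractor to a hypothetical config class (the `NewAudio*` names are illustrative, not part of the library):

```python
# Sketch: registering a hypothetical feature extractor for a custom config class.
from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin
from transformers.models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING

class NewAudioConfig(PretrainedConfig):
    model_type = "new-audio"

class NewAudioFeatureExtractor(FeatureExtractionMixin):
    pass

AutoConfig.register("new-audio", NewAudioConfig)
AutoFeatureExtractor.register(NewAudioConfig, NewAudioFeatureExtractor)

# AutoFeatureExtractor can now resolve the custom pair like any built-in model type.
assert NewAudioConfig in FEATURE_EXTRACTOR_MAPPING
assert FEATURE_EXTRACTOR_MAPPING[NewAudioConfig] is NewAudioFeatureExtractor
```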
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/image_processing_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/image_processing_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8538a9a55143ad59d50e632b5415fbb31489ccb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/image_processing_auto.py
@@ -0,0 +1,437 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" AutoImageProcessor class."""
+import importlib
+import json
+import os
+import warnings
+from collections import OrderedDict
+from typing import Dict, Optional, Union
+
+# Build the list of all image processors
+from ...configuration_utils import PretrainedConfig
+from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
+from ...image_processing_utils import ImageProcessingMixin
+from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
+from .auto_factory import _LazyAutoMapping
+from .configuration_auto import (
+ CONFIG_MAPPING_NAMES,
+ AutoConfig,
+ model_type_to_module_name,
+ replace_list_option_in_docstrings,
+)
+
+
+logger = logging.get_logger(__name__)
+
+IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
+ [
+ ("align", "EfficientNetImageProcessor"),
+ ("beit", "BeitImageProcessor"),
+ ("bit", "BitImageProcessor"),
+ ("blip", "BlipImageProcessor"),
+ ("blip-2", "BlipImageProcessor"),
+ ("bridgetower", "BridgeTowerImageProcessor"),
+ ("chinese_clip", "ChineseCLIPImageProcessor"),
+ ("clip", "CLIPImageProcessor"),
+ ("clipseg", "ViTImageProcessor"),
+ ("conditional_detr", "ConditionalDetrImageProcessor"),
+ ("convnext", "ConvNextImageProcessor"),
+ ("convnextv2", "ConvNextImageProcessor"),
+ ("cvt", "ConvNextImageProcessor"),
+ ("data2vec-vision", "BeitImageProcessor"),
+ ("deformable_detr", "DeformableDetrImageProcessor"),
+ ("deit", "DeiTImageProcessor"),
+ ("depth_anything", "DPTImageProcessor"),
+ ("deta", "DetaImageProcessor"),
+ ("detr", "DetrImageProcessor"),
+ ("dinat", "ViTImageProcessor"),
+ ("dinov2", "BitImageProcessor"),
+ ("donut-swin", "DonutImageProcessor"),
+ ("dpt", "DPTImageProcessor"),
+ ("efficientformer", "EfficientFormerImageProcessor"),
+ ("efficientnet", "EfficientNetImageProcessor"),
+ ("flava", "FlavaImageProcessor"),
+ ("focalnet", "BitImageProcessor"),
+ ("fuyu", "FuyuImageProcessor"),
+ ("git", "CLIPImageProcessor"),
+ ("glpn", "GLPNImageProcessor"),
+ ("grounding-dino", "GroundingDinoImageProcessor"),
+ ("groupvit", "CLIPImageProcessor"),
+ ("idefics", "IdeficsImageProcessor"),
+ ("idefics2", "Idefics2ImageProcessor"),
+ ("imagegpt", "ImageGPTImageProcessor"),
+ ("instructblip", "BlipImageProcessor"),
+ ("kosmos-2", "CLIPImageProcessor"),
+ ("layoutlmv2", "LayoutLMv2ImageProcessor"),
+ ("layoutlmv3", "LayoutLMv3ImageProcessor"),
+ ("levit", "LevitImageProcessor"),
+ ("llava", "CLIPImageProcessor"),
+ ("llava_next", "LlavaNextImageProcessor"),
+ ("mask2former", "Mask2FormerImageProcessor"),
+ ("maskformer", "MaskFormerImageProcessor"),
+ ("mgp-str", "ViTImageProcessor"),
+ ("mobilenet_v1", "MobileNetV1ImageProcessor"),
+ ("mobilenet_v2", "MobileNetV2ImageProcessor"),
+ ("mobilevit", "MobileViTImageProcessor"),
+ ("mobilevit", "MobileViTImageProcessor"),
+ ("mobilevitv2", "MobileViTImageProcessor"),
+ ("nat", "ViTImageProcessor"),
+ ("nougat", "NougatImageProcessor"),
+ ("oneformer", "OneFormerImageProcessor"),
+ ("owlv2", "Owlv2ImageProcessor"),
+ ("owlvit", "OwlViTImageProcessor"),
+ ("perceiver", "PerceiverImageProcessor"),
+ ("pix2struct", "Pix2StructImageProcessor"),
+ ("poolformer", "PoolFormerImageProcessor"),
+ ("pvt", "PvtImageProcessor"),
+ ("pvt_v2", "PvtImageProcessor"),
+ ("regnet", "ConvNextImageProcessor"),
+ ("resnet", "ConvNextImageProcessor"),
+ ("sam", "SamImageProcessor"),
+ ("segformer", "SegformerImageProcessor"),
+ ("seggpt", "SegGptImageProcessor"),
+ ("siglip", "SiglipImageProcessor"),
+ ("swiftformer", "ViTImageProcessor"),
+ ("swin", "ViTImageProcessor"),
+ ("swin2sr", "Swin2SRImageProcessor"),
+ ("swinv2", "ViTImageProcessor"),
+ ("table-transformer", "DetrImageProcessor"),
+ ("timesformer", "VideoMAEImageProcessor"),
+ ("tvlt", "TvltImageProcessor"),
+ ("tvp", "TvpImageProcessor"),
+ ("udop", "LayoutLMv3ImageProcessor"),
+ ("upernet", "SegformerImageProcessor"),
+ ("van", "ConvNextImageProcessor"),
+ ("videomae", "VideoMAEImageProcessor"),
+ ("vilt", "ViltImageProcessor"),
+ ("vipllava", "CLIPImageProcessor"),
+ ("vit", "ViTImageProcessor"),
+ ("vit_hybrid", "ViTHybridImageProcessor"),
+ ("vit_mae", "ViTImageProcessor"),
+ ("vit_msn", "ViTImageProcessor"),
+ ("vitmatte", "VitMatteImageProcessor"),
+ ("xclip", "CLIPImageProcessor"),
+ ("yolos", "YolosImageProcessor"),
+ ]
+)
+
+IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
+
+
+def image_processor_class_from_name(class_name: str):
+ for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
+ if class_name in extractors:
+ module_name = model_type_to_module_name(module_name)
+
+ module = importlib.import_module(f".{module_name}", "transformers.models")
+ try:
+ return getattr(module, class_name)
+ except AttributeError:
+ continue
+
+ for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
+ if getattr(extractor, "__name__", None) == class_name:
+ return extractor
+
+ # We did not find the class, but maybe it's because a dependency is missing. In that case, the class will be in
+ # the main init and we return the proper dummy to get an appropriate error message.
+ main_module = importlib.import_module("transformers")
+ if hasattr(main_module, class_name):
+ return getattr(main_module, class_name)
+
+ return None
+
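+# Illustrative sketch of the helper above (assumes the relevant vision dependencies are installed):
+# passing a known class name returns the class itself; an unknown name falls through to `None`.
+#
+#     cls = image_processor_class_from_name("CLIPImageProcessor")  # -> CLIPImageProcessor class
+#     missing = image_processor_class_from_name("NotARealImageProcessor")  # -> None
+#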
+
+def get_image_processor_config(
+ pretrained_model_name_or_path: Union[str, os.PathLike],
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
+ force_download: bool = False,
+ resume_download: bool = False,
+ proxies: Optional[Dict[str, str]] = None,
+ token: Optional[Union[bool, str]] = None,
+ revision: Optional[str] = None,
+ local_files_only: bool = False,
+ **kwargs,
+):
+ """
+ Loads the image processor configuration from a pretrained model's image processor configuration file.
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ This can be either:
+
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+ huggingface.co.
+ - a path to a *directory* containing a configuration file saved using the
+ [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g., `./my_model_directory/`.
+
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+ cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the configuration files and override the cached versions
+ if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete an incompletely received file. Attempts to resume the download if such a file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id. Since we use a
+ git-based system for storing models and other artifacts on huggingface.co, `revision` can be any
+ identifier allowed by git.
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ If `True`, will only try to load the image processor configuration from local files.
+
+
+
+ Passing `token=True` is required when you want to use a private model.
+
+
+
+ Returns:
+ `Dict`: The configuration of the image processor.
+
+ Examples:
+
+ ```python
+ # Download configuration from huggingface.co and cache.
+ image_processor_config = get_image_processor_config("google-bert/bert-base-uncased")
+ # This model does not have an image processor config, so the result will be an empty dict.
+ image_processor_config = get_image_processor_config("FacebookAI/xlm-roberta-base")
+
+ # Save a pretrained image processor locally and you can reload its config
+ from transformers import AutoImageProcessor
+
+ image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
+ image_processor.save_pretrained("image-processor-test")
+ image_processor_config = get_image_processor_config("image-processor-test")
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if token is not None:
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+ token = use_auth_token
+
+ resolved_config_file = get_file_from_repo(
+ pretrained_model_name_or_path,
+ IMAGE_PROCESSOR_NAME,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ token=token,
+ revision=revision,
+ local_files_only=local_files_only,
+ )
+ if resolved_config_file is None:
+ logger.info(
+ "Could not locate the image processor configuration file, will try to use the model config instead."
+ )
+ return {}
+
+ with open(resolved_config_file, encoding="utf-8") as reader:
+ return json.load(reader)
+
+
+class AutoImageProcessor:
+ r"""
+ This is a generic image processor class that will be instantiated as one of the image processor classes of the
+ library when created with the [`AutoImageProcessor.from_pretrained`] class method.
+
+ This class cannot be instantiated directly using `__init__()` (throws an error).
+ """
+
+ def __init__(self):
+ raise EnvironmentError(
+ "AutoImageProcessor is designed to be instantiated "
+ "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
+ )
+
+ @classmethod
+ @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ r"""
+ Instantiate one of the image processor classes of the library from a pretrained model image processor configuration.
+
+ The image processor class to instantiate is selected based on the `model_type` property of the config object
+ (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
+ missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+ List options
+
+ Params:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ This can be either:
+
+ - a string, the *model id* of a pretrained image processor hosted inside a model repo on
+ huggingface.co.
+ - a path to a *directory* containing an image processor file saved using the
+ [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g.,
+ `./my_model_directory/`.
+ - a path or url to a saved image processor JSON *file*, e.g.,
+ `./my_model_directory/preprocessor_config.json`.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model image processor should be cached if the
+ standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the image processor files and override the cached versions
+ if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
+ exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id. Since we use a
+ git-based system for storing models and other artifacts on huggingface.co, `revision` can be any
+ identifier allowed by git.
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+ If `False`, then this function returns just the final image processor object. If `True`, then this
+ function returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
+ consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of
+ `kwargs` which has not been used to update `image_processor` and is otherwise ignored.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ kwargs (`Dict[str, Any]`, *optional*):
+ The values in kwargs of any keys which are image processor attributes will be used to override the
+ loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is
+ controlled by the `return_unused_kwargs` keyword parameter.
+
+
+
+ Passing `token=True` is required when you want to use a private model.
+
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor
+
+ >>> # Download image processor from huggingface.co and cache.
+ >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
+
+ >>> # If image processor files are in a directory (e.g. image processor was saved using *save_pretrained('./test/saved_model/')*)
+ >>> # image_processor = AutoImageProcessor.from_pretrained("./test/saved_model/")
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if kwargs.get("token", None) is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ kwargs["token"] = use_auth_token
+
+ config = kwargs.pop("config", None)
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
+ kwargs["_from_auto"] = True
+
+ config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
+ image_processor_class = config_dict.get("image_processor_type", None)
+ image_processor_auto_map = None
+ if "AutoImageProcessor" in config_dict.get("auto_map", {}):
+ image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
+
+ # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
+ # and if so, infer the image processor class from there.
+ if image_processor_class is None and image_processor_auto_map is None:
+ feature_extractor_class = config_dict.pop("feature_extractor_type", None)
+ if feature_extractor_class is not None:
+ logger.warning(
+ "Could not find image processor class in the image processor config or the model config. Loading "
+ "based on pattern matching with the model's feature extractor configuration. Please open a "
+ "PR/issue to update `preprocessor_config.json` to use `image_processor_type` instead of "
+ "`feature_extractor_type`. This warning will be removed in v4.40."
+ )
+ image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
+ if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
+ feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
+ image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
+ logger.warning(
+ "Could not find image processor auto map in the image processor config or the model config. "
+ "Loading based on pattern matching with the model's feature extractor configuration. Please open a "
+ "PR/issue to update `preprocessor_config.json` to use `AutoImageProcessor` instead of "
+ "`AutoFeatureExtractor`. This warning will be removed in v4.40."
+ )
+
+ # If we don't find the image processor class in the image processor config, let's try the model config.
+ if image_processor_class is None and image_processor_auto_map is None:
+ if not isinstance(config, PretrainedConfig):
+ config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
+ # It could be in `config.image_processor_type`
+ image_processor_class = getattr(config, "image_processor_type", None)
+ if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
+ image_processor_auto_map = config.auto_map["AutoImageProcessor"]
+
+ if image_processor_class is not None:
+ image_processor_class = image_processor_class_from_name(image_processor_class)
+
+ has_remote_code = image_processor_auto_map is not None
+ has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
+ trust_remote_code = resolve_trust_remote_code(
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
+ )
+
+ if has_remote_code and trust_remote_code:
+ image_processor_class = get_class_from_dynamic_module(
+ image_processor_auto_map, pretrained_model_name_or_path, **kwargs
+ )
+ _ = kwargs.pop("code_revision", None)
+ if os.path.isdir(pretrained_model_name_or_path):
+ image_processor_class.register_for_auto_class()
+ return image_processor_class.from_dict(config_dict, **kwargs)
+ elif image_processor_class is not None:
+ return image_processor_class.from_dict(config_dict, **kwargs)
+ # Last try: we use the IMAGE_PROCESSOR_MAPPING.
+ elif type(config) in IMAGE_PROCESSOR_MAPPING:
+ image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
+ return image_processor_class.from_dict(config_dict, **kwargs)
+
+ raise ValueError(
+ f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
+ f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
+ f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
+ )
+
+ @staticmethod
+ def register(config_class, image_processor_class, exist_ok=False):
+ """
+ Register a new image processor for this class.
+
+ Args:
+ config_class ([`PretrainedConfig`]):
+ The configuration corresponding to the model to register.
+ image_processor_class ([`ImageProcessingMixin`]): The image processor to register.
+ """
+ IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class, exist_ok=exist_ok)
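+# Illustrative sketch with hypothetical names (`MyConfig` and `MyImageProcessor` are placeholders,
+# not part of the library): registering a custom image processor so `AutoImageProcessor` can resolve
+# it from a custom config class.
+#
+#     from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
+#     from transformers.image_processing_utils import BaseImageProcessor
+#
+#     class MyConfig(PretrainedConfig):
+#         model_type = "my-new-model"
+#
+#     class MyImageProcessor(BaseImageProcessor):
+#         pass
+#
+#     AutoConfig.register("my-new-model", MyConfig)
+#     AutoImageProcessor.register(MyConfig, MyImageProcessor)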
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcc4829f3f6f1ed0ce6456960f61604bfb8bce09
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py
@@ -0,0 +1,1705 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Auto Model class."""
+
+import warnings
+from collections import OrderedDict
+
+from ...utils import logging
+from .auto_factory import (
+ _BaseAutoBackboneClass,
+ _BaseAutoModelClass,
+ _LazyAutoMapping,
+ auto_class_update,
+)
+from .configuration_auto import CONFIG_MAPPING_NAMES
+
+
+logger = logging.get_logger(__name__)
+
+MODEL_MAPPING_NAMES = OrderedDict(
+ [
+ # Base model mapping
+ ("albert", "AlbertModel"),
+ ("align", "AlignModel"),
+ ("altclip", "AltCLIPModel"),
+ ("audio-spectrogram-transformer", "ASTModel"),
+ ("autoformer", "AutoformerModel"),
+ ("bark", "BarkModel"),
+ ("bart", "BartModel"),
+ ("beit", "BeitModel"),
+ ("bert", "BertModel"),
+ ("bert-generation", "BertGenerationEncoder"),
+ ("big_bird", "BigBirdModel"),
+ ("bigbird_pegasus", "BigBirdPegasusModel"),
+ ("biogpt", "BioGptModel"),
+ ("bit", "BitModel"),
+ ("blenderbot", "BlenderbotModel"),
+ ("blenderbot-small", "BlenderbotSmallModel"),
+ ("blip", "BlipModel"),
+ ("blip-2", "Blip2Model"),
+ ("bloom", "BloomModel"),
+ ("bridgetower", "BridgeTowerModel"),
+ ("bros", "BrosModel"),
+ ("camembert", "CamembertModel"),
+ ("canine", "CanineModel"),
+ ("chinese_clip", "ChineseCLIPModel"),
+ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"),
+ ("clap", "ClapModel"),
+ ("clip", "CLIPModel"),
+ ("clip_vision_model", "CLIPVisionModel"),
+ ("clipseg", "CLIPSegModel"),
+ ("clvp", "ClvpModelForConditionalGeneration"),
+ ("code_llama", "LlamaModel"),
+ ("codegen", "CodeGenModel"),
+ ("cohere", "CohereModel"),
+ ("conditional_detr", "ConditionalDetrModel"),
+ ("convbert", "ConvBertModel"),
+ ("convnext", "ConvNextModel"),
+ ("convnextv2", "ConvNextV2Model"),
+ ("cpmant", "CpmAntModel"),
+ ("ctrl", "CTRLModel"),
+ ("cvt", "CvtModel"),
+ ("data2vec-audio", "Data2VecAudioModel"),
+ ("data2vec-text", "Data2VecTextModel"),
+ ("data2vec-vision", "Data2VecVisionModel"),
+ ("dbrx", "DbrxModel"),
+ ("deberta", "DebertaModel"),
+ ("deberta-v2", "DebertaV2Model"),
+ ("decision_transformer", "DecisionTransformerModel"),
+ ("deformable_detr", "DeformableDetrModel"),
+ ("deit", "DeiTModel"),
+ ("deta", "DetaModel"),
+ ("detr", "DetrModel"),
+ ("dinat", "DinatModel"),
+ ("dinov2", "Dinov2Model"),
+ ("distilbert", "DistilBertModel"),
+ ("donut-swin", "DonutSwinModel"),
+ ("dpr", "DPRQuestionEncoder"),
+ ("dpt", "DPTModel"),
+ ("efficientformer", "EfficientFormerModel"),
+ ("efficientnet", "EfficientNetModel"),
+ ("electra", "ElectraModel"),
+ ("encodec", "EncodecModel"),
+ ("ernie", "ErnieModel"),
+ ("ernie_m", "ErnieMModel"),
+ ("esm", "EsmModel"),
+ ("falcon", "FalconModel"),
+ ("fastspeech2_conformer", "FastSpeech2ConformerModel"),
+ ("flaubert", "FlaubertModel"),
+ ("flava", "FlavaModel"),
+ ("fnet", "FNetModel"),
+ ("focalnet", "FocalNetModel"),
+ ("fsmt", "FSMTModel"),
+ ("funnel", ("FunnelModel", "FunnelBaseModel")),
+ ("gemma", "GemmaModel"),
+ ("git", "GitModel"),
+ ("glpn", "GLPNModel"),
+ ("gpt-sw3", "GPT2Model"),
+ ("gpt2", "GPT2Model"),
+ ("gpt_bigcode", "GPTBigCodeModel"),
+ ("gpt_neo", "GPTNeoModel"),
+ ("gpt_neox", "GPTNeoXModel"),
+ ("gpt_neox_japanese", "GPTNeoXJapaneseModel"),
+ ("gptj", "GPTJModel"),
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
+ ("graphormer", "GraphormerModel"),
+ ("grounding-dino", "GroundingDinoModel"),
+ ("groupvit", "GroupViTModel"),
+ ("hubert", "HubertModel"),
+ ("ibert", "IBertModel"),
+ ("idefics", "IdeficsModel"),
+ ("idefics2", "Idefics2Model"),
+ ("imagegpt", "ImageGPTModel"),
+ ("informer", "InformerModel"),
+ ("jamba", "JambaModel"),
+ ("jukebox", "JukeboxModel"),
+ ("kosmos-2", "Kosmos2Model"),
+ ("layoutlm", "LayoutLMModel"),
+ ("layoutlmv2", "LayoutLMv2Model"),
+ ("layoutlmv3", "LayoutLMv3Model"),
+ ("led", "LEDModel"),
+ ("levit", "LevitModel"),
+ ("lilt", "LiltModel"),
+ ("llama", "LlamaModel"),
+ ("longformer", "LongformerModel"),
+ ("longt5", "LongT5Model"),
+ ("luke", "LukeModel"),
+ ("lxmert", "LxmertModel"),
+ ("m2m_100", "M2M100Model"),
+ ("mamba", "MambaModel"),
+ ("marian", "MarianModel"),
+ ("markuplm", "MarkupLMModel"),
+ ("mask2former", "Mask2FormerModel"),
+ ("maskformer", "MaskFormerModel"),
+ ("maskformer-swin", "MaskFormerSwinModel"),
+ ("mbart", "MBartModel"),
+ ("mctct", "MCTCTModel"),
+ ("mega", "MegaModel"),
+ ("megatron-bert", "MegatronBertModel"),
+ ("mgp-str", "MgpstrForSceneTextRecognition"),
+ ("mistral", "MistralModel"),
+ ("mixtral", "MixtralModel"),
+ ("mobilebert", "MobileBertModel"),
+ ("mobilenet_v1", "MobileNetV1Model"),
+ ("mobilenet_v2", "MobileNetV2Model"),
+ ("mobilevit", "MobileViTModel"),
+ ("mobilevitv2", "MobileViTV2Model"),
+ ("mpnet", "MPNetModel"),
+ ("mpt", "MptModel"),
+ ("mra", "MraModel"),
+ ("mt5", "MT5Model"),
+ ("mvp", "MvpModel"),
+ ("nat", "NatModel"),
+ ("nezha", "NezhaModel"),
+ ("nllb-moe", "NllbMoeModel"),
+ ("nystromformer", "NystromformerModel"),
+ ("olmo", "OlmoModel"),
+ ("oneformer", "OneFormerModel"),
+ ("open-llama", "OpenLlamaModel"),
+ ("openai-gpt", "OpenAIGPTModel"),
+ ("opt", "OPTModel"),
+ ("owlv2", "Owlv2Model"),
+ ("owlvit", "OwlViTModel"),
+ ("patchtsmixer", "PatchTSMixerModel"),
+ ("patchtst", "PatchTSTModel"),
+ ("pegasus", "PegasusModel"),
+ ("pegasus_x", "PegasusXModel"),
+ ("perceiver", "PerceiverModel"),
+ ("persimmon", "PersimmonModel"),
+ ("phi", "PhiModel"),
+ ("plbart", "PLBartModel"),
+ ("poolformer", "PoolFormerModel"),
+ ("prophetnet", "ProphetNetModel"),
+ ("pvt", "PvtModel"),
+ ("pvt_v2", "PvtV2Model"),
+ ("qdqbert", "QDQBertModel"),
+ ("qwen2", "Qwen2Model"),
+ ("qwen2_moe", "Qwen2MoeModel"),
+ ("recurrent_gemma", "RecurrentGemmaModel"),
+ ("reformer", "ReformerModel"),
+ ("regnet", "RegNetModel"),
+ ("rembert", "RemBertModel"),
+ ("resnet", "ResNetModel"),
+ ("retribert", "RetriBertModel"),
+ ("roberta", "RobertaModel"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormModel"),
+ ("roc_bert", "RoCBertModel"),
+ ("roformer", "RoFormerModel"),
+ ("rwkv", "RwkvModel"),
+ ("sam", "SamModel"),
+ ("seamless_m4t", "SeamlessM4TModel"),
+ ("seamless_m4t_v2", "SeamlessM4Tv2Model"),
+ ("segformer", "SegformerModel"),
+ ("seggpt", "SegGptModel"),
+ ("sew", "SEWModel"),
+ ("sew-d", "SEWDModel"),
+ ("siglip", "SiglipModel"),
+ ("siglip_vision_model", "SiglipVisionModel"),
+ ("speech_to_text", "Speech2TextModel"),
+ ("speecht5", "SpeechT5Model"),
+ ("splinter", "SplinterModel"),
+ ("squeezebert", "SqueezeBertModel"),
+ ("stablelm", "StableLmModel"),
+ ("starcoder2", "Starcoder2Model"),
+ ("swiftformer", "SwiftFormerModel"),
+ ("swin", "SwinModel"),
+ ("swin2sr", "Swin2SRModel"),
+ ("swinv2", "Swinv2Model"),
+ ("switch_transformers", "SwitchTransformersModel"),
+ ("t5", "T5Model"),
+ ("table-transformer", "TableTransformerModel"),
+ ("tapas", "TapasModel"),
+ ("time_series_transformer", "TimeSeriesTransformerModel"),
+ ("timesformer", "TimesformerModel"),
+ ("timm_backbone", "TimmBackbone"),
+ ("trajectory_transformer", "TrajectoryTransformerModel"),
+ ("transfo-xl", "TransfoXLModel"),
+ ("tvlt", "TvltModel"),
+ ("tvp", "TvpModel"),
+ ("udop", "UdopModel"),
+ ("umt5", "UMT5Model"),
+ ("unispeech", "UniSpeechModel"),
+ ("unispeech-sat", "UniSpeechSatModel"),
+ ("univnet", "UnivNetModel"),
+ ("van", "VanModel"),
+ ("videomae", "VideoMAEModel"),
+ ("vilt", "ViltModel"),
+ ("vision-text-dual-encoder", "VisionTextDualEncoderModel"),
+ ("visual_bert", "VisualBertModel"),
+ ("vit", "ViTModel"),
+ ("vit_hybrid", "ViTHybridModel"),
+ ("vit_mae", "ViTMAEModel"),
+ ("vit_msn", "ViTMSNModel"),
+ ("vitdet", "VitDetModel"),
+ ("vits", "VitsModel"),
+ ("vivit", "VivitModel"),
+ ("wav2vec2", "Wav2Vec2Model"),
+ ("wav2vec2-bert", "Wav2Vec2BertModel"),
+ ("wav2vec2-conformer", "Wav2Vec2ConformerModel"),
+ ("wavlm", "WavLMModel"),
+ ("whisper", "WhisperModel"),
+ ("xclip", "XCLIPModel"),
+ ("xglm", "XGLMModel"),
+ ("xlm", "XLMModel"),
+ ("xlm-prophetnet", "XLMProphetNetModel"),
+ ("xlm-roberta", "XLMRobertaModel"),
+ ("xlm-roberta-xl", "XLMRobertaXLModel"),
+ ("xlnet", "XLNetModel"),
+ ("xmod", "XmodModel"),
+ ("yolos", "YolosModel"),
+ ("yoso", "YosoModel"),
+ ]
+)
+
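+# Illustrative note (sketch only, assuming PyTorch is installed): each entry above pairs a `model_type`
+# string with a class name; `_LazyAutoMapping` defers the import until a config of that type is looked
+# up, so loading a checkpoint with `AutoModel` pulls in only the one architecture it needs.
+#
+#     from transformers import AutoModel
+#
+#     model = AutoModel.from_pretrained("google-bert/bert-base-uncased")  # "bert" -> BertModel
+#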
+MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for pre-training mapping
+ ("albert", "AlbertForPreTraining"),
+ ("bart", "BartForConditionalGeneration"),
+ ("bert", "BertForPreTraining"),
+ ("big_bird", "BigBirdForPreTraining"),
+ ("bloom", "BloomForCausalLM"),
+ ("camembert", "CamembertForMaskedLM"),
+ ("ctrl", "CTRLLMHeadModel"),
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
+ ("deberta", "DebertaForMaskedLM"),
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
+ ("distilbert", "DistilBertForMaskedLM"),
+ ("electra", "ElectraForPreTraining"),
+ ("ernie", "ErnieForPreTraining"),
+ ("flaubert", "FlaubertWithLMHeadModel"),
+ ("flava", "FlavaForPreTraining"),
+ ("fnet", "FNetForPreTraining"),
+ ("fsmt", "FSMTForConditionalGeneration"),
+ ("funnel", "FunnelForPreTraining"),
+ ("gpt-sw3", "GPT2LMHeadModel"),
+ ("gpt2", "GPT2LMHeadModel"),
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
+ ("ibert", "IBertForMaskedLM"),
+ ("idefics", "IdeficsForVisionText2Text"),
+ ("idefics2", "Idefics2ForConditionalGeneration"),
+ ("layoutlm", "LayoutLMForMaskedLM"),
+ ("llava", "LlavaForConditionalGeneration"),
+ ("llava_next", "LlavaNextForConditionalGeneration"),
+ ("longformer", "LongformerForMaskedLM"),
+ ("luke", "LukeForMaskedLM"),
+ ("lxmert", "LxmertForPreTraining"),
+ ("mamba", "MambaForCausalLM"),
+ ("mega", "MegaForMaskedLM"),
+ ("megatron-bert", "MegatronBertForPreTraining"),
+ ("mobilebert", "MobileBertForPreTraining"),
+ ("mpnet", "MPNetForMaskedLM"),
+ ("mpt", "MptForCausalLM"),
+ ("mra", "MraForMaskedLM"),
+ ("mvp", "MvpForConditionalGeneration"),
+ ("nezha", "NezhaForPreTraining"),
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
+ ("retribert", "RetriBertModel"),
+ ("roberta", "RobertaForMaskedLM"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
+ ("roc_bert", "RoCBertForPreTraining"),
+ ("rwkv", "RwkvForCausalLM"),
+ ("splinter", "SplinterForPreTraining"),
+ ("squeezebert", "SqueezeBertForMaskedLM"),
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
+ ("t5", "T5ForConditionalGeneration"),
+ ("tapas", "TapasForMaskedLM"),
+ ("transfo-xl", "TransfoXLLMHeadModel"),
+ ("tvlt", "TvltForPreTraining"),
+ ("unispeech", "UniSpeechForPreTraining"),
+ ("unispeech-sat", "UniSpeechSatForPreTraining"),
+ ("videomae", "VideoMAEForPreTraining"),
+ ("vipllava", "VipLlavaForConditionalGeneration"),
+ ("visual_bert", "VisualBertForPreTraining"),
+ ("vit_mae", "ViTMAEForPreTraining"),
+ ("wav2vec2", "Wav2Vec2ForPreTraining"),
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForPreTraining"),
+ ("xlm", "XLMWithLMHeadModel"),
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
+ ("xlnet", "XLNetLMHeadModel"),
+ ("xmod", "XmodForMaskedLM"),
+ ]
+)
+
+MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
+ [
+ # Model with LM heads mapping
+ ("albert", "AlbertForMaskedLM"),
+ ("bart", "BartForConditionalGeneration"),
+ ("bert", "BertForMaskedLM"),
+ ("big_bird", "BigBirdForMaskedLM"),
+ ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"),
+ ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"),
+ ("bloom", "BloomForCausalLM"),
+ ("camembert", "CamembertForMaskedLM"),
+ ("codegen", "CodeGenForCausalLM"),
+ ("convbert", "ConvBertForMaskedLM"),
+ ("cpmant", "CpmAntForCausalLM"),
+ ("ctrl", "CTRLLMHeadModel"),
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
+ ("deberta", "DebertaForMaskedLM"),
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
+ ("distilbert", "DistilBertForMaskedLM"),
+ ("electra", "ElectraForMaskedLM"),
+ ("encoder-decoder", "EncoderDecoderModel"),
+ ("ernie", "ErnieForMaskedLM"),
+ ("esm", "EsmForMaskedLM"),
+ ("flaubert", "FlaubertWithLMHeadModel"),
+ ("fnet", "FNetForMaskedLM"),
+ ("fsmt", "FSMTForConditionalGeneration"),
+ ("funnel", "FunnelForMaskedLM"),
+ ("git", "GitForCausalLM"),
+ ("gpt-sw3", "GPT2LMHeadModel"),
+ ("gpt2", "GPT2LMHeadModel"),
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
+ ("gpt_neo", "GPTNeoForCausalLM"),
+ ("gpt_neox", "GPTNeoXForCausalLM"),
+ ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"),
+ ("gptj", "GPTJForCausalLM"),
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
+ ("ibert", "IBertForMaskedLM"),
+ ("layoutlm", "LayoutLMForMaskedLM"),
+ ("led", "LEDForConditionalGeneration"),
+ ("longformer", "LongformerForMaskedLM"),
+ ("longt5", "LongT5ForConditionalGeneration"),
+ ("luke", "LukeForMaskedLM"),
+ ("m2m_100", "M2M100ForConditionalGeneration"),
+ ("mamba", "MambaForCausalLM"),
+ ("marian", "MarianMTModel"),
+ ("mega", "MegaForMaskedLM"),
+ ("megatron-bert", "MegatronBertForCausalLM"),
+ ("mobilebert", "MobileBertForMaskedLM"),
+ ("mpnet", "MPNetForMaskedLM"),
+ ("mpt", "MptForCausalLM"),
+ ("mra", "MraForMaskedLM"),
+ ("mvp", "MvpForConditionalGeneration"),
+ ("nezha", "NezhaForMaskedLM"),
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
+ ("nystromformer", "NystromformerForMaskedLM"),
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
+ ("pegasus_x", "PegasusXForConditionalGeneration"),
+ ("plbart", "PLBartForConditionalGeneration"),
+ ("pop2piano", "Pop2PianoForConditionalGeneration"),
+ ("qdqbert", "QDQBertForMaskedLM"),
+ ("reformer", "ReformerModelWithLMHead"),
+ ("rembert", "RemBertForMaskedLM"),
+ ("roberta", "RobertaForMaskedLM"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
+ ("roc_bert", "RoCBertForMaskedLM"),
+ ("roformer", "RoFormerForMaskedLM"),
+ ("rwkv", "RwkvForCausalLM"),
+ ("speech_to_text", "Speech2TextForConditionalGeneration"),
+ ("squeezebert", "SqueezeBertForMaskedLM"),
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
+ ("t5", "T5ForConditionalGeneration"),
+ ("tapas", "TapasForMaskedLM"),
+ ("transfo-xl", "TransfoXLLMHeadModel"),
+ ("wav2vec2", "Wav2Vec2ForMaskedLM"),
+ ("whisper", "WhisperForConditionalGeneration"),
+ ("xlm", "XLMWithLMHeadModel"),
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
+ ("xlnet", "XLNetLMHeadModel"),
+ ("xmod", "XmodForMaskedLM"),
+ ("yoso", "YosoForMaskedLM"),
+ ]
+)
+
+MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Causal LM mapping
+ ("bart", "BartForCausalLM"),
+ ("bert", "BertLMHeadModel"),
+ ("bert-generation", "BertGenerationDecoder"),
+ ("big_bird", "BigBirdForCausalLM"),
+ ("bigbird_pegasus", "BigBirdPegasusForCausalLM"),
+ ("biogpt", "BioGptForCausalLM"),
+ ("blenderbot", "BlenderbotForCausalLM"),
+ ("blenderbot-small", "BlenderbotSmallForCausalLM"),
+ ("bloom", "BloomForCausalLM"),
+ ("camembert", "CamembertForCausalLM"),
+ ("code_llama", "LlamaForCausalLM"),
+ ("codegen", "CodeGenForCausalLM"),
+ ("cohere", "CohereForCausalLM"),
+ ("cpmant", "CpmAntForCausalLM"),
+ ("ctrl", "CTRLLMHeadModel"),
+ ("data2vec-text", "Data2VecTextForCausalLM"),
+ ("dbrx", "DbrxForCausalLM"),
+ ("electra", "ElectraForCausalLM"),
+ ("ernie", "ErnieForCausalLM"),
+ ("falcon", "FalconForCausalLM"),
+ ("fuyu", "FuyuForCausalLM"),
+ ("gemma", "GemmaForCausalLM"),
+ ("git", "GitForCausalLM"),
+ ("gpt-sw3", "GPT2LMHeadModel"),
+ ("gpt2", "GPT2LMHeadModel"),
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
+ ("gpt_neo", "GPTNeoForCausalLM"),
+ ("gpt_neox", "GPTNeoXForCausalLM"),
+ ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"),
+ ("gptj", "GPTJForCausalLM"),
+ ("jamba", "JambaForCausalLM"),
+ ("llama", "LlamaForCausalLM"),
+ ("mamba", "MambaForCausalLM"),
+ ("marian", "MarianForCausalLM"),
+ ("mbart", "MBartForCausalLM"),
+ ("mega", "MegaForCausalLM"),
+ ("megatron-bert", "MegatronBertForCausalLM"),
+ ("mistral", "MistralForCausalLM"),
+ ("mixtral", "MixtralForCausalLM"),
+ ("mpt", "MptForCausalLM"),
+ ("musicgen", "MusicgenForCausalLM"),
+ ("musicgen_melody", "MusicgenMelodyForCausalLM"),
+ ("mvp", "MvpForCausalLM"),
+ ("olmo", "OlmoForCausalLM"),
+ ("open-llama", "OpenLlamaForCausalLM"),
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
+ ("opt", "OPTForCausalLM"),
+ ("pegasus", "PegasusForCausalLM"),
+ ("persimmon", "PersimmonForCausalLM"),
+ ("phi", "PhiForCausalLM"),
+ ("plbart", "PLBartForCausalLM"),
+ ("prophetnet", "ProphetNetForCausalLM"),
+ ("qdqbert", "QDQBertLMHeadModel"),
+ ("qwen2", "Qwen2ForCausalLM"),
+ ("qwen2_moe", "Qwen2MoeForCausalLM"),
+ ("recurrent_gemma", "RecurrentGemmaForCausalLM"),
+ ("reformer", "ReformerModelWithLMHead"),
+ ("rembert", "RemBertForCausalLM"),
+ ("roberta", "RobertaForCausalLM"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForCausalLM"),
+ ("roc_bert", "RoCBertForCausalLM"),
+ ("roformer", "RoFormerForCausalLM"),
+ ("rwkv", "RwkvForCausalLM"),
+ ("speech_to_text_2", "Speech2Text2ForCausalLM"),
+ ("stablelm", "StableLmForCausalLM"),
+ ("starcoder2", "Starcoder2ForCausalLM"),
+ ("transfo-xl", "TransfoXLLMHeadModel"),
+ ("trocr", "TrOCRForCausalLM"),
+ ("whisper", "WhisperForCausalLM"),
+ ("xglm", "XGLMForCausalLM"),
+ ("xlm", "XLMWithLMHeadModel"),
+ ("xlm-prophetnet", "XLMProphetNetForCausalLM"),
+ ("xlm-roberta", "XLMRobertaForCausalLM"),
+ ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"),
+ ("xlnet", "XLNetLMHeadModel"),
+ ("xmod", "XmodForCausalLM"),
+ ]
+)
+
+MODEL_FOR_IMAGE_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Image mapping
+ ("beit", "BeitModel"),
+ ("bit", "BitModel"),
+ ("conditional_detr", "ConditionalDetrModel"),
+ ("convnext", "ConvNextModel"),
+ ("convnextv2", "ConvNextV2Model"),
+ ("data2vec-vision", "Data2VecVisionModel"),
+ ("deformable_detr", "DeformableDetrModel"),
+ ("deit", "DeiTModel"),
+ ("deta", "DetaModel"),
+ ("detr", "DetrModel"),
+ ("dinat", "DinatModel"),
+ ("dinov2", "Dinov2Model"),
+ ("dpt", "DPTModel"),
+ ("efficientformer", "EfficientFormerModel"),
+ ("efficientnet", "EfficientNetModel"),
+ ("focalnet", "FocalNetModel"),
+ ("glpn", "GLPNModel"),
+ ("imagegpt", "ImageGPTModel"),
+ ("levit", "LevitModel"),
+ ("mobilenet_v1", "MobileNetV1Model"),
+ ("mobilenet_v2", "MobileNetV2Model"),
+ ("mobilevit", "MobileViTModel"),
+ ("mobilevitv2", "MobileViTV2Model"),
+ ("nat", "NatModel"),
+ ("poolformer", "PoolFormerModel"),
+ ("pvt", "PvtModel"),
+ ("regnet", "RegNetModel"),
+ ("resnet", "ResNetModel"),
+ ("segformer", "SegformerModel"),
+ ("siglip_vision_model", "SiglipVisionModel"),
+ ("swiftformer", "SwiftFormerModel"),
+ ("swin", "SwinModel"),
+ ("swin2sr", "Swin2SRModel"),
+ ("swinv2", "Swinv2Model"),
+ ("table-transformer", "TableTransformerModel"),
+ ("timesformer", "TimesformerModel"),
+ ("timm_backbone", "TimmBackbone"),
+ ("van", "VanModel"),
+ ("videomae", "VideoMAEModel"),
+ ("vit", "ViTModel"),
+ ("vit_hybrid", "ViTHybridModel"),
+ ("vit_mae", "ViTMAEModel"),
+ ("vit_msn", "ViTMSNModel"),
+ ("vitdet", "VitDetModel"),
+ ("vivit", "VivitModel"),
+ ("yolos", "YolosModel"),
+ ]
+)
+
+MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
+ [
+ ("deit", "DeiTForMaskedImageModeling"),
+ ("focalnet", "FocalNetForMaskedImageModeling"),
+ ("swin", "SwinForMaskedImageModeling"),
+ ("swinv2", "Swinv2ForMaskedImageModeling"),
+ ("vit", "ViTForMaskedImageModeling"),
+ ]
+)
+
+
+MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
+ # Model for Causal Image Modeling mapping
+ [
+ ("imagegpt", "ImageGPTForCausalImageModeling"),
+ ]
+)
+
+MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Image Classification mapping
+ ("beit", "BeitForImageClassification"),
+ ("bit", "BitForImageClassification"),
+ ("clip", "CLIPForImageClassification"),
+ ("convnext", "ConvNextForImageClassification"),
+ ("convnextv2", "ConvNextV2ForImageClassification"),
+ ("cvt", "CvtForImageClassification"),
+ ("data2vec-vision", "Data2VecVisionForImageClassification"),
+ (
+ "deit",
+ ("DeiTForImageClassification", "DeiTForImageClassificationWithTeacher"),
+ ),
+ ("dinat", "DinatForImageClassification"),
+ ("dinov2", "Dinov2ForImageClassification"),
+ (
+ "efficientformer",
+ (
+ "EfficientFormerForImageClassification",
+ "EfficientFormerForImageClassificationWithTeacher",
+ ),
+ ),
+ ("efficientnet", "EfficientNetForImageClassification"),
+ ("focalnet", "FocalNetForImageClassification"),
+ ("imagegpt", "ImageGPTForImageClassification"),
+ (
+ "levit",
+ ("LevitForImageClassification", "LevitForImageClassificationWithTeacher"),
+ ),
+ ("mobilenet_v1", "MobileNetV1ForImageClassification"),
+ ("mobilenet_v2", "MobileNetV2ForImageClassification"),
+ ("mobilevit", "MobileViTForImageClassification"),
+ ("mobilevitv2", "MobileViTV2ForImageClassification"),
+ ("nat", "NatForImageClassification"),
+ (
+ "perceiver",
+ (
+ "PerceiverForImageClassificationLearned",
+ "PerceiverForImageClassificationFourier",
+ "PerceiverForImageClassificationConvProcessing",
+ ),
+ ),
+ ("poolformer", "PoolFormerForImageClassification"),
+ ("pvt", "PvtForImageClassification"),
+ ("pvt_v2", "PvtV2ForImageClassification"),
+ ("regnet", "RegNetForImageClassification"),
+ ("resnet", "ResNetForImageClassification"),
+ ("segformer", "SegformerForImageClassification"),
+ ("siglip", "SiglipForImageClassification"),
+ ("swiftformer", "SwiftFormerForImageClassification"),
+ ("swin", "SwinForImageClassification"),
+ ("swinv2", "Swinv2ForImageClassification"),
+ ("van", "VanForImageClassification"),
+ ("vit", "ViTForImageClassification"),
+ ("vit_hybrid", "ViTHybridForImageClassification"),
+ ("vit_msn", "ViTMSNForImageClassification"),
+ ]
+)
+
+MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Do not add new models here, this class will be deprecated in the future.
+ # Model for Image Segmentation mapping
+ ("detr", "DetrForSegmentation"),
+ ]
+)
+
+MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Semantic Segmentation mapping
+ ("beit", "BeitForSemanticSegmentation"),
+ ("data2vec-vision", "Data2VecVisionForSemanticSegmentation"),
+ ("dpt", "DPTForSemanticSegmentation"),
+ ("mobilenet_v2", "MobileNetV2ForSemanticSegmentation"),
+ ("mobilevit", "MobileViTForSemanticSegmentation"),
+ ("mobilevitv2", "MobileViTV2ForSemanticSegmentation"),
+ ("segformer", "SegformerForSemanticSegmentation"),
+ ("upernet", "UperNetForSemanticSegmentation"),
+ ]
+)
+
+MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Instance Segmentation mapping
+ # MaskFormerForInstanceSegmentation can be removed from this mapping in v5
+ ("maskformer", "MaskFormerForInstanceSegmentation"),
+ ]
+)
+
+MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Universal Segmentation mapping
+ ("detr", "DetrForSegmentation"),
+ ("mask2former", "Mask2FormerForUniversalSegmentation"),
+ ("maskformer", "MaskFormerForInstanceSegmentation"),
+ ("oneformer", "OneFormerForUniversalSegmentation"),
+ ]
+)
+
+MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ ("timesformer", "TimesformerForVideoClassification"),
+ ("videomae", "VideoMAEForVideoClassification"),
+ ("vivit", "VivitForVideoClassification"),
+ ]
+)
+
+MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
+ [
+ ("blip", "BlipForConditionalGeneration"),
+ ("blip-2", "Blip2ForConditionalGeneration"),
+ ("git", "GitForCausalLM"),
+ ("idefics2", "Idefics2ForConditionalGeneration"),
+ ("instructblip", "InstructBlipForConditionalGeneration"),
+ ("kosmos-2", "Kosmos2ForConditionalGeneration"),
+ ("llava", "LlavaForConditionalGeneration"),
+ ("llava_next", "LlavaNextForConditionalGeneration"),
+ ("pix2struct", "Pix2StructForConditionalGeneration"),
+ ("vipllava", "VipLlavaForConditionalGeneration"),
+ ("vision-encoder-decoder", "VisionEncoderDecoderModel"),
+ ]
+)
+
+MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Masked LM mapping
+ ("albert", "AlbertForMaskedLM"),
+ ("bart", "BartForConditionalGeneration"),
+ ("bert", "BertForMaskedLM"),
+ ("big_bird", "BigBirdForMaskedLM"),
+ ("camembert", "CamembertForMaskedLM"),
+ ("convbert", "ConvBertForMaskedLM"),
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
+ ("deberta", "DebertaForMaskedLM"),
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
+ ("distilbert", "DistilBertForMaskedLM"),
+ ("electra", "ElectraForMaskedLM"),
+ ("ernie", "ErnieForMaskedLM"),
+ ("esm", "EsmForMaskedLM"),
+ ("flaubert", "FlaubertWithLMHeadModel"),
+ ("fnet", "FNetForMaskedLM"),
+ ("funnel", "FunnelForMaskedLM"),
+ ("ibert", "IBertForMaskedLM"),
+ ("layoutlm", "LayoutLMForMaskedLM"),
+ ("longformer", "LongformerForMaskedLM"),
+ ("luke", "LukeForMaskedLM"),
+ ("mbart", "MBartForConditionalGeneration"),
+ ("mega", "MegaForMaskedLM"),
+ ("megatron-bert", "MegatronBertForMaskedLM"),
+ ("mobilebert", "MobileBertForMaskedLM"),
+ ("mpnet", "MPNetForMaskedLM"),
+ ("mra", "MraForMaskedLM"),
+ ("mvp", "MvpForConditionalGeneration"),
+ ("nezha", "NezhaForMaskedLM"),
+ ("nystromformer", "NystromformerForMaskedLM"),
+ ("perceiver", "PerceiverForMaskedLM"),
+ ("qdqbert", "QDQBertForMaskedLM"),
+ ("reformer", "ReformerForMaskedLM"),
+ ("rembert", "RemBertForMaskedLM"),
+ ("roberta", "RobertaForMaskedLM"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
+ ("roc_bert", "RoCBertForMaskedLM"),
+ ("roformer", "RoFormerForMaskedLM"),
+ ("squeezebert", "SqueezeBertForMaskedLM"),
+ ("tapas", "TapasForMaskedLM"),
+ ("wav2vec2", "Wav2Vec2ForMaskedLM"),
+ ("xlm", "XLMWithLMHeadModel"),
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
+ ("xmod", "XmodForMaskedLM"),
+ ("yoso", "YosoForMaskedLM"),
+ ]
+)
+
+MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Object Detection mapping
+ ("conditional_detr", "ConditionalDetrForObjectDetection"),
+ ("deformable_detr", "DeformableDetrForObjectDetection"),
+ ("deta", "DetaForObjectDetection"),
+ ("detr", "DetrForObjectDetection"),
+ ("table-transformer", "TableTransformerForObjectDetection"),
+ ("yolos", "YolosForObjectDetection"),
+ ]
+)
+
+MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Zero Shot Object Detection mapping
+ ("grounding-dino", "GroundingDinoForObjectDetection"),
+ ("owlv2", "Owlv2ForObjectDetection"),
+ ("owlvit", "OwlViTForObjectDetection"),
+ ]
+)
+
+MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for depth estimation mapping
+ ("depth_anything", "DepthAnythingForDepthEstimation"),
+ ("dpt", "DPTForDepthEstimation"),
+ ("glpn", "GLPNForDepthEstimation"),
+ ]
+)
+MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Seq2Seq Causal LM mapping
+ ("bart", "BartForConditionalGeneration"),
+ ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"),
+ ("blenderbot", "BlenderbotForConditionalGeneration"),
+ ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"),
+ ("encoder-decoder", "EncoderDecoderModel"),
+ ("fsmt", "FSMTForConditionalGeneration"),
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
+ ("led", "LEDForConditionalGeneration"),
+ ("longt5", "LongT5ForConditionalGeneration"),
+ ("m2m_100", "M2M100ForConditionalGeneration"),
+ ("marian", "MarianMTModel"),
+ ("mbart", "MBartForConditionalGeneration"),
+ ("mt5", "MT5ForConditionalGeneration"),
+ ("mvp", "MvpForConditionalGeneration"),
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
+ ("pegasus", "PegasusForConditionalGeneration"),
+ ("pegasus_x", "PegasusXForConditionalGeneration"),
+ ("plbart", "PLBartForConditionalGeneration"),
+ ("prophetnet", "ProphetNetForConditionalGeneration"),
+ ("seamless_m4t", "SeamlessM4TForTextToText"),
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToText"),
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
+ ("t5", "T5ForConditionalGeneration"),
+ ("umt5", "UMT5ForConditionalGeneration"),
+ ("xlm-prophetnet", "XLMProphetNetForConditionalGeneration"),
+ ]
+)
+
+MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
+ [
+ ("pop2piano", "Pop2PianoForConditionalGeneration"),
+ ("seamless_m4t", "SeamlessM4TForSpeechToText"),
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForSpeechToText"),
+ ("speech-encoder-decoder", "SpeechEncoderDecoderModel"),
+ ("speech_to_text", "Speech2TextForConditionalGeneration"),
+ ("speecht5", "SpeechT5ForSpeechToText"),
+ ("whisper", "WhisperForConditionalGeneration"),
+ ]
+)
+
+MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Sequence Classification mapping
+ ("albert", "AlbertForSequenceClassification"),
+ ("bart", "BartForSequenceClassification"),
+ ("bert", "BertForSequenceClassification"),
+ ("big_bird", "BigBirdForSequenceClassification"),
+ ("bigbird_pegasus", "BigBirdPegasusForSequenceClassification"),
+ ("biogpt", "BioGptForSequenceClassification"),
+ ("bloom", "BloomForSequenceClassification"),
+ ("camembert", "CamembertForSequenceClassification"),
+ ("canine", "CanineForSequenceClassification"),
+ ("code_llama", "LlamaForSequenceClassification"),
+ ("convbert", "ConvBertForSequenceClassification"),
+ ("ctrl", "CTRLForSequenceClassification"),
+ ("data2vec-text", "Data2VecTextForSequenceClassification"),
+ ("deberta", "DebertaForSequenceClassification"),
+ ("deberta-v2", "DebertaV2ForSequenceClassification"),
+ ("distilbert", "DistilBertForSequenceClassification"),
+ ("electra", "ElectraForSequenceClassification"),
+ ("ernie", "ErnieForSequenceClassification"),
+ ("ernie_m", "ErnieMForSequenceClassification"),
+ ("esm", "EsmForSequenceClassification"),
+ ("falcon", "FalconForSequenceClassification"),
+ ("flaubert", "FlaubertForSequenceClassification"),
+ ("fnet", "FNetForSequenceClassification"),
+ ("funnel", "FunnelForSequenceClassification"),
+ ("gemma", "GemmaForSequenceClassification"),
+ ("gpt-sw3", "GPT2ForSequenceClassification"),
+ ("gpt2", "GPT2ForSequenceClassification"),
+ ("gpt_bigcode", "GPTBigCodeForSequenceClassification"),
+ ("gpt_neo", "GPTNeoForSequenceClassification"),
+ ("gpt_neox", "GPTNeoXForSequenceClassification"),
+ ("gptj", "GPTJForSequenceClassification"),
+ ("ibert", "IBertForSequenceClassification"),
+ ("jamba", "JambaForSequenceClassification"),
+ ("layoutlm", "LayoutLMForSequenceClassification"),
+ ("layoutlmv2", "LayoutLMv2ForSequenceClassification"),
+ ("layoutlmv3", "LayoutLMv3ForSequenceClassification"),
+ ("led", "LEDForSequenceClassification"),
+ ("lilt", "LiltForSequenceClassification"),
+ ("llama", "LlamaForSequenceClassification"),
+ ("longformer", "LongformerForSequenceClassification"),
+ ("luke", "LukeForSequenceClassification"),
+ ("markuplm", "MarkupLMForSequenceClassification"),
+ ("mbart", "MBartForSequenceClassification"),
+ ("mega", "MegaForSequenceClassification"),
+ ("megatron-bert", "MegatronBertForSequenceClassification"),
+ ("mistral", "MistralForSequenceClassification"),
+ ("mixtral", "MixtralForSequenceClassification"),
+ ("mobilebert", "MobileBertForSequenceClassification"),
+ ("mpnet", "MPNetForSequenceClassification"),
+ ("mpt", "MptForSequenceClassification"),
+ ("mra", "MraForSequenceClassification"),
+ ("mt5", "MT5ForSequenceClassification"),
+ ("mvp", "MvpForSequenceClassification"),
+ ("nezha", "NezhaForSequenceClassification"),
+ ("nystromformer", "NystromformerForSequenceClassification"),
+ ("open-llama", "OpenLlamaForSequenceClassification"),
+ ("openai-gpt", "OpenAIGPTForSequenceClassification"),
+ ("opt", "OPTForSequenceClassification"),
+ ("perceiver", "PerceiverForSequenceClassification"),
+ ("persimmon", "PersimmonForSequenceClassification"),
+ ("phi", "PhiForSequenceClassification"),
+ ("plbart", "PLBartForSequenceClassification"),
+ ("qdqbert", "QDQBertForSequenceClassification"),
+ ("qwen2", "Qwen2ForSequenceClassification"),
+ ("qwen2_moe", "Qwen2MoeForSequenceClassification"),
+ ("reformer", "ReformerForSequenceClassification"),
+ ("rembert", "RemBertForSequenceClassification"),
+ ("roberta", "RobertaForSequenceClassification"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForSequenceClassification"),
+ ("roc_bert", "RoCBertForSequenceClassification"),
+ ("roformer", "RoFormerForSequenceClassification"),
+ ("squeezebert", "SqueezeBertForSequenceClassification"),
+ ("stablelm", "StableLmForSequenceClassification"),
+ ("starcoder2", "Starcoder2ForSequenceClassification"),
+ ("t5", "T5ForSequenceClassification"),
+ ("tapas", "TapasForSequenceClassification"),
+ ("transfo-xl", "TransfoXLForSequenceClassification"),
+ ("umt5", "UMT5ForSequenceClassification"),
+ ("xlm", "XLMForSequenceClassification"),
+ ("xlm-roberta", "XLMRobertaForSequenceClassification"),
+ ("xlm-roberta-xl", "XLMRobertaXLForSequenceClassification"),
+ ("xlnet", "XLNetForSequenceClassification"),
+ ("xmod", "XmodForSequenceClassification"),
+ ("yoso", "YosoForSequenceClassification"),
+ ]
+)
+
+MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Question Answering mapping
+ ("albert", "AlbertForQuestionAnswering"),
+ ("bart", "BartForQuestionAnswering"),
+ ("bert", "BertForQuestionAnswering"),
+ ("big_bird", "BigBirdForQuestionAnswering"),
+ ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"),
+ ("bloom", "BloomForQuestionAnswering"),
+ ("camembert", "CamembertForQuestionAnswering"),
+ ("canine", "CanineForQuestionAnswering"),
+ ("convbert", "ConvBertForQuestionAnswering"),
+ ("data2vec-text", "Data2VecTextForQuestionAnswering"),
+ ("deberta", "DebertaForQuestionAnswering"),
+ ("deberta-v2", "DebertaV2ForQuestionAnswering"),
+ ("distilbert", "DistilBertForQuestionAnswering"),
+ ("electra", "ElectraForQuestionAnswering"),
+ ("ernie", "ErnieForQuestionAnswering"),
+ ("ernie_m", "ErnieMForQuestionAnswering"),
+ ("falcon", "FalconForQuestionAnswering"),
+ ("flaubert", "FlaubertForQuestionAnsweringSimple"),
+ ("fnet", "FNetForQuestionAnswering"),
+ ("funnel", "FunnelForQuestionAnswering"),
+ ("gpt2", "GPT2ForQuestionAnswering"),
+ ("gpt_neo", "GPTNeoForQuestionAnswering"),
+ ("gpt_neox", "GPTNeoXForQuestionAnswering"),
+ ("gptj", "GPTJForQuestionAnswering"),
+ ("ibert", "IBertForQuestionAnswering"),
+ ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"),
+ ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"),
+ ("led", "LEDForQuestionAnswering"),
+ ("lilt", "LiltForQuestionAnswering"),
+ ("llama", "LlamaForQuestionAnswering"),
+ ("longformer", "LongformerForQuestionAnswering"),
+ ("luke", "LukeForQuestionAnswering"),
+ ("lxmert", "LxmertForQuestionAnswering"),
+ ("markuplm", "MarkupLMForQuestionAnswering"),
+ ("mbart", "MBartForQuestionAnswering"),
+ ("mega", "MegaForQuestionAnswering"),
+ ("megatron-bert", "MegatronBertForQuestionAnswering"),
+ ("mobilebert", "MobileBertForQuestionAnswering"),
+ ("mpnet", "MPNetForQuestionAnswering"),
+ ("mpt", "MptForQuestionAnswering"),
+ ("mra", "MraForQuestionAnswering"),
+ ("mt5", "MT5ForQuestionAnswering"),
+ ("mvp", "MvpForQuestionAnswering"),
+ ("nezha", "NezhaForQuestionAnswering"),
+ ("nystromformer", "NystromformerForQuestionAnswering"),
+ ("opt", "OPTForQuestionAnswering"),
+ ("qdqbert", "QDQBertForQuestionAnswering"),
+ ("reformer", "ReformerForQuestionAnswering"),
+ ("rembert", "RemBertForQuestionAnswering"),
+ ("roberta", "RobertaForQuestionAnswering"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForQuestionAnswering"),
+ ("roc_bert", "RoCBertForQuestionAnswering"),
+ ("roformer", "RoFormerForQuestionAnswering"),
+ ("splinter", "SplinterForQuestionAnswering"),
+ ("squeezebert", "SqueezeBertForQuestionAnswering"),
+ ("t5", "T5ForQuestionAnswering"),
+ ("umt5", "UMT5ForQuestionAnswering"),
+ ("xlm", "XLMForQuestionAnsweringSimple"),
+ ("xlm-roberta", "XLMRobertaForQuestionAnswering"),
+ ("xlm-roberta-xl", "XLMRobertaXLForQuestionAnswering"),
+ ("xlnet", "XLNetForQuestionAnsweringSimple"),
+ ("xmod", "XmodForQuestionAnswering"),
+ ("yoso", "YosoForQuestionAnswering"),
+ ]
+)
+
+MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Table Question Answering mapping
+ ("tapas", "TapasForQuestionAnswering"),
+ ]
+)
+
+MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ ("blip", "BlipForQuestionAnswering"),
+ ("blip-2", "Blip2ForConditionalGeneration"),
+ ("vilt", "ViltForQuestionAnswering"),
+ ]
+)
+
+MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ ("layoutlm", "LayoutLMForQuestionAnswering"),
+ ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"),
+ ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"),
+ ]
+)
+
+MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Token Classification mapping
+ ("albert", "AlbertForTokenClassification"),
+ ("bert", "BertForTokenClassification"),
+ ("big_bird", "BigBirdForTokenClassification"),
+ ("biogpt", "BioGptForTokenClassification"),
+ ("bloom", "BloomForTokenClassification"),
+ ("bros", "BrosForTokenClassification"),
+ ("camembert", "CamembertForTokenClassification"),
+ ("canine", "CanineForTokenClassification"),
+ ("convbert", "ConvBertForTokenClassification"),
+ ("data2vec-text", "Data2VecTextForTokenClassification"),
+ ("deberta", "DebertaForTokenClassification"),
+ ("deberta-v2", "DebertaV2ForTokenClassification"),
+ ("distilbert", "DistilBertForTokenClassification"),
+ ("electra", "ElectraForTokenClassification"),
+ ("ernie", "ErnieForTokenClassification"),
+ ("ernie_m", "ErnieMForTokenClassification"),
+ ("esm", "EsmForTokenClassification"),
+ ("falcon", "FalconForTokenClassification"),
+ ("flaubert", "FlaubertForTokenClassification"),
+ ("fnet", "FNetForTokenClassification"),
+ ("funnel", "FunnelForTokenClassification"),
+ ("gpt-sw3", "GPT2ForTokenClassification"),
+ ("gpt2", "GPT2ForTokenClassification"),
+ ("gpt_bigcode", "GPTBigCodeForTokenClassification"),
+ ("gpt_neo", "GPTNeoForTokenClassification"),
+ ("gpt_neox", "GPTNeoXForTokenClassification"),
+ ("ibert", "IBertForTokenClassification"),
+ ("layoutlm", "LayoutLMForTokenClassification"),
+ ("layoutlmv2", "LayoutLMv2ForTokenClassification"),
+ ("layoutlmv3", "LayoutLMv3ForTokenClassification"),
+ ("lilt", "LiltForTokenClassification"),
+ ("longformer", "LongformerForTokenClassification"),
+ ("luke", "LukeForTokenClassification"),
+ ("markuplm", "MarkupLMForTokenClassification"),
+ ("mega", "MegaForTokenClassification"),
+ ("megatron-bert", "MegatronBertForTokenClassification"),
+ ("mobilebert", "MobileBertForTokenClassification"),
+ ("mpnet", "MPNetForTokenClassification"),
+ ("mpt", "MptForTokenClassification"),
+ ("mra", "MraForTokenClassification"),
+ ("mt5", "MT5ForTokenClassification"),
+ ("nezha", "NezhaForTokenClassification"),
+ ("nystromformer", "NystromformerForTokenClassification"),
+ ("phi", "PhiForTokenClassification"),
+ ("qdqbert", "QDQBertForTokenClassification"),
+ ("rembert", "RemBertForTokenClassification"),
+ ("roberta", "RobertaForTokenClassification"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForTokenClassification"),
+ ("roc_bert", "RoCBertForTokenClassification"),
+ ("roformer", "RoFormerForTokenClassification"),
+ ("squeezebert", "SqueezeBertForTokenClassification"),
+ ("t5", "T5ForTokenClassification"),
+ ("umt5", "UMT5ForTokenClassification"),
+ ("xlm", "XLMForTokenClassification"),
+ ("xlm-roberta", "XLMRobertaForTokenClassification"),
+ ("xlm-roberta-xl", "XLMRobertaXLForTokenClassification"),
+ ("xlnet", "XLNetForTokenClassification"),
+ ("xmod", "XmodForTokenClassification"),
+ ("yoso", "YosoForTokenClassification"),
+ ]
+)
+
+MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Multiple Choice mapping
+ ("albert", "AlbertForMultipleChoice"),
+ ("bert", "BertForMultipleChoice"),
+ ("big_bird", "BigBirdForMultipleChoice"),
+ ("camembert", "CamembertForMultipleChoice"),
+ ("canine", "CanineForMultipleChoice"),
+ ("convbert", "ConvBertForMultipleChoice"),
+ ("data2vec-text", "Data2VecTextForMultipleChoice"),
+ ("deberta-v2", "DebertaV2ForMultipleChoice"),
+ ("distilbert", "DistilBertForMultipleChoice"),
+ ("electra", "ElectraForMultipleChoice"),
+ ("ernie", "ErnieForMultipleChoice"),
+ ("ernie_m", "ErnieMForMultipleChoice"),
+ ("flaubert", "FlaubertForMultipleChoice"),
+ ("fnet", "FNetForMultipleChoice"),
+ ("funnel", "FunnelForMultipleChoice"),
+ ("ibert", "IBertForMultipleChoice"),
+ ("longformer", "LongformerForMultipleChoice"),
+ ("luke", "LukeForMultipleChoice"),
+ ("mega", "MegaForMultipleChoice"),
+ ("megatron-bert", "MegatronBertForMultipleChoice"),
+ ("mobilebert", "MobileBertForMultipleChoice"),
+ ("mpnet", "MPNetForMultipleChoice"),
+ ("mra", "MraForMultipleChoice"),
+ ("nezha", "NezhaForMultipleChoice"),
+ ("nystromformer", "NystromformerForMultipleChoice"),
+ ("qdqbert", "QDQBertForMultipleChoice"),
+ ("rembert", "RemBertForMultipleChoice"),
+ ("roberta", "RobertaForMultipleChoice"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMultipleChoice"),
+ ("roc_bert", "RoCBertForMultipleChoice"),
+ ("roformer", "RoFormerForMultipleChoice"),
+ ("squeezebert", "SqueezeBertForMultipleChoice"),
+ ("xlm", "XLMForMultipleChoice"),
+ ("xlm-roberta", "XLMRobertaForMultipleChoice"),
+ ("xlm-roberta-xl", "XLMRobertaXLForMultipleChoice"),
+ ("xlnet", "XLNetForMultipleChoice"),
+ ("xmod", "XmodForMultipleChoice"),
+ ("yoso", "YosoForMultipleChoice"),
+ ]
+)
+
+MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
+ [
+ ("bert", "BertForNextSentencePrediction"),
+ ("ernie", "ErnieForNextSentencePrediction"),
+ ("fnet", "FNetForNextSentencePrediction"),
+ ("megatron-bert", "MegatronBertForNextSentencePrediction"),
+ ("mobilebert", "MobileBertForNextSentencePrediction"),
+ ("nezha", "NezhaForNextSentencePrediction"),
+ ("qdqbert", "QDQBertForNextSentencePrediction"),
+ ]
+)
+
+MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Audio Classification mapping
+ ("audio-spectrogram-transformer", "ASTForAudioClassification"),
+ ("data2vec-audio", "Data2VecAudioForSequenceClassification"),
+ ("hubert", "HubertForSequenceClassification"),
+ ("sew", "SEWForSequenceClassification"),
+ ("sew-d", "SEWDForSequenceClassification"),
+ ("unispeech", "UniSpeechForSequenceClassification"),
+ ("unispeech-sat", "UniSpeechSatForSequenceClassification"),
+ ("wav2vec2", "Wav2Vec2ForSequenceClassification"),
+ ("wav2vec2-bert", "Wav2Vec2BertForSequenceClassification"),
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForSequenceClassification"),
+ ("wavlm", "WavLMForSequenceClassification"),
+ ("whisper", "WhisperForAudioClassification"),
+ ]
+)
+
+MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Connectionist temporal classification (CTC) mapping
+ ("data2vec-audio", "Data2VecAudioForCTC"),
+ ("hubert", "HubertForCTC"),
+ ("mctct", "MCTCTForCTC"),
+ ("sew", "SEWForCTC"),
+ ("sew-d", "SEWDForCTC"),
+ ("unispeech", "UniSpeechForCTC"),
+ ("unispeech-sat", "UniSpeechSatForCTC"),
+ ("wav2vec2", "Wav2Vec2ForCTC"),
+ ("wav2vec2-bert", "Wav2Vec2BertForCTC"),
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForCTC"),
+ ("wavlm", "WavLMForCTC"),
+ ]
+)
+
+MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+        # Model for Audio Frame Classification mapping
+ ("data2vec-audio", "Data2VecAudioForAudioFrameClassification"),
+ ("unispeech-sat", "UniSpeechSatForAudioFrameClassification"),
+ ("wav2vec2", "Wav2Vec2ForAudioFrameClassification"),
+ ("wav2vec2-bert", "Wav2Vec2BertForAudioFrameClassification"),
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForAudioFrameClassification"),
+ ("wavlm", "WavLMForAudioFrameClassification"),
+ ]
+)
+
+MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = OrderedDict(
+ [
+        # Model for Audio XVector mapping
+ ("data2vec-audio", "Data2VecAudioForXVector"),
+ ("unispeech-sat", "UniSpeechSatForXVector"),
+ ("wav2vec2", "Wav2Vec2ForXVector"),
+ ("wav2vec2-bert", "Wav2Vec2BertForXVector"),
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForXVector"),
+ ("wavlm", "WavLMForXVector"),
+ ]
+)
+
+MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Text-To-Spectrogram mapping
+ ("fastspeech2_conformer", "FastSpeech2ConformerModel"),
+ ("speecht5", "SpeechT5ForTextToSpeech"),
+ ]
+)
+
+MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Text-To-Waveform mapping
+ ("bark", "BarkModel"),
+ ("fastspeech2_conformer", "FastSpeech2ConformerWithHifiGan"),
+ ("musicgen", "MusicgenForConditionalGeneration"),
+ ("musicgen_melody", "MusicgenMelodyForConditionalGeneration"),
+ ("seamless_m4t", "SeamlessM4TForTextToSpeech"),
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToSpeech"),
+ ("vits", "VitsModel"),
+ ]
+)
+
+MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Zero Shot Image Classification mapping
+ ("align", "AlignModel"),
+ ("altclip", "AltCLIPModel"),
+ ("blip", "BlipModel"),
+ ("chinese_clip", "ChineseCLIPModel"),
+ ("clip", "CLIPModel"),
+ ("clipseg", "CLIPSegModel"),
+ ("siglip", "SiglipModel"),
+ ]
+)
+
+MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict(
+ [
+ # Backbone mapping
+ ("beit", "BeitBackbone"),
+ ("bit", "BitBackbone"),
+ ("convnext", "ConvNextBackbone"),
+ ("convnextv2", "ConvNextV2Backbone"),
+ ("dinat", "DinatBackbone"),
+ ("dinov2", "Dinov2Backbone"),
+ ("focalnet", "FocalNetBackbone"),
+ ("maskformer-swin", "MaskFormerSwinBackbone"),
+ ("nat", "NatBackbone"),
+ ("pvt_v2", "PvtV2Backbone"),
+ ("resnet", "ResNetBackbone"),
+ ("swin", "SwinBackbone"),
+ ("swinv2", "Swinv2Backbone"),
+ ("timm_backbone", "TimmBackbone"),
+ ("vitdet", "VitDetBackbone"),
+ ]
+)
+
+MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict(
+ [
+ ("sam", "SamModel"),
+ ]
+)
+
+
+MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES = OrderedDict(
+ [
+ ("superpoint", "SuperPointForKeypointDetection"),
+ ]
+)
+
+
+MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict(
+ [
+ ("albert", "AlbertModel"),
+ ("bert", "BertModel"),
+ ("big_bird", "BigBirdModel"),
+ ("data2vec-text", "Data2VecTextModel"),
+ ("deberta", "DebertaModel"),
+ ("deberta-v2", "DebertaV2Model"),
+ ("distilbert", "DistilBertModel"),
+ ("electra", "ElectraModel"),
+ ("flaubert", "FlaubertModel"),
+ ("ibert", "IBertModel"),
+ ("longformer", "LongformerModel"),
+ ("mobilebert", "MobileBertModel"),
+ ("mt5", "MT5EncoderModel"),
+ ("nystromformer", "NystromformerModel"),
+ ("reformer", "ReformerModel"),
+ ("rembert", "RemBertModel"),
+ ("roberta", "RobertaModel"),
+ ("roberta-prelayernorm", "RobertaPreLayerNormModel"),
+ ("roc_bert", "RoCBertModel"),
+ ("roformer", "RoFormerModel"),
+ ("squeezebert", "SqueezeBertModel"),
+ ("t5", "T5EncoderModel"),
+ ("umt5", "UMT5EncoderModel"),
+ ("xlm", "XLMModel"),
+ ("xlm-roberta", "XLMRobertaModel"),
+ ("xlm-roberta-xl", "XLMRobertaXLModel"),
+ ]
+)
+
+MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ ("patchtsmixer", "PatchTSMixerForTimeSeriesClassification"),
+ ("patchtst", "PatchTSTForClassification"),
+ ]
+)
+
+MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict(
+ [
+ ("patchtsmixer", "PatchTSMixerForRegression"),
+ ("patchtst", "PatchTSTForRegression"),
+ ]
+)
+
+MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict(
+ [
+ ("swin2sr", "Swin2SRForImageSuperResolution"),
+ ]
+)
+
+MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
+MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES)
+MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES)
+MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
+MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES
+)
+MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
+)
+MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
+)
+MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES
+)
+MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES
+)
+MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES
+)
+MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES
+)
+MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES
+)
+MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
+MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES
+)
+MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
+)
+MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES)
+MODEL_FOR_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_MAPPING_NAMES)
+MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES
+)
+MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES)
+MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES
+)
+MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)
+MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+)
+MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
+)
+MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
+)
+MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES
+)
+MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
+)
+MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES)
+MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
+)
+MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
+)
+MODEL_FOR_CTC_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES)
+MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES)
+MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES
+)
+MODEL_FOR_AUDIO_XVECTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES)
+
+MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES
+)
+
+MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES)
+
+MODEL_FOR_BACKBONE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES)
+
+MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASK_GENERATION_MAPPING_NAMES)
+
+MODEL_FOR_KEYPOINT_DETECTION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES
+)
+
+MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES)
+
+MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES
+)
+
+MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES
+)
+
+MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)
+
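+# Usage sketch: each `_LazyAutoMapping` defined above behaves like a read-only
+# mapping keyed by config class, importing the concrete model class only on first
+# access. Illustrative example (no checkpoint download needed):
+#
+#     >>> from transformers import BertConfig
+#     >>> MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[BertConfig]  # doctest: +SKIP
+#     <class 'transformers.models.bert.modeling_bert.BertForTokenClassification'>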
+
+class AutoModelForMaskGeneration(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING
+
+
+class AutoModelForKeypointDetection(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_KEYPOINT_DETECTION_MAPPING
+
+
+class AutoModelForTextEncoding(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING
+
+
+class AutoModelForImageToImage(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING
+
+
+class AutoModel(_BaseAutoModelClass):
+ _model_mapping = MODEL_MAPPING
+
+
+AutoModel = auto_class_update(AutoModel)
+
+
+class AutoModelForPreTraining(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_PRETRAINING_MAPPING
+
+
+AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc="pretraining")
+
+
+# Private on purpose, the public class will add the deprecation warnings.
+class _AutoModelWithLMHead(_BaseAutoModelClass):
+ _model_mapping = MODEL_WITH_LM_HEAD_MAPPING
+
+
+_AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc="language modeling")
+
+
+class AutoModelForCausalLM(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
+
+
+AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc="causal language modeling")
+
+
+class AutoModelForMaskedLM(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_MASKED_LM_MAPPING
+
+
+AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc="masked language modeling")
+
+
+class AutoModelForSeq2SeqLM(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
+
+
+AutoModelForSeq2SeqLM = auto_class_update(
+ AutoModelForSeq2SeqLM,
+ head_doc="sequence-to-sequence language modeling",
+ checkpoint_for_example="google-t5/t5-base",
+)
+
+
+class AutoModelForSequenceClassification(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
+
+
+AutoModelForSequenceClassification = auto_class_update(
+ AutoModelForSequenceClassification, head_doc="sequence classification"
+)
+
+
+class AutoModelForQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
+
+
+AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc="question answering")
+
+
+class AutoModelForTableQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
+
+
+AutoModelForTableQuestionAnswering = auto_class_update(
+ AutoModelForTableQuestionAnswering,
+ head_doc="table question answering",
+ checkpoint_for_example="google/tapas-base-finetuned-wtq",
+)
+
+
+class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
+
+
+AutoModelForVisualQuestionAnswering = auto_class_update(
+ AutoModelForVisualQuestionAnswering,
+ head_doc="visual question answering",
+ checkpoint_for_example="dandelin/vilt-b32-finetuned-vqa",
+)
+
+
+class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
+
+
+AutoModelForDocumentQuestionAnswering = auto_class_update(
+ AutoModelForDocumentQuestionAnswering,
+ head_doc="document question answering",
+ checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3',
+)
+
+
+class AutoModelForTokenClassification(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
+
+
+AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc="token classification")
+
+
+class AutoModelForMultipleChoice(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING
+
+
+AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc="multiple choice")
+
+
+class AutoModelForNextSentencePrediction(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
+
+
+AutoModelForNextSentencePrediction = auto_class_update(
+ AutoModelForNextSentencePrediction, head_doc="next sentence prediction"
+)
+
+
+class AutoModelForImageClassification(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
+
+
+AutoModelForImageClassification = auto_class_update(AutoModelForImageClassification, head_doc="image classification")
+
+
+class AutoModelForZeroShotImageClassification(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
+
+
+AutoModelForZeroShotImageClassification = auto_class_update(
+ AutoModelForZeroShotImageClassification, head_doc="zero-shot image classification"
+)
+
+
+class AutoModelForImageSegmentation(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING
+
+
+AutoModelForImageSegmentation = auto_class_update(AutoModelForImageSegmentation, head_doc="image segmentation")
+
+
+class AutoModelForSemanticSegmentation(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
+
+
+AutoModelForSemanticSegmentation = auto_class_update(
+ AutoModelForSemanticSegmentation, head_doc="semantic segmentation"
+)
+
+
+class AutoModelForUniversalSegmentation(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING
+
+
+AutoModelForUniversalSegmentation = auto_class_update(
+ AutoModelForUniversalSegmentation, head_doc="universal image segmentation"
+)
+
+
+class AutoModelForInstanceSegmentation(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING
+
+
+AutoModelForInstanceSegmentation = auto_class_update(
+ AutoModelForInstanceSegmentation, head_doc="instance segmentation"
+)
+
+
+class AutoModelForObjectDetection(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
+
+
+AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection")
+
+
+class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
+
+
+AutoModelForZeroShotObjectDetection = auto_class_update(
+ AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection"
+)
+
+
+class AutoModelForDepthEstimation(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
+
+
+AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc="depth estimation")
+
+
+class AutoModelForVideoClassification(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
+
+
+AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification")
+
+
+class AutoModelForVision2Seq(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING
+
+
+AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc="vision-to-text modeling")
+
+
+class AutoModelForAudioClassification(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
+
+
+AutoModelForAudioClassification = auto_class_update(AutoModelForAudioClassification, head_doc="audio classification")
+
+
+class AutoModelForCTC(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_CTC_MAPPING
+
+
+AutoModelForCTC = auto_class_update(AutoModelForCTC, head_doc="connectionist temporal classification")
+
+
+class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
+
+
+AutoModelForSpeechSeq2Seq = auto_class_update(
+ AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
+)
+
+
+class AutoModelForAudioFrameClassification(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING
+
+
+AutoModelForAudioFrameClassification = auto_class_update(
+ AutoModelForAudioFrameClassification, head_doc="audio frame (token) classification"
+)
+
+
+class AutoModelForAudioXVector(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING
+
+
+class AutoModelForTextToSpectrogram(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
+
+
+class AutoModelForTextToWaveform(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING
+
+
+class AutoBackbone(_BaseAutoBackboneClass):
+ _model_mapping = MODEL_FOR_BACKBONE_MAPPING
+
+
+AutoModelForAudioXVector = auto_class_update(AutoModelForAudioXVector, head_doc="audio retrieval via x-vector")
+
+
+class AutoModelForMaskedImageModeling(_BaseAutoModelClass):
+ _model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
+
+
+AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc="masked image modeling")
+
+
+class AutoModelWithLMHead(_AutoModelWithLMHead):
+ @classmethod
+ def from_config(cls, config):
+ warnings.warn(
+ "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
+ "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
+ "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
+ FutureWarning,
+ )
+ return super().from_config(config)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+ warnings.warn(
+ "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
+ "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
+ "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
+ FutureWarning,
+ )
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
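+
+
+# Migration sketch for the deprecation above (checkpoint names are illustrative):
+#
+#     >>> from transformers import AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForSeq2SeqLM
+#     >>> causal_lm = AutoModelForCausalLM.from_pretrained("gpt2")
+#     >>> masked_lm = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
+#     >>> seq2seq_lm = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")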
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8e62bf0f2a3b233ff7bfb8410152b0a8b85462b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py
@@ -0,0 +1,382 @@
+# coding=utf-8
+# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Auto Model class."""
+
+
+from collections import OrderedDict
+
+from ...utils import logging
+from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
+from .configuration_auto import CONFIG_MAPPING_NAMES
+
+
+logger = logging.get_logger(__name__)
+
+
+FLAX_MODEL_MAPPING_NAMES = OrderedDict(
+ [
+ # Base model mapping
+ ("albert", "FlaxAlbertModel"),
+ ("bart", "FlaxBartModel"),
+ ("beit", "FlaxBeitModel"),
+ ("bert", "FlaxBertModel"),
+ ("big_bird", "FlaxBigBirdModel"),
+ ("blenderbot", "FlaxBlenderbotModel"),
+ ("blenderbot-small", "FlaxBlenderbotSmallModel"),
+ ("bloom", "FlaxBloomModel"),
+ ("clip", "FlaxCLIPModel"),
+ ("distilbert", "FlaxDistilBertModel"),
+ ("electra", "FlaxElectraModel"),
+ ("gemma", "FlaxGemmaModel"),
+ ("gpt-sw3", "FlaxGPT2Model"),
+ ("gpt2", "FlaxGPT2Model"),
+ ("gpt_neo", "FlaxGPTNeoModel"),
+ ("gptj", "FlaxGPTJModel"),
+ ("llama", "FlaxLlamaModel"),
+ ("longt5", "FlaxLongT5Model"),
+ ("marian", "FlaxMarianModel"),
+ ("mbart", "FlaxMBartModel"),
+ ("mistral", "FlaxMistralModel"),
+ ("mt5", "FlaxMT5Model"),
+ ("opt", "FlaxOPTModel"),
+ ("pegasus", "FlaxPegasusModel"),
+ ("regnet", "FlaxRegNetModel"),
+ ("resnet", "FlaxResNetModel"),
+ ("roberta", "FlaxRobertaModel"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
+ ("roformer", "FlaxRoFormerModel"),
+ ("t5", "FlaxT5Model"),
+ ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
+ ("vit", "FlaxViTModel"),
+ ("wav2vec2", "FlaxWav2Vec2Model"),
+ ("whisper", "FlaxWhisperModel"),
+ ("xglm", "FlaxXGLMModel"),
+ ("xlm-roberta", "FlaxXLMRobertaModel"),
+ ]
+)
+
+FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for pre-training mapping
+ ("albert", "FlaxAlbertForPreTraining"),
+ ("bart", "FlaxBartForConditionalGeneration"),
+ ("bert", "FlaxBertForPreTraining"),
+ ("big_bird", "FlaxBigBirdForPreTraining"),
+ ("electra", "FlaxElectraForPreTraining"),
+ ("longt5", "FlaxLongT5ForConditionalGeneration"),
+ ("mbart", "FlaxMBartForConditionalGeneration"),
+ ("mt5", "FlaxMT5ForConditionalGeneration"),
+ ("roberta", "FlaxRobertaForMaskedLM"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
+ ("roformer", "FlaxRoFormerForMaskedLM"),
+ ("t5", "FlaxT5ForConditionalGeneration"),
+ ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
+ ("whisper", "FlaxWhisperForConditionalGeneration"),
+ ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
+ ]
+)
+
+FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Masked LM mapping
+ ("albert", "FlaxAlbertForMaskedLM"),
+ ("bart", "FlaxBartForConditionalGeneration"),
+ ("bert", "FlaxBertForMaskedLM"),
+ ("big_bird", "FlaxBigBirdForMaskedLM"),
+ ("distilbert", "FlaxDistilBertForMaskedLM"),
+ ("electra", "FlaxElectraForMaskedLM"),
+ ("mbart", "FlaxMBartForConditionalGeneration"),
+ ("roberta", "FlaxRobertaForMaskedLM"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
+ ("roformer", "FlaxRoFormerForMaskedLM"),
+ ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
+ ]
+)
+
+FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Seq2Seq Causal LM mapping
+ ("bart", "FlaxBartForConditionalGeneration"),
+ ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
+ ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
+ ("encoder-decoder", "FlaxEncoderDecoderModel"),
+ ("longt5", "FlaxLongT5ForConditionalGeneration"),
+ ("marian", "FlaxMarianMTModel"),
+ ("mbart", "FlaxMBartForConditionalGeneration"),
+ ("mt5", "FlaxMT5ForConditionalGeneration"),
+ ("pegasus", "FlaxPegasusForConditionalGeneration"),
+ ("t5", "FlaxT5ForConditionalGeneration"),
+ ]
+)
+
+FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+        # Model for Image Classification mapping
+ ("beit", "FlaxBeitForImageClassification"),
+ ("regnet", "FlaxRegNetForImageClassification"),
+ ("resnet", "FlaxResNetForImageClassification"),
+ ("vit", "FlaxViTForImageClassification"),
+ ]
+)
+
+FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
+ [
+ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
+ ]
+)
+
+FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Causal LM mapping
+ ("bart", "FlaxBartForCausalLM"),
+ ("bert", "FlaxBertForCausalLM"),
+ ("big_bird", "FlaxBigBirdForCausalLM"),
+ ("bloom", "FlaxBloomForCausalLM"),
+ ("electra", "FlaxElectraForCausalLM"),
+ ("gemma", "FlaxGemmaForCausalLM"),
+ ("gpt-sw3", "FlaxGPT2LMHeadModel"),
+ ("gpt2", "FlaxGPT2LMHeadModel"),
+ ("gpt_neo", "FlaxGPTNeoForCausalLM"),
+ ("gptj", "FlaxGPTJForCausalLM"),
+ ("llama", "FlaxLlamaForCausalLM"),
+ ("mistral", "FlaxMistralForCausalLM"),
+ ("opt", "FlaxOPTForCausalLM"),
+ ("roberta", "FlaxRobertaForCausalLM"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
+ ("xglm", "FlaxXGLMForCausalLM"),
+ ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
+ ]
+)
+
+FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Sequence Classification mapping
+ ("albert", "FlaxAlbertForSequenceClassification"),
+ ("bart", "FlaxBartForSequenceClassification"),
+ ("bert", "FlaxBertForSequenceClassification"),
+ ("big_bird", "FlaxBigBirdForSequenceClassification"),
+ ("distilbert", "FlaxDistilBertForSequenceClassification"),
+ ("electra", "FlaxElectraForSequenceClassification"),
+ ("mbart", "FlaxMBartForSequenceClassification"),
+ ("roberta", "FlaxRobertaForSequenceClassification"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
+ ("roformer", "FlaxRoFormerForSequenceClassification"),
+ ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
+ ]
+)
+
+FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Question Answering mapping
+ ("albert", "FlaxAlbertForQuestionAnswering"),
+ ("bart", "FlaxBartForQuestionAnswering"),
+ ("bert", "FlaxBertForQuestionAnswering"),
+ ("big_bird", "FlaxBigBirdForQuestionAnswering"),
+ ("distilbert", "FlaxDistilBertForQuestionAnswering"),
+ ("electra", "FlaxElectraForQuestionAnswering"),
+ ("mbart", "FlaxMBartForQuestionAnswering"),
+ ("roberta", "FlaxRobertaForQuestionAnswering"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
+ ("roformer", "FlaxRoFormerForQuestionAnswering"),
+ ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
+ ]
+)
+
+FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Token Classification mapping
+ ("albert", "FlaxAlbertForTokenClassification"),
+ ("bert", "FlaxBertForTokenClassification"),
+ ("big_bird", "FlaxBigBirdForTokenClassification"),
+ ("distilbert", "FlaxDistilBertForTokenClassification"),
+ ("electra", "FlaxElectraForTokenClassification"),
+ ("roberta", "FlaxRobertaForTokenClassification"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
+ ("roformer", "FlaxRoFormerForTokenClassification"),
+ ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
+ ]
+)
+
+FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Multiple Choice mapping
+ ("albert", "FlaxAlbertForMultipleChoice"),
+ ("bert", "FlaxBertForMultipleChoice"),
+ ("big_bird", "FlaxBigBirdForMultipleChoice"),
+ ("distilbert", "FlaxDistilBertForMultipleChoice"),
+ ("electra", "FlaxElectraForMultipleChoice"),
+ ("roberta", "FlaxRobertaForMultipleChoice"),
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
+ ("roformer", "FlaxRoFormerForMultipleChoice"),
+ ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
+ ]
+)
+
+FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
+ [
+ ("bert", "FlaxBertForNextSentencePrediction"),
+ ]
+)
+
+FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
+ [
+ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
+ ("whisper", "FlaxWhisperForConditionalGeneration"),
+ ]
+)
+
+FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ ("whisper", "FlaxWhisperForAudioClassification"),
+ ]
+)
+
+FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
+FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
+FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
+FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
+FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
+FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
+)
+FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
+)
+
+
+class FlaxAutoModel(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_MAPPING
+
+
+FlaxAutoModel = auto_class_update(FlaxAutoModel)
+
+
+class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
+
+
+FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
+
+
+class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
+
+
+FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
+
+
+class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
+
+
+FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
+
+
+class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
+
+
+FlaxAutoModelForSeq2SeqLM = auto_class_update(
+ FlaxAutoModelForSeq2SeqLM,
+ head_doc="sequence-to-sequence language modeling",
+ checkpoint_for_example="google-t5/t5-base",
+)
+
+
+class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
+
+
+FlaxAutoModelForSequenceClassification = auto_class_update(
+ FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
+)
+
+
+class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
+
+
+FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
+
+
+class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
+
+
+FlaxAutoModelForTokenClassification = auto_class_update(
+ FlaxAutoModelForTokenClassification, head_doc="token classification"
+)
+
+
+class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
+
+
+FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
+
+
+class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
+
+
+FlaxAutoModelForNextSentencePrediction = auto_class_update(
+ FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
+)
+
+
+class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
+
+
+FlaxAutoModelForImageClassification = auto_class_update(
+ FlaxAutoModelForImageClassification, head_doc="image classification"
+)
+
+
+class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
+
+
+FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
+
+
+class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
+ _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
+
+
+FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
+ FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
+)
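+
+
+# Usage sketch (checkpoint name is illustrative): the Flax auto classes mirror their
+# PyTorch counterparts and pick the Flax architecture from the checkpoint's config:
+#
+#     >>> from transformers import FlaxAutoModelForSeq2SeqLM
+#     >>> model = FlaxAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")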
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..deed743162e4774751af454a755aad020219cbe0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py
@@ -0,0 +1,721 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Auto Model class."""
+
+
+import warnings
+from collections import OrderedDict
+
+from ...utils import logging
+from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
+from .configuration_auto import CONFIG_MAPPING_NAMES
+
+
+logger = logging.get_logger(__name__)
+
+
+TF_MODEL_MAPPING_NAMES = OrderedDict(
+ [
+ # Base model mapping
+ ("albert", "TFAlbertModel"),
+ ("bart", "TFBartModel"),
+ ("bert", "TFBertModel"),
+ ("blenderbot", "TFBlenderbotModel"),
+ ("blenderbot-small", "TFBlenderbotSmallModel"),
+ ("blip", "TFBlipModel"),
+ ("camembert", "TFCamembertModel"),
+ ("clip", "TFCLIPModel"),
+ ("convbert", "TFConvBertModel"),
+ ("convnext", "TFConvNextModel"),
+ ("convnextv2", "TFConvNextV2Model"),
+ ("ctrl", "TFCTRLModel"),
+ ("cvt", "TFCvtModel"),
+ ("data2vec-vision", "TFData2VecVisionModel"),
+ ("deberta", "TFDebertaModel"),
+ ("deberta-v2", "TFDebertaV2Model"),
+ ("deit", "TFDeiTModel"),
+ ("distilbert", "TFDistilBertModel"),
+ ("dpr", "TFDPRQuestionEncoder"),
+ ("efficientformer", "TFEfficientFormerModel"),
+ ("electra", "TFElectraModel"),
+ ("esm", "TFEsmModel"),
+ ("flaubert", "TFFlaubertModel"),
+ ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")),
+ ("gpt-sw3", "TFGPT2Model"),
+ ("gpt2", "TFGPT2Model"),
+ ("gptj", "TFGPTJModel"),
+ ("groupvit", "TFGroupViTModel"),
+ ("hubert", "TFHubertModel"),
+ ("layoutlm", "TFLayoutLMModel"),
+ ("layoutlmv3", "TFLayoutLMv3Model"),
+ ("led", "TFLEDModel"),
+ ("longformer", "TFLongformerModel"),
+ ("lxmert", "TFLxmertModel"),
+ ("marian", "TFMarianModel"),
+ ("mbart", "TFMBartModel"),
+ ("mobilebert", "TFMobileBertModel"),
+ ("mobilevit", "TFMobileViTModel"),
+ ("mpnet", "TFMPNetModel"),
+ ("mt5", "TFMT5Model"),
+ ("openai-gpt", "TFOpenAIGPTModel"),
+ ("opt", "TFOPTModel"),
+ ("pegasus", "TFPegasusModel"),
+ ("regnet", "TFRegNetModel"),
+ ("rembert", "TFRemBertModel"),
+ ("resnet", "TFResNetModel"),
+ ("roberta", "TFRobertaModel"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"),
+ ("roformer", "TFRoFormerModel"),
+ ("sam", "TFSamModel"),
+ ("segformer", "TFSegformerModel"),
+ ("speech_to_text", "TFSpeech2TextModel"),
+ ("swin", "TFSwinModel"),
+ ("t5", "TFT5Model"),
+ ("tapas", "TFTapasModel"),
+ ("transfo-xl", "TFTransfoXLModel"),
+ ("vision-text-dual-encoder", "TFVisionTextDualEncoderModel"),
+ ("vit", "TFViTModel"),
+ ("vit_mae", "TFViTMAEModel"),
+ ("wav2vec2", "TFWav2Vec2Model"),
+ ("whisper", "TFWhisperModel"),
+ ("xglm", "TFXGLMModel"),
+ ("xlm", "TFXLMModel"),
+ ("xlm-roberta", "TFXLMRobertaModel"),
+ ("xlnet", "TFXLNetModel"),
+ ]
+)
+
+TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for pre-training mapping
+ ("albert", "TFAlbertForPreTraining"),
+ ("bart", "TFBartForConditionalGeneration"),
+ ("bert", "TFBertForPreTraining"),
+ ("camembert", "TFCamembertForMaskedLM"),
+ ("ctrl", "TFCTRLLMHeadModel"),
+ ("distilbert", "TFDistilBertForMaskedLM"),
+ ("electra", "TFElectraForPreTraining"),
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
+ ("funnel", "TFFunnelForPreTraining"),
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
+ ("gpt2", "TFGPT2LMHeadModel"),
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
+ ("lxmert", "TFLxmertForPreTraining"),
+ ("mobilebert", "TFMobileBertForPreTraining"),
+ ("mpnet", "TFMPNetForMaskedLM"),
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
+ ("roberta", "TFRobertaForMaskedLM"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
+ ("t5", "TFT5ForConditionalGeneration"),
+ ("tapas", "TFTapasForMaskedLM"),
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
+ ("vit_mae", "TFViTMAEForPreTraining"),
+ ("xlm", "TFXLMWithLMHeadModel"),
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
+ ("xlnet", "TFXLNetLMHeadModel"),
+ ]
+)
+
+TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
+ [
+ # Model with LM heads mapping
+ ("albert", "TFAlbertForMaskedLM"),
+ ("bart", "TFBartForConditionalGeneration"),
+ ("bert", "TFBertForMaskedLM"),
+ ("camembert", "TFCamembertForMaskedLM"),
+ ("convbert", "TFConvBertForMaskedLM"),
+ ("ctrl", "TFCTRLLMHeadModel"),
+ ("distilbert", "TFDistilBertForMaskedLM"),
+ ("electra", "TFElectraForMaskedLM"),
+ ("esm", "TFEsmForMaskedLM"),
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
+ ("funnel", "TFFunnelForMaskedLM"),
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
+ ("gpt2", "TFGPT2LMHeadModel"),
+ ("gptj", "TFGPTJForCausalLM"),
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
+ ("led", "TFLEDForConditionalGeneration"),
+ ("longformer", "TFLongformerForMaskedLM"),
+ ("marian", "TFMarianMTModel"),
+ ("mobilebert", "TFMobileBertForMaskedLM"),
+ ("mpnet", "TFMPNetForMaskedLM"),
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
+ ("rembert", "TFRemBertForMaskedLM"),
+ ("roberta", "TFRobertaForMaskedLM"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
+ ("roformer", "TFRoFormerForMaskedLM"),
+ ("speech_to_text", "TFSpeech2TextForConditionalGeneration"),
+ ("t5", "TFT5ForConditionalGeneration"),
+ ("tapas", "TFTapasForMaskedLM"),
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
+ ("whisper", "TFWhisperForConditionalGeneration"),
+ ("xlm", "TFXLMWithLMHeadModel"),
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
+ ("xlnet", "TFXLNetLMHeadModel"),
+ ]
+)
+
+TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Causal LM mapping
+ ("bert", "TFBertLMHeadModel"),
+ ("camembert", "TFCamembertForCausalLM"),
+ ("ctrl", "TFCTRLLMHeadModel"),
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
+ ("gpt2", "TFGPT2LMHeadModel"),
+ ("gptj", "TFGPTJForCausalLM"),
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
+ ("opt", "TFOPTForCausalLM"),
+ ("rembert", "TFRemBertForCausalLM"),
+ ("roberta", "TFRobertaForCausalLM"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForCausalLM"),
+ ("roformer", "TFRoFormerForCausalLM"),
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
+ ("xglm", "TFXGLMForCausalLM"),
+ ("xlm", "TFXLMWithLMHeadModel"),
+ ("xlm-roberta", "TFXLMRobertaForCausalLM"),
+ ("xlnet", "TFXLNetLMHeadModel"),
+ ]
+)
+
+TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
+ [
+ ("deit", "TFDeiTForMaskedImageModeling"),
+ ("swin", "TFSwinForMaskedImageModeling"),
+ ]
+)
+
+TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+        # Model for Image Classification mapping
+ ("convnext", "TFConvNextForImageClassification"),
+ ("convnextv2", "TFConvNextV2ForImageClassification"),
+ ("cvt", "TFCvtForImageClassification"),
+ ("data2vec-vision", "TFData2VecVisionForImageClassification"),
+ ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")),
+ (
+ "efficientformer",
+ ("TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher"),
+ ),
+ ("mobilevit", "TFMobileViTForImageClassification"),
+ ("regnet", "TFRegNetForImageClassification"),
+ ("resnet", "TFResNetForImageClassification"),
+ ("segformer", "TFSegformerForImageClassification"),
+ ("swin", "TFSwinForImageClassification"),
+ ("vit", "TFViTForImageClassification"),
+ ]
+)
+
+
+TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Zero Shot Image Classification mapping
+ ("blip", "TFBlipModel"),
+ ("clip", "TFCLIPModel"),
+ ]
+)
+
+
+TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Semantic Segmentation mapping
+ ("data2vec-vision", "TFData2VecVisionForSemanticSegmentation"),
+ ("mobilevit", "TFMobileViTForSemanticSegmentation"),
+ ("segformer", "TFSegformerForSemanticSegmentation"),
+ ]
+)
+
+TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
+ [
+ ("blip", "TFBlipForConditionalGeneration"),
+ ("vision-encoder-decoder", "TFVisionEncoderDecoderModel"),
+ ]
+)
+
+TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Masked LM mapping
+ ("albert", "TFAlbertForMaskedLM"),
+ ("bert", "TFBertForMaskedLM"),
+ ("camembert", "TFCamembertForMaskedLM"),
+ ("convbert", "TFConvBertForMaskedLM"),
+ ("deberta", "TFDebertaForMaskedLM"),
+ ("deberta-v2", "TFDebertaV2ForMaskedLM"),
+ ("distilbert", "TFDistilBertForMaskedLM"),
+ ("electra", "TFElectraForMaskedLM"),
+ ("esm", "TFEsmForMaskedLM"),
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
+ ("funnel", "TFFunnelForMaskedLM"),
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
+ ("longformer", "TFLongformerForMaskedLM"),
+ ("mobilebert", "TFMobileBertForMaskedLM"),
+ ("mpnet", "TFMPNetForMaskedLM"),
+ ("rembert", "TFRemBertForMaskedLM"),
+ ("roberta", "TFRobertaForMaskedLM"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
+ ("roformer", "TFRoFormerForMaskedLM"),
+ ("tapas", "TFTapasForMaskedLM"),
+ ("xlm", "TFXLMWithLMHeadModel"),
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
+ ]
+)
+
+TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Seq2Seq Causal LM mapping
+ ("bart", "TFBartForConditionalGeneration"),
+ ("blenderbot", "TFBlenderbotForConditionalGeneration"),
+ ("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"),
+ ("encoder-decoder", "TFEncoderDecoderModel"),
+ ("led", "TFLEDForConditionalGeneration"),
+ ("marian", "TFMarianMTModel"),
+ ("mbart", "TFMBartForConditionalGeneration"),
+ ("mt5", "TFMT5ForConditionalGeneration"),
+ ("pegasus", "TFPegasusForConditionalGeneration"),
+ ("t5", "TFT5ForConditionalGeneration"),
+ ]
+)
+
+TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
+ [
+ ("speech_to_text", "TFSpeech2TextForConditionalGeneration"),
+ ("whisper", "TFWhisperForConditionalGeneration"),
+ ]
+)
+
+TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Sequence Classification mapping
+ ("albert", "TFAlbertForSequenceClassification"),
+ ("bart", "TFBartForSequenceClassification"),
+ ("bert", "TFBertForSequenceClassification"),
+ ("camembert", "TFCamembertForSequenceClassification"),
+ ("convbert", "TFConvBertForSequenceClassification"),
+ ("ctrl", "TFCTRLForSequenceClassification"),
+ ("deberta", "TFDebertaForSequenceClassification"),
+ ("deberta-v2", "TFDebertaV2ForSequenceClassification"),
+ ("distilbert", "TFDistilBertForSequenceClassification"),
+ ("electra", "TFElectraForSequenceClassification"),
+ ("esm", "TFEsmForSequenceClassification"),
+ ("flaubert", "TFFlaubertForSequenceClassification"),
+ ("funnel", "TFFunnelForSequenceClassification"),
+ ("gpt-sw3", "TFGPT2ForSequenceClassification"),
+ ("gpt2", "TFGPT2ForSequenceClassification"),
+ ("gptj", "TFGPTJForSequenceClassification"),
+ ("layoutlm", "TFLayoutLMForSequenceClassification"),
+ ("layoutlmv3", "TFLayoutLMv3ForSequenceClassification"),
+ ("longformer", "TFLongformerForSequenceClassification"),
+ ("mobilebert", "TFMobileBertForSequenceClassification"),
+ ("mpnet", "TFMPNetForSequenceClassification"),
+ ("openai-gpt", "TFOpenAIGPTForSequenceClassification"),
+ ("rembert", "TFRemBertForSequenceClassification"),
+ ("roberta", "TFRobertaForSequenceClassification"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForSequenceClassification"),
+ ("roformer", "TFRoFormerForSequenceClassification"),
+ ("tapas", "TFTapasForSequenceClassification"),
+ ("transfo-xl", "TFTransfoXLForSequenceClassification"),
+ ("xlm", "TFXLMForSequenceClassification"),
+ ("xlm-roberta", "TFXLMRobertaForSequenceClassification"),
+ ("xlnet", "TFXLNetForSequenceClassification"),
+ ]
+)
+
+TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Question Answering mapping
+ ("albert", "TFAlbertForQuestionAnswering"),
+ ("bert", "TFBertForQuestionAnswering"),
+ ("camembert", "TFCamembertForQuestionAnswering"),
+ ("convbert", "TFConvBertForQuestionAnswering"),
+ ("deberta", "TFDebertaForQuestionAnswering"),
+ ("deberta-v2", "TFDebertaV2ForQuestionAnswering"),
+ ("distilbert", "TFDistilBertForQuestionAnswering"),
+ ("electra", "TFElectraForQuestionAnswering"),
+ ("flaubert", "TFFlaubertForQuestionAnsweringSimple"),
+ ("funnel", "TFFunnelForQuestionAnswering"),
+ ("gptj", "TFGPTJForQuestionAnswering"),
+ ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"),
+ ("longformer", "TFLongformerForQuestionAnswering"),
+ ("mobilebert", "TFMobileBertForQuestionAnswering"),
+ ("mpnet", "TFMPNetForQuestionAnswering"),
+ ("rembert", "TFRemBertForQuestionAnswering"),
+ ("roberta", "TFRobertaForQuestionAnswering"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForQuestionAnswering"),
+ ("roformer", "TFRoFormerForQuestionAnswering"),
+ ("xlm", "TFXLMForQuestionAnsweringSimple"),
+ ("xlm-roberta", "TFXLMRobertaForQuestionAnswering"),
+ ("xlnet", "TFXLNetForQuestionAnsweringSimple"),
+ ]
+)
+
+TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([("wav2vec2", "TFWav2Vec2ForSequenceClassification")])
+
+TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ ("layoutlm", "TFLayoutLMForQuestionAnswering"),
+ ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"),
+ ]
+)
+
+
+TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Table Question Answering mapping
+ ("tapas", "TFTapasForQuestionAnswering"),
+ ]
+)
+
+TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Token Classification mapping
+ ("albert", "TFAlbertForTokenClassification"),
+ ("bert", "TFBertForTokenClassification"),
+ ("camembert", "TFCamembertForTokenClassification"),
+ ("convbert", "TFConvBertForTokenClassification"),
+ ("deberta", "TFDebertaForTokenClassification"),
+ ("deberta-v2", "TFDebertaV2ForTokenClassification"),
+ ("distilbert", "TFDistilBertForTokenClassification"),
+ ("electra", "TFElectraForTokenClassification"),
+ ("esm", "TFEsmForTokenClassification"),
+ ("flaubert", "TFFlaubertForTokenClassification"),
+ ("funnel", "TFFunnelForTokenClassification"),
+ ("layoutlm", "TFLayoutLMForTokenClassification"),
+ ("layoutlmv3", "TFLayoutLMv3ForTokenClassification"),
+ ("longformer", "TFLongformerForTokenClassification"),
+ ("mobilebert", "TFMobileBertForTokenClassification"),
+ ("mpnet", "TFMPNetForTokenClassification"),
+ ("rembert", "TFRemBertForTokenClassification"),
+ ("roberta", "TFRobertaForTokenClassification"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForTokenClassification"),
+ ("roformer", "TFRoFormerForTokenClassification"),
+ ("xlm", "TFXLMForTokenClassification"),
+ ("xlm-roberta", "TFXLMRobertaForTokenClassification"),
+ ("xlnet", "TFXLNetForTokenClassification"),
+ ]
+)
+
+TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
+ [
+ # Model for Multiple Choice mapping
+ ("albert", "TFAlbertForMultipleChoice"),
+ ("bert", "TFBertForMultipleChoice"),
+ ("camembert", "TFCamembertForMultipleChoice"),
+ ("convbert", "TFConvBertForMultipleChoice"),
+ ("deberta-v2", "TFDebertaV2ForMultipleChoice"),
+ ("distilbert", "TFDistilBertForMultipleChoice"),
+ ("electra", "TFElectraForMultipleChoice"),
+ ("flaubert", "TFFlaubertForMultipleChoice"),
+ ("funnel", "TFFunnelForMultipleChoice"),
+ ("longformer", "TFLongformerForMultipleChoice"),
+ ("mobilebert", "TFMobileBertForMultipleChoice"),
+ ("mpnet", "TFMPNetForMultipleChoice"),
+ ("rembert", "TFRemBertForMultipleChoice"),
+ ("roberta", "TFRobertaForMultipleChoice"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMultipleChoice"),
+ ("roformer", "TFRoFormerForMultipleChoice"),
+ ("xlm", "TFXLMForMultipleChoice"),
+ ("xlm-roberta", "TFXLMRobertaForMultipleChoice"),
+ ("xlnet", "TFXLNetForMultipleChoice"),
+ ]
+)
+
+TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
+ [
+ ("bert", "TFBertForNextSentencePrediction"),
+ ("mobilebert", "TFMobileBertForNextSentencePrediction"),
+ ]
+)
+
+TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict(
+ [
+ ("sam", "TFSamModel"),
+ ]
+)
+
+TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict(
+ [
+ ("albert", "TFAlbertModel"),
+ ("bert", "TFBertModel"),
+ ("convbert", "TFConvBertModel"),
+ ("deberta", "TFDebertaModel"),
+ ("deberta-v2", "TFDebertaV2Model"),
+ ("distilbert", "TFDistilBertModel"),
+ ("electra", "TFElectraModel"),
+ ("flaubert", "TFFlaubertModel"),
+ ("longformer", "TFLongformerModel"),
+ ("mobilebert", "TFMobileBertModel"),
+ ("mt5", "TFMT5EncoderModel"),
+ ("rembert", "TFRemBertModel"),
+ ("roberta", "TFRobertaModel"),
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"),
+ ("roformer", "TFRoFormerModel"),
+ ("t5", "TFT5EncoderModel"),
+ ("xlm", "TFXLMModel"),
+ ("xlm-roberta", "TFXLMRobertaModel"),
+ ]
+)
+
+TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES)
+TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
+TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES)
+TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
+TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES
+)
+TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
+)
+TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
+)
+TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES
+)
+TF_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
+TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
+TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+)
+TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
+)
+TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
+)
+TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
+)
+TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
+)
+TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES
+)
+TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
+)
+TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
+)
+TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
+)
+TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
+)
+
+TF_MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES
+)
+
+TF_MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES)
+
+
+class TFAutoModelForMaskGeneration(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_MASK_GENERATION_MAPPING
+
+
+class TFAutoModelForTextEncoding(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_TEXT_ENCODING_MAPPING
+
+
+class TFAutoModel(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_MAPPING
+
+
+TFAutoModel = auto_class_update(TFAutoModel)
+
+
+class TFAutoModelForAudioClassification(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
+
+
+TFAutoModelForAudioClassification = auto_class_update(
+ TFAutoModelForAudioClassification, head_doc="audio classification"
+)
+
+
+class TFAutoModelForPreTraining(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING
+
+
+TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining")
+
+
+# Private on purpose, the public class will add the deprecation warnings.
+class _TFAutoModelWithLMHead(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING
+
+
+_TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling")
+
+
+class TFAutoModelForCausalLM(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
+
+
+TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling")
+
+
+class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
+
+
+TFAutoModelForMaskedImageModeling = auto_class_update(
+ TFAutoModelForMaskedImageModeling, head_doc="masked image modeling"
+)
+
+
+class TFAutoModelForImageClassification(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
+
+
+TFAutoModelForImageClassification = auto_class_update(
+ TFAutoModelForImageClassification, head_doc="image classification"
+)
+
+
+class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
+
+
+TFAutoModelForZeroShotImageClassification = auto_class_update(
+ TFAutoModelForZeroShotImageClassification, head_doc="zero-shot image classification"
+)
+
+
+class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
+
+
+TFAutoModelForSemanticSegmentation = auto_class_update(
+ TFAutoModelForSemanticSegmentation, head_doc="semantic segmentation"
+)
+
+
+class TFAutoModelForVision2Seq(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
+
+
+TFAutoModelForVision2Seq = auto_class_update(TFAutoModelForVision2Seq, head_doc="vision-to-text modeling")
+
+
+class TFAutoModelForMaskedLM(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
+
+
+TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling")
+
+
+class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
+
+
+TFAutoModelForSeq2SeqLM = auto_class_update(
+ TFAutoModelForSeq2SeqLM,
+ head_doc="sequence-to-sequence language modeling",
+ checkpoint_for_example="google-t5/t5-base",
+)
+
+
+class TFAutoModelForSequenceClassification(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
+
+
+TFAutoModelForSequenceClassification = auto_class_update(
+ TFAutoModelForSequenceClassification, head_doc="sequence classification"
+)
+
+
+class TFAutoModelForQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
+
+
+TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering")
+
+
+class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
+
+
+TFAutoModelForDocumentQuestionAnswering = auto_class_update(
+ TFAutoModelForDocumentQuestionAnswering,
+ head_doc="document question answering",
+ checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3',
+)
+
+
+class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
+
+
+TFAutoModelForTableQuestionAnswering = auto_class_update(
+ TFAutoModelForTableQuestionAnswering,
+ head_doc="table question answering",
+ checkpoint_for_example="google/tapas-base-finetuned-wtq",
+)
+
+
+class TFAutoModelForTokenClassification(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
+
+
+TFAutoModelForTokenClassification = auto_class_update(
+ TFAutoModelForTokenClassification, head_doc="token classification"
+)
+
+
+class TFAutoModelForMultipleChoice(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
+
+
+TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice")
+
+
+class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
+
+
+TFAutoModelForNextSentencePrediction = auto_class_update(
+ TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
+)
+
+
+class TFAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
+ _model_mapping = TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
+
+
+TFAutoModelForSpeechSeq2Seq = auto_class_update(
+ TFAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
+)
+
+
+class TFAutoModelWithLMHead(_TFAutoModelWithLMHead):
+ @classmethod
+ def from_config(cls, config):
+ warnings.warn(
+ "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use"
+ " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models"
+ " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.",
+ FutureWarning,
+ )
+ return super().from_config(config)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+ warnings.warn(
+ "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use"
+ " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models"
+ " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.",
+ FutureWarning,
+ )
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
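+
+
+# Migration note (illustrative only, not part of the module's public surface): the deprecated
+# `TFAutoModelWithLMHead` splits into three task-specific classes. A minimal sketch, assuming the
+# referenced checkpoints are available with TF weights (checkpoint names are examples):
+#
+#     from transformers import TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForSeq2SeqLM
+#
+#     causal_lm = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+#     masked_lm = TFAutoModelForMaskedLM.from_pretrained("google-bert/bert-base-uncased")
+#     seq2seq_lm = TFAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")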
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7134f26a7d60c899c7a6bf320031ba075241716
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py
@@ -0,0 +1,358 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" AutoProcessor class."""
+import importlib
+import inspect
+import json
+import os
+import warnings
+from collections import OrderedDict
+
+# Build the list of all feature extractors
+from ...configuration_utils import PretrainedConfig
+from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
+from ...feature_extraction_utils import FeatureExtractionMixin
+from ...image_processing_utils import ImageProcessingMixin
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils import TOKENIZER_CONFIG_FILE
+from ...utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, get_file_from_repo, logging
+from .auto_factory import _LazyAutoMapping
+from .configuration_auto import (
+ CONFIG_MAPPING_NAMES,
+ AutoConfig,
+ model_type_to_module_name,
+ replace_list_option_in_docstrings,
+)
+from .feature_extraction_auto import AutoFeatureExtractor
+from .image_processing_auto import AutoImageProcessor
+from .tokenization_auto import AutoTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+PROCESSOR_MAPPING_NAMES = OrderedDict(
+ [
+ ("align", "AlignProcessor"),
+ ("altclip", "AltCLIPProcessor"),
+ ("bark", "BarkProcessor"),
+ ("blip", "BlipProcessor"),
+ ("blip-2", "Blip2Processor"),
+ ("bridgetower", "BridgeTowerProcessor"),
+ ("chinese_clip", "ChineseCLIPProcessor"),
+ ("clap", "ClapProcessor"),
+ ("clip", "CLIPProcessor"),
+ ("clipseg", "CLIPSegProcessor"),
+ ("clvp", "ClvpProcessor"),
+ ("flava", "FlavaProcessor"),
+ ("fuyu", "FuyuProcessor"),
+ ("git", "GitProcessor"),
+ ("groupvit", "CLIPProcessor"),
+ ("hubert", "Wav2Vec2Processor"),
+ ("idefics", "IdeficsProcessor"),
+ ("idefics2", "Idefics2Processor"),
+ ("instructblip", "InstructBlipProcessor"),
+ ("kosmos-2", "Kosmos2Processor"),
+ ("layoutlmv2", "LayoutLMv2Processor"),
+ ("layoutlmv3", "LayoutLMv3Processor"),
+ ("llava", "LlavaProcessor"),
+ ("llava_next", "LlavaNextProcessor"),
+ ("markuplm", "MarkupLMProcessor"),
+ ("mctct", "MCTCTProcessor"),
+ ("mgp-str", "MgpstrProcessor"),
+ ("oneformer", "OneFormerProcessor"),
+ ("owlv2", "Owlv2Processor"),
+ ("owlvit", "OwlViTProcessor"),
+ ("pix2struct", "Pix2StructProcessor"),
+ ("pop2piano", "Pop2PianoProcessor"),
+ ("sam", "SamProcessor"),
+ ("seamless_m4t", "SeamlessM4TProcessor"),
+ ("sew", "Wav2Vec2Processor"),
+ ("sew-d", "Wav2Vec2Processor"),
+ ("siglip", "SiglipProcessor"),
+ ("speech_to_text", "Speech2TextProcessor"),
+ ("speech_to_text_2", "Speech2Text2Processor"),
+ ("speecht5", "SpeechT5Processor"),
+ ("trocr", "TrOCRProcessor"),
+ ("tvlt", "TvltProcessor"),
+ ("tvp", "TvpProcessor"),
+ ("unispeech", "Wav2Vec2Processor"),
+ ("unispeech-sat", "Wav2Vec2Processor"),
+ ("vilt", "ViltProcessor"),
+ ("vipllava", "LlavaProcessor"),
+ ("vision-text-dual-encoder", "VisionTextDualEncoderProcessor"),
+ ("wav2vec2", "Wav2Vec2Processor"),
+ ("wav2vec2-bert", "Wav2Vec2Processor"),
+ ("wav2vec2-conformer", "Wav2Vec2Processor"),
+ ("wavlm", "Wav2Vec2Processor"),
+ ("whisper", "WhisperProcessor"),
+ ("xclip", "XCLIPProcessor"),
+ ]
+)
+
+PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES)
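+# The mapping is lazy: indexing it with a config class (e.g. `PROCESSOR_MAPPING[type(config)]`, as done in
+# `AutoProcessor.from_pretrained` below) only imports the corresponding processor module on first access.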
+
+
+def processor_class_from_name(class_name: str):
+ for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
+ if class_name in processors:
+ module_name = model_type_to_module_name(module_name)
+
+ module = importlib.import_module(f".{module_name}", "transformers.models")
+ try:
+ return getattr(module, class_name)
+ except AttributeError:
+ continue
+
+ for processor in PROCESSOR_MAPPING._extra_content.values():
+ if getattr(processor, "__name__", None) == class_name:
+ return processor
+
+ # We did not find the class, but maybe it's because a dependency is missing. In that case, the class will be in
+ # the main init and we return the proper dummy to get an appropriate error message.
+ main_module = importlib.import_module("transformers")
+ if hasattr(main_module, class_name):
+ return getattr(main_module, class_name)
+
+ return None
+
+
+class AutoProcessor:
+ r"""
+ This is a generic processor class that will be instantiated as one of the processor classes of the library when
+ created with the [`AutoProcessor.from_pretrained`] class method.
+
+ This class cannot be instantiated directly using `__init__()` (throws an error).
+ """
+
+ def __init__(self):
+ raise EnvironmentError(
+ "AutoProcessor is designed to be instantiated "
+ "using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method."
+ )
+
+ @classmethod
+ @replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ r"""
+ Instantiate one of the processor classes of the library from a pretrained model vocabulary.
+
+ The processor class to instantiate is selected based on the `model_type` property of the config object (either
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible):
+
+ List options
+
+ Params:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ This can be either:
+
+ - a string, the *model id* of a pretrained processor hosted inside a model repo on
+ huggingface.co.
+ - a path to a *directory* containing processor files saved using the `save_pretrained()` method,
+ e.g., `./my_model_directory/`.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained processor should be cached if the standard
+ cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force (re-)downloading the processor files and override the cached versions if
+ they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
+ exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+ If `False`, then this function returns just the final processor object. If `True`, then this
+ function returns a `Tuple(processor, unused_kwargs)` where *unused_kwargs* is a dictionary
+ consisting of the key/value pairs whose keys are not processor attributes: i.e., the part of
+ `kwargs` which has not been used to update `processor` and is otherwise ignored.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ kwargs (`Dict[str, Any]`, *optional*):
+ The values in kwargs for any keys which are processor attributes will be used to override the
+ loaded values. Behavior concerning key/value pairs whose keys are *not* processor attributes is
+ controlled by the `return_unused_kwargs` keyword parameter.
+
+
+
+ Passing `token=True` is required when you want to use a private model.
+
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor
+
+ >>> # Download processor from huggingface.co and cache.
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
+
+ >>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*)
+ >>> # processor = AutoProcessor.from_pretrained("./test/saved_model/")
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if kwargs.get("token", None) is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ kwargs["token"] = use_auth_token
+
+ config = kwargs.pop("config", None)
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
+ kwargs["_from_auto"] = True
+
+ processor_class = None
+ processor_auto_map = None
+
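+ # Resolution order: the processor class is looked up, in turn, in the processor config (`PROCESSOR_NAME`),
+ # the image processor / feature extractor config (`FEATURE_EXTRACTOR_NAME`), the tokenizer config
+ # (`TOKENIZER_CONFIG_FILE`) and, finally, the model config. The first match wins; custom classes declared
+ # via `auto_map` are only used when `trust_remote_code` resolves to `True`.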
+ # First, let's see if we have a processor or preprocessor config.
+ # Filter the kwargs for `get_file_from_repo`.
+ get_file_from_repo_kwargs = {
+ key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs
+ }
+
+ # Let's start by checking whether the processor class is saved in a processor config
+ processor_config_file = get_file_from_repo(
+ pretrained_model_name_or_path, PROCESSOR_NAME, **get_file_from_repo_kwargs
+ )
+ if processor_config_file is not None:
+ config_dict, _ = ProcessorMixin.get_processor_dict(pretrained_model_name_or_path, **kwargs)
+ processor_class = config_dict.get("processor_class", None)
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
+
+ if processor_class is None:
+ # If not found, let's check whether the processor class is saved in an image processor config
+ preprocessor_config_file = get_file_from_repo(
+ pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs
+ )
+ if preprocessor_config_file is not None:
+ config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
+ processor_class = config_dict.get("processor_class", None)
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
+
+ # If not found, let's check whether the processor class is saved in a feature extractor config
+ if preprocessor_config_file is not None and processor_class is None:
+ config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(
+ pretrained_model_name_or_path, **kwargs
+ )
+ processor_class = config_dict.get("processor_class", None)
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
+
+ if processor_class is None:
+ # Next, let's check whether the processor class is saved in a tokenizer
+ tokenizer_config_file = get_file_from_repo(
+ pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs
+ )
+ if tokenizer_config_file is not None:
+ with open(tokenizer_config_file, encoding="utf-8") as reader:
+ config_dict = json.load(reader)
+
+ processor_class = config_dict.get("processor_class", None)
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
+
+ if processor_class is None:
+ # Otherwise, load config, if it can be loaded.
+ if not isinstance(config, PretrainedConfig):
+ config = AutoConfig.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+ )
+
+ # And check if the config contains the processor class.
+ processor_class = getattr(config, "processor_class", None)
+ if hasattr(config, "auto_map") and "AutoProcessor" in config.auto_map:
+ processor_auto_map = config.auto_map["AutoProcessor"]
+
+ if processor_class is not None:
+ processor_class = processor_class_from_name(processor_class)
+
+ has_remote_code = processor_auto_map is not None
+ has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING
+ trust_remote_code = resolve_trust_remote_code(
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
+ )
+
+ if has_remote_code and trust_remote_code:
+ processor_class = get_class_from_dynamic_module(
+ processor_auto_map, pretrained_model_name_or_path, **kwargs
+ )
+ _ = kwargs.pop("code_revision", None)
+ if os.path.isdir(pretrained_model_name_or_path):
+ processor_class.register_for_auto_class()
+ return processor_class.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+ )
+ elif processor_class is not None:
+ return processor_class.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+ )
+ # Last try: we use the PROCESSOR_MAPPING.
+ elif type(config) in PROCESSOR_MAPPING:
+ return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
+
+ # At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a
+ # tokenizer.
+ try:
+ return AutoTokenizer.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+ )
+ except Exception:
+ try:
+ return AutoImageProcessor.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+ )
+ except Exception:
+ pass
+
+ try:
+ return AutoFeatureExtractor.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+ )
+ except Exception:
+ pass
+
+ raise ValueError(
+ f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a "
+ "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains "
+ "the files of at least one of those processing classes."
+ )
+
+ @staticmethod
+ def register(config_class, processor_class, exist_ok=False):
+ """
+ Register a new processor for this class.
+
+ Args:
+ config_class ([`PretrainedConfig`]):
+ The configuration corresponding to the model to register.
+ processor_class ([`ProcessorMixin`]): The processor to register.
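+
+ Example (an illustrative sketch; `CustomConfig` and `CustomProcessor` are hypothetical subclasses of
+ [`PretrainedConfig`] and [`ProcessorMixin`]):
+
+ ```python
+ >>> from transformers import AutoConfig, AutoProcessor
+ >>> from my_package import CustomConfig, CustomProcessor  # hypothetical custom classes
+
+ >>> AutoConfig.register("custom-model", CustomConfig)
+ >>> AutoProcessor.register(CustomConfig, CustomProcessor)
+
+ >>> # The auto class can now resolve checkpoints whose config is a CustomConfig.
+ >>> # processor = AutoProcessor.from_pretrained("path/to/custom-checkpoint")
+ ```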
+ """
+ PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..99706afe1655e30164062c035b29ab20d8065ff6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py
@@ -0,0 +1,936 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Auto Tokenizer class."""
+
+import importlib
+import json
+import os
+import warnings
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
+from ...tokenization_utils import PreTrainedTokenizer
+from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE
+from ...utils import (
+ cached_file,
+ extract_commit_hash,
+ is_g2p_en_available,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ logging,
+)
+from ..encoder_decoder import EncoderDecoderConfig
+from .auto_factory import _LazyAutoMapping
+from .configuration_auto import (
+ CONFIG_MAPPING_NAMES,
+ AutoConfig,
+ config_class_to_model_type,
+ model_type_to_module_name,
+ replace_list_option_in_docstrings,
+)
+
+
+if is_tokenizers_available():
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
+else:
+ PreTrainedTokenizerFast = None
+
+
+logger = logging.get_logger(__name__)
+
+if TYPE_CHECKING:
+ # This significantly improves completion suggestion performance when
+ # the transformers package is used with Microsoft's Pylance language server.
+ TOKENIZER_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict()
+else:
+ TOKENIZER_MAPPING_NAMES = OrderedDict(
+ [
+ (
+ "albert",
+ (
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("align", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("bark", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("bart", ("BartTokenizer", "BartTokenizerFast")),
+ (
+ "barthez",
+ (
+ "BarthezTokenizer" if is_sentencepiece_available() else None,
+ "BarthezTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("bartpho", ("BartphoTokenizer", None)),
+ ("bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("bert-generation", ("BertGenerationTokenizer" if is_sentencepiece_available() else None, None)),
+ ("bert-japanese", ("BertJapaneseTokenizer", None)),
+ ("bertweet", ("BertweetTokenizer", None)),
+ (
+ "big_bird",
+ (
+ "BigBirdTokenizer" if is_sentencepiece_available() else None,
+ "BigBirdTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("bigbird_pegasus", ("PegasusTokenizer", "PegasusTokenizerFast" if is_tokenizers_available() else None)),
+ ("biogpt", ("BioGptTokenizer", None)),
+ ("blenderbot", ("BlenderbotTokenizer", "BlenderbotTokenizerFast")),
+ ("blenderbot-small", ("BlenderbotSmallTokenizer", None)),
+ ("blip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("blip-2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ ("bloom", (None, "BloomTokenizerFast" if is_tokenizers_available() else None)),
+ ("bridgetower", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
+ ("bros", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("byt5", ("ByT5Tokenizer", None)),
+ (
+ "camembert",
+ (
+ "CamembertTokenizer" if is_sentencepiece_available() else None,
+ "CamembertTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("canine", ("CanineTokenizer", None)),
+ ("chinese_clip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "clap",
+ (
+ "RobertaTokenizer",
+ "RobertaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "clip",
+ (
+ "CLIPTokenizer",
+ "CLIPTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "clipseg",
+ (
+ "CLIPTokenizer",
+ "CLIPTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("clvp", ("ClvpTokenizer", None)),
+ (
+ "code_llama",
+ (
+ "CodeLlamaTokenizer" if is_sentencepiece_available() else None,
+ "CodeLlamaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("codegen", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)),
+ ("cohere", (None, "CohereTokenizerFast" if is_tokenizers_available() else None)),
+ ("convbert", ("ConvBertTokenizer", "ConvBertTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "cpm",
+ (
+ "CpmTokenizer" if is_sentencepiece_available() else None,
+ "CpmTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("cpmant", ("CpmAntTokenizer", None)),
+ ("ctrl", ("CTRLTokenizer", None)),
+ ("data2vec-audio", ("Wav2Vec2CTCTokenizer", None)),
+ ("data2vec-text", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
+ ("dbrx", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ ("deberta", ("DebertaTokenizer", "DebertaTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "deberta-v2",
+ (
+ "DebertaV2Tokenizer" if is_sentencepiece_available() else None,
+ "DebertaV2TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("distilbert", ("DistilBertTokenizer", "DistilBertTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "dpr",
+ (
+ "DPRQuestionEncoderTokenizer",
+ "DPRQuestionEncoderTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)),
+ ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("ernie_m", ("ErnieMTokenizer" if is_sentencepiece_available() else None, None)),
+ ("esm", ("EsmTokenizer", None)),
+ ("falcon", (None, "PreTrainedTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "fastspeech2_conformer",
+ ("FastSpeech2ConformerTokenizer" if is_g2p_en_available() else None, None),
+ ),
+ ("flaubert", ("FlaubertTokenizer", None)),
+ ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)),
+ ("fsmt", ("FSMTTokenizer", None)),
+ ("funnel", ("FunnelTokenizer", "FunnelTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "gemma",
+ (
+ "GemmaTokenizer" if is_sentencepiece_available() else None,
+ "GemmaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("git", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("gpt-sw3", ("GPTSw3Tokenizer" if is_sentencepiece_available() else None, None)),
+ ("gpt2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ ("gpt_bigcode", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ ("gpt_neo", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ ("gpt_neox", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
+ ("gpt_neox_japanese", ("GPTNeoXJapaneseTokenizer", None)),
+ ("gptj", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ ("gptsan-japanese", ("GPTSanJapaneseTokenizer", None)),
+ ("grounding-dino", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("groupvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
+ ("herbert", ("HerbertTokenizer", "HerbertTokenizerFast" if is_tokenizers_available() else None)),
+ ("hubert", ("Wav2Vec2CTCTokenizer", None)),
+ ("ibert", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
+ ("idefics", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)),
+ ("idefics2", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
+ ("instructblip", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "jamba",
+ (
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("jukebox", ("JukeboxTokenizer", None)),
+ (
+ "kosmos-2",
+ (
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("layoutlm", ("LayoutLMTokenizer", "LayoutLMTokenizerFast" if is_tokenizers_available() else None)),
+ ("layoutlmv2", ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" if is_tokenizers_available() else None)),
+ ("layoutlmv3", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
+ ("layoutxlm", ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast" if is_tokenizers_available() else None)),
+ ("led", ("LEDTokenizer", "LEDTokenizerFast" if is_tokenizers_available() else None)),
+ ("lilt", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "llama",
+ (
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("llava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
+ ("llava_next", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
+ ("longformer", ("LongformerTokenizer", "LongformerTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "longt5",
+ (
+ "T5Tokenizer" if is_sentencepiece_available() else None,
+ "T5TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("luke", ("LukeTokenizer", None)),
+ ("lxmert", ("LxmertTokenizer", "LxmertTokenizerFast" if is_tokenizers_available() else None)),
+ ("m2m_100", ("M2M100Tokenizer" if is_sentencepiece_available() else None, None)),
+ ("mamba", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
+ ("marian", ("MarianTokenizer" if is_sentencepiece_available() else None, None)),
+ (
+ "mbart",
+ (
+ "MBartTokenizer" if is_sentencepiece_available() else None,
+ "MBartTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "mbart50",
+ (
+ "MBart50Tokenizer" if is_sentencepiece_available() else None,
+ "MBart50TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("mega", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
+ ("megatron-bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("mgp-str", ("MgpstrTokenizer", None)),
+ (
+ "mistral",
+ (
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "mixtral",
+ (
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("mluke", ("MLukeTokenizer" if is_sentencepiece_available() else None, None)),
+ ("mobilebert", ("MobileBertTokenizer", "MobileBertTokenizerFast" if is_tokenizers_available() else None)),
+ ("mpnet", ("MPNetTokenizer", "MPNetTokenizerFast" if is_tokenizers_available() else None)),
+ ("mpt", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
+ ("mra", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "mt5",
+ (
+ "MT5Tokenizer" if is_sentencepiece_available() else None,
+ "MT5TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("musicgen", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
+ ("musicgen_melody", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
+ ("mvp", ("MvpTokenizer", "MvpTokenizerFast" if is_tokenizers_available() else None)),
+ ("nezha", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "nllb",
+ (
+ "NllbTokenizer" if is_sentencepiece_available() else None,
+ "NllbTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "nllb-moe",
+ (
+ "NllbTokenizer" if is_sentencepiece_available() else None,
+ "NllbTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "nystromformer",
+ (
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("olmo", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
+ ("oneformer", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "openai-gpt",
+ ("OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" if is_tokenizers_available() else None),
+ ),
+ ("opt", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ ("owlv2", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
+ ("owlvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "pegasus",
+ (
+ "PegasusTokenizer" if is_sentencepiece_available() else None,
+ "PegasusTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "pegasus_x",
+ (
+ "PegasusTokenizer" if is_sentencepiece_available() else None,
+ "PegasusTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "perceiver",
+ (
+ "PerceiverTokenizer",
+ None,
+ ),
+ ),
+ (
+ "persimmon",
+ (
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("phi", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)),
+ ("phobert", ("PhobertTokenizer", None)),
+ ("pix2struct", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
+ ("plbart", ("PLBartTokenizer" if is_sentencepiece_available() else None, None)),
+ ("prophetnet", ("ProphetNetTokenizer", None)),
+ ("qdqbert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "qwen2",
+ (
+ "Qwen2Tokenizer",
+ "Qwen2TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "qwen2_moe",
+ (
+ "Qwen2Tokenizer",
+ "Qwen2TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("rag", ("RagTokenizer", None)),
+ ("realm", ("RealmTokenizer", "RealmTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "recurrent_gemma",
+ (
+ "GemmaTokenizer" if is_sentencepiece_available() else None,
+ "GemmaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "reformer",
+ (
+ "ReformerTokenizer" if is_sentencepiece_available() else None,
+ "ReformerTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "rembert",
+ (
+ "RemBertTokenizer" if is_sentencepiece_available() else None,
+ "RemBertTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("retribert", ("RetriBertTokenizer", "RetriBertTokenizerFast" if is_tokenizers_available() else None)),
+ ("roberta", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "roberta-prelayernorm",
+ ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None),
+ ),
+ ("roc_bert", ("RoCBertTokenizer", None)),
+ ("roformer", ("RoFormerTokenizer", "RoFormerTokenizerFast" if is_tokenizers_available() else None)),
+ ("rwkv", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "seamless_m4t",
+ (
+ "SeamlessM4TTokenizer" if is_sentencepiece_available() else None,
+ "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "seamless_m4t_v2",
+ (
+ "SeamlessM4TTokenizer" if is_sentencepiece_available() else None,
+ "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("siglip", ("SiglipTokenizer" if is_sentencepiece_available() else None, None)),
+ ("speech_to_text", ("Speech2TextTokenizer" if is_sentencepiece_available() else None, None)),
+ ("speech_to_text_2", ("Speech2Text2Tokenizer", None)),
+ ("speecht5", ("SpeechT5Tokenizer" if is_sentencepiece_available() else None, None)),
+ ("splinter", ("SplinterTokenizer", "SplinterTokenizerFast")),
+ (
+ "squeezebert",
+ ("SqueezeBertTokenizer", "SqueezeBertTokenizerFast" if is_tokenizers_available() else None),
+ ),
+ ("stablelm", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
+ ("starcoder2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "switch_transformers",
+ (
+ "T5Tokenizer" if is_sentencepiece_available() else None,
+ "T5TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "t5",
+ (
+ "T5Tokenizer" if is_sentencepiece_available() else None,
+ "T5TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("tapas", ("TapasTokenizer", None)),
+ ("tapex", ("TapexTokenizer", None)),
+ ("transfo-xl", ("TransfoXLTokenizer", None)),
+ ("tvp", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "udop",
+ (
+ "UdopTokenizer" if is_sentencepiece_available() else None,
+ "UdopTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "umt5",
+ (
+ "T5Tokenizer" if is_sentencepiece_available() else None,
+ "T5TokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("vilt", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("vipllava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
+ ("visual_bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
+ ("vits", ("VitsTokenizer", None)),
+ ("wav2vec2", ("Wav2Vec2CTCTokenizer", None)),
+ ("wav2vec2-bert", ("Wav2Vec2CTCTokenizer", None)),
+ ("wav2vec2-conformer", ("Wav2Vec2CTCTokenizer", None)),
+ ("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)),
+ ("whisper", ("WhisperTokenizer", "WhisperTokenizerFast" if is_tokenizers_available() else None)),
+ ("xclip", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
+ (
+ "xglm",
+ (
+ "XGLMTokenizer" if is_sentencepiece_available() else None,
+ "XGLMTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ("xlm", ("XLMTokenizer", None)),
+ ("xlm-prophetnet", ("XLMProphetNetTokenizer" if is_sentencepiece_available() else None, None)),
+ (
+ "xlm-roberta",
+ (
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "xlm-roberta-xl",
+ (
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "xlnet",
+ (
+ "XLNetTokenizer" if is_sentencepiece_available() else None,
+ "XLNetTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "xmod",
+ (
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ (
+ "yoso",
+ (
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
+ ]
+ )
+
+TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES)
+
+CONFIG_TO_TYPE = {v: k for k, v in CONFIG_MAPPING_NAMES.items()}
+
+
+def tokenizer_class_from_name(class_name: str):
+ if class_name == "PreTrainedTokenizerFast":
+ return PreTrainedTokenizerFast
+
+ for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items():
+ if class_name in tokenizers:
+ module_name = model_type_to_module_name(module_name)
+
+ module = importlib.import_module(f".{module_name}", "transformers.models")
+ try:
+ return getattr(module, class_name)
+ except AttributeError:
+ continue
+
+ for config, tokenizers in TOKENIZER_MAPPING._extra_content.items():
+ for tokenizer in tokenizers:
+ if getattr(tokenizer, "__name__", None) == class_name:
+ return tokenizer
+
+ # We did not find the class, but maybe it's because a dependency is missing. In that case, the class will be in
+ # the main init and we return the proper dummy to get an appropriate error message.
+ main_module = importlib.import_module("transformers")
+ if hasattr(main_module, class_name):
+ return getattr(main_module, class_name)
+
+ return None
+
+
+def get_tokenizer_config(
+ pretrained_model_name_or_path: Union[str, os.PathLike],
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
+ force_download: bool = False,
+ resume_download: bool = False,
+ proxies: Optional[Dict[str, str]] = None,
+ token: Optional[Union[bool, str]] = None,
+ revision: Optional[str] = None,
+ local_files_only: bool = False,
+ subfolder: str = "",
+ **kwargs,
+):
+ """
+ Loads the tokenizer configuration from a pretrained model tokenizer configuration.
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ This can be either:
+
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+ huggingface.co.
+ - a path to a *directory* containing a configuration file saved using the
+ [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
+
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+ cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force to (re-)download the configuration files and override the cached versions if they
+ exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ If `True`, will only try to load the tokenizer configuration from local files.
+ subfolder (`str`, *optional*, defaults to `""`):
+ In case the tokenizer config is located inside a subfolder of the model repo on huggingface.co, you can
+ specify the folder name here.
+
+
+
+ Passing `token=True` is required when you want to use a private model.
+
+
+
+ Returns:
+ `Dict`: The configuration of the tokenizer.
+
+ Examples:
+
+ ```python
+ # Download configuration from huggingface.co and cache.
+ tokenizer_config = get_tokenizer_config("google-bert/bert-base-uncased")
+ # This model does not have a tokenizer config so the result will be an empty dict.
+ tokenizer_config = get_tokenizer_config("FacebookAI/xlm-roberta-base")
+
+ # Save a pretrained tokenizer locally and you can reload its config
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
+ tokenizer.save_pretrained("tokenizer-test")
+ tokenizer_config = get_tokenizer_config("tokenizer-test")
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if token is not None:
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+ token = use_auth_token
+
+ commit_hash = kwargs.get("_commit_hash", None)
+ resolved_config_file = cached_file(
+ pretrained_model_name_or_path,
+ TOKENIZER_CONFIG_FILE,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ token=token,
+ revision=revision,
+ local_files_only=local_files_only,
+ subfolder=subfolder,
+ _raise_exceptions_for_gated_repo=False,
+ _raise_exceptions_for_missing_entries=False,
+ _raise_exceptions_for_connection_errors=False,
+ _commit_hash=commit_hash,
+ )
+ if resolved_config_file is None:
+ logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.")
+ return {}
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
+
+ with open(resolved_config_file, encoding="utf-8") as reader:
+ result = json.load(reader)
+ result["_commit_hash"] = commit_hash
+ return result
+
+
+class AutoTokenizer:
+ r"""
+ This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
+ created with the [`AutoTokenizer.from_pretrained`] class method.
+
+ This class cannot be instantiated directly using `__init__()` (throws an error).
+ """
+
+ def __init__(self):
+ raise EnvironmentError(
+ "AutoTokenizer is designed to be instantiated "
+ "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
+ )
+
+ @classmethod
+ @replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
+ def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
+ r"""
+ Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
+
+ The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
+
+ List options
+
+ Params:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ Can be either:
+
+ - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
+ using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
+ - A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
+ single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not
+ applicable to all derived classes)
+ inputs (additional positional arguments, *optional*):
+ Will be passed along to the Tokenizer `__init__()` method.
+ config ([`PretrainedConfig`], *optional*):
+ The configuration object used to determine the tokenizer class to instantiate.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
+ standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files and override the
+ cached versions if they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
+ file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ subfolder (`str`, *optional*):
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
+ facebook/rag-token-base), specify it here.
+ use_fast (`bool`, *optional*, defaults to `True`):
+ Use a [fast Rust-based tokenizer](https://huggingface.co/docs/tokenizers/index) if it is supported for
+ a given model. If a fast tokenizer is not available for a given model, a normal Python-based tokenizer
+ is returned instead.
+ tokenizer_type (`str`, *optional*):
+ Tokenizer type to be loaded. Must be one of the model-type keys of `TOKENIZER_MAPPING_NAMES`
+ (e.g. `"bert"` or `"gpt2"`); when given, the tokenizer class is selected from that entry instead
+ of being inferred from the config.
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ kwargs (additional keyword arguments, *optional*):
+ Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like
+ `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
+ `additional_special_tokens`. See parameters in the `__init__()` for more details.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> # Download vocabulary from huggingface.co and cache.
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+
+ >>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
+ >>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
+
+ >>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
+ >>> # tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/")
+
+ >>> # Download vocabulary from huggingface.co and define model-specific arguments
+ >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base", add_prefix_space=True)
+ ```"""
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if kwargs.get("token", None) is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ kwargs["token"] = use_auth_token
+
+ config = kwargs.pop("config", None)
+ kwargs["_from_auto"] = True
+
+ use_fast = kwargs.pop("use_fast", True)
+ tokenizer_type = kwargs.pop("tokenizer_type", None)
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
+
+ # First, let's see whether the tokenizer_type is passed so that we can leverage it
+ if tokenizer_type is not None:
+ tokenizer_class = None
+ tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None)
+
+ if tokenizer_class_tuple is None:
+ raise ValueError(
+ f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of "
+ f"{', '.join(c for c in TOKENIZER_MAPPING_NAMES.keys())}."
+ )
+
+ tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple
+
+ if use_fast:
+ if tokenizer_fast_class_name is not None:
+ tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name)
+ else:
+ logger.warning(
+ "`use_fast` is set to `True` but the tokenizer class does not have a fast version. "
+ " Falling back to the slow version."
+ )
+ if tokenizer_class is None:
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_name)
+
+ if tokenizer_class is None:
+ raise ValueError(f"Tokenizer class {tokenizer_class_name} is not currently imported.")
+
+ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+
+ # Next, let's try to use the tokenizer_config file to get the tokenizer class.
+ tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
+ if "_commit_hash" in tokenizer_config:
+ kwargs["_commit_hash"] = tokenizer_config["_commit_hash"]
+ config_tokenizer_class = tokenizer_config.get("tokenizer_class")
+ tokenizer_auto_map = None
+ if "auto_map" in tokenizer_config:
+ if isinstance(tokenizer_config["auto_map"], (tuple, list)):
+ # Legacy format for dynamic tokenizers
+ tokenizer_auto_map = tokenizer_config["auto_map"]
+ else:
+ tokenizer_auto_map = tokenizer_config["auto_map"].get("AutoTokenizer", None)
+
+ # If that did not work, let's try to use the config.
+ if config_tokenizer_class is None:
+ if not isinstance(config, PretrainedConfig):
+ config = AutoConfig.from_pretrained(
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+ )
+ config_tokenizer_class = config.tokenizer_class
+ if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map:
+ tokenizer_auto_map = config.auto_map["AutoTokenizer"]
+
+ has_remote_code = tokenizer_auto_map is not None
+ has_local_code = type(config) in TOKENIZER_MAPPING or (
+ config_tokenizer_class is not None
+ and (
+ tokenizer_class_from_name(config_tokenizer_class) is not None
+ or tokenizer_class_from_name(config_tokenizer_class + "Fast") is not None
+ )
+ )
+ trust_remote_code = resolve_trust_remote_code(
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
+ )
+
+ if has_remote_code and trust_remote_code:
+ if use_fast and tokenizer_auto_map[1] is not None:
+ class_ref = tokenizer_auto_map[1]
+ else:
+ class_ref = tokenizer_auto_map[0]
+ tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
+ _ = kwargs.pop("code_revision", None)
+ if os.path.isdir(pretrained_model_name_or_path):
+ tokenizer_class.register_for_auto_class()
+ return tokenizer_class.from_pretrained(
+ pretrained_model_name_or_path, *inputs, trust_remote_code=trust_remote_code, **kwargs
+ )
+ elif config_tokenizer_class is not None:
+ tokenizer_class = None
+ if use_fast and not config_tokenizer_class.endswith("Fast"):
+ tokenizer_class_candidate = f"{config_tokenizer_class}Fast"
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
+ if tokenizer_class is None:
+ tokenizer_class_candidate = config_tokenizer_class
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
+ if tokenizer_class is None:
+ raise ValueError(
+ f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
+ )
+ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+
+ # Otherwise we have to be creative.
+ # if model is an encoder decoder, the encoder tokenizer class is used by default
+ if isinstance(config, EncoderDecoderConfig):
+ if type(config.decoder) is not type(config.encoder): # noqa: E721
+ logger.warning(
+ f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
+ f"config class: {config.decoder.__class__}. It is not recommended to use the "
+ "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
+ "specific tokenizer classes."
+ )
+ config = config.encoder
+
+ model_type = config_class_to_model_type(type(config).__name__)
+ if model_type is not None:
+ tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
+ if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
+ return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+ else:
+ if tokenizer_class_py is not None:
+ return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+ else:
+ raise ValueError(
+ "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed "
+ "in order to use this tokenizer."
+ )
+
+ raise ValueError(
+ f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n"
+ f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}."
+ )
+
+ @staticmethod
+ def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False):
+ """
+ Register a new tokenizer in this mapping.
+
+
+ Args:
+ config_class ([`PretrainedConfig`]):
+ The configuration corresponding to the model to register.
+ slow_tokenizer_class ([`PretrainedTokenizer`], *optional*):
+ The slow tokenizer to register.
+ fast_tokenizer_class ([`PretrainedTokenizerFast`], *optional*):
+ The fast tokenizer to register.
+ """
+ """
+ if slow_tokenizer_class is None and fast_tokenizer_class is None:
+ raise ValueError("You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class`.")
+ if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):
+ raise ValueError("You passed a fast tokenizer in the `slow_tokenizer_class`.")
+ if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):
+ raise ValueError("You passed a slow tokenizer in the `fast_tokenizer_class`.")
+
+ if (
+ slow_tokenizer_class is not None
+ and fast_tokenizer_class is not None
+ and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast)
+ and fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class
+ ):
+ raise ValueError(
+ "The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not "
+ "consistent with the slow tokenizer class you passed (fast tokenizer has "
+ f"{fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those "
+ "so they match!"
+ )
+
+ # Avoid resetting a set slow/fast tokenizer if we are passing just the other ones.
+ if config_class in TOKENIZER_MAPPING._extra_content:
+ existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]
+ if slow_tokenizer_class is None:
+ slow_tokenizer_class = existing_slow
+ if fast_tokenizer_class is None:
+ fast_tokenizer_class = existing_fast
+
+ TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..084cd22bdf1d888efd46b759b91ccf95ee53c656
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__init__.py
@@ -0,0 +1,59 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
+
+
+_import_structure = {}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_barthez"] = ["BarthezTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_barthez_fast"] = ["BarthezTokenizerFast"]
+
+
+if TYPE_CHECKING:
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_barthez import BarthezTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_barthez_fast import BarthezTokenizerFast
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..e988b0d518a3f369806d3ae7431c62f2a599029a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py
@@ -0,0 +1,195 @@
+# coding=utf-8
+# Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for the BARThez model."""
+
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+
+
+if is_sentencepiece_available():
+ from .tokenization_barthez import BarthezTokenizer
+else:
+ BarthezTokenizer = None
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
+
+
+SPIECE_UNDERLINE = "▁"
+
+
+class BarthezTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a "fast" BARThez tokenizer. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
+ Additional special tokens used by the tokenizer.
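+
+ Example (a minimal sketch, assuming the `moussaKam/barthez` checkpoint is reachable):
+
+ ```python
+ >>> from transformers import BarthezTokenizerFast
+
+ >>> tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
+ >>> ids = tokenizer("Transformers, c'est génial !")["input_ids"]  # token ids wrapped with <s> ... </s>
+ ```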
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = BarthezTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ **kwargs,
+ ):
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ **kwargs,
+ )
+
+ self.vocab_file = vocab_file
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BARThez sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bf08f3c2ab10c99a51d99eb1dd4effa4048123a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d3e1440da942edab0543de483240b5a5639de19
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__init__.py
@@ -0,0 +1,79 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_imagegpt": ["IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ImageGPTConfig", "ImageGPTOnnxConfig"]
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_imagegpt"] = ["ImageGPTFeatureExtractor"]
+ _import_structure["image_processing_imagegpt"] = ["ImageGPTImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_imagegpt"] = [
+ "IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ImageGPTForCausalImageModeling",
+ "ImageGPTForImageClassification",
+ "ImageGPTModel",
+ "ImageGPTPreTrainedModel",
+ "load_tf_weights_in_imagegpt",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_imagegpt import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ImageGPTConfig, ImageGPTOnnxConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_imagegpt import ImageGPTFeatureExtractor
+ from .image_processing_imagegpt import ImageGPTImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_imagegpt import (
+ IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ImageGPTForCausalImageModeling,
+ ImageGPTForImageClassification,
+ ImageGPTModel,
+ ImageGPTPreTrainedModel,
+ load_tf_weights_in_imagegpt,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f1c1deae64e81342f25ff643f05d0155be432b9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/configuration_imagegpt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/configuration_imagegpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0240183a3bee6826dd960c61f81ec202cd6577d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/configuration_imagegpt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/convert_imagegpt_original_tf2_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/convert_imagegpt_original_tf2_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44ff5f6526603a796d0180315c3aa2e28d575a43
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/convert_imagegpt_original_tf2_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/feature_extraction_imagegpt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/feature_extraction_imagegpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3dc7fe2c4d8a4100ac9c2cf868b83d1cd76c1d5b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/feature_extraction_imagegpt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e55d225eeee39f50880169183e93ad2e2bffc5fb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..faf0ffd53ec460f4a118ded095701ca13692f723
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/configuration_imagegpt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/configuration_imagegpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a8d62f9b5e629b7d10d9eb9dfde612c080a08c6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/configuration_imagegpt.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" OpenAI ImageGPT configuration"""
+
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, Mapping, Optional
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+if TYPE_CHECKING:
+ from ... import FeatureExtractionMixin, TensorType
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class ImageGPTConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`ImageGPTModel`] or a [`TFImageGPTModel`]. It is
+ used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT
+ [openai/imagegpt-small](https://huggingface.co/openai/imagegpt-small) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 512):
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`ImageGPTModel`] or [`TFImageGPTModel`].
+ n_positions (`int`, *optional*, defaults to 32*32):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ n_embd (`int`, *optional*, defaults to 512):
+ Dimensionality of the embeddings and hidden states.
+ n_layer (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ n_inner (`int`, *optional*, defaults to `None`):
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
+ activation_function (`str`, *optional*, defaults to `"quick_gelu"`):
+ Activation function (can be one of the activation functions defined in src/transformers/activations.py).
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
+ The dropout ratio for the embeddings.
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+ The epsilon to use in the layer normalization layers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ scale_attn_weights (`bool`, *optional*, defaults to `True`):
+ Scale attention weights by dividing by sqrt(hidden_size).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
+ Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
+ reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
+ Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
+ dot-product/softmax to float() when training with mixed precision.
+
+ Example:
+
+ ```python
+ >>> from transformers import ImageGPTConfig, ImageGPTModel
+
+ >>> # Initializing a ImageGPT configuration
+ >>> configuration = ImageGPTConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = ImageGPTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "imagegpt"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "hidden_size": "n_embd",
+ "max_position_embeddings": "n_positions",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=512 + 1, # add one for start of sentence (sos) token
+ n_positions=32 * 32,
+ n_embd=512,
+ n_layer=24,
+ n_head=8,
+ n_inner=None,
+ activation_function="quick_gelu",
+ resid_pdrop=0.1,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ scale_attn_weights=True,
+ use_cache=True,
+ tie_word_embeddings=False,
+ scale_attn_by_inverse_layer_idx=False,
+ reorder_and_upcast_attn=False,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.n_positions = n_positions
+ self.n_embd = n_embd
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.n_inner = n_inner
+ self.activation_function = activation_function
+ self.resid_pdrop = resid_pdrop
+ self.embd_pdrop = embd_pdrop
+ self.attn_pdrop = attn_pdrop
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_range = initializer_range
+ self.scale_attn_weights = scale_attn_weights
+ self.use_cache = use_cache
+ self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
+ self.reorder_and_upcast_attn = reorder_and_upcast_attn
+ self.tie_word_embeddings = tie_word_embeddings
+
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+
+class ImageGPTOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "sequence"}),
+ ]
+ )
+
+ def generate_dummy_inputs(
+ self,
+ preprocessor: "FeatureExtractionMixin",
+ batch_size: int = 1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional["TensorType"] = None,
+ num_channels: int = 3,
+ image_width: int = 32,
+ image_height: int = 32,
+ ) -> Mapping[str, Any]:
+ """
+ Generate inputs to provide to the ONNX exporter for the specific framework
+
+ Args:
+ preprocessor ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]):
+ The preprocessor associated with this model configuration.
+ batch_size (`int`, *optional*, defaults to 1):
+ The batch size to export the model for (-1 means dynamic axis).
+ seq_length (`int`, *optional*, defaults to -1):
+ The sequence length to export the model for (-1 means dynamic axis).
+ is_pair (`bool`, *optional*, defaults to `False`):
+ Indicate if the input is a pair (sentence 1, sentence 2).
+ framework (`TensorType`, *optional*, defaults to `None`):
+ The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of channels of the generated images.
+ image_width (`int`, *optional*, defaults to 32):
+ The width of the generated images.
+ image_height (`int`, *optional*, defaults to 32):
+ The height of the generated images.
+
+ Returns:
+ Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
+ """
+
+ input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
+ inputs = dict(preprocessor(images=input_image, return_tensors=framework))
+
+ return inputs
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..0212bd485bc1d69e8210e6b006a1100d7fd0b5b0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py
@@ -0,0 +1,72 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert OpenAI Image GPT checkpoints."""
+
+
+import argparse
+
+import torch
+
+from transformers import ImageGPTConfig, ImageGPTForCausalLM, load_tf_weights_in_imagegpt
+from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_imagegpt_checkpoint_to_pytorch(imagegpt_checkpoint_path, model_size, pytorch_dump_folder_path):
+ # Construct configuration depending on size
+ MODELS = {"small": (512, 8, 24), "medium": (1024, 8, 36), "large": (1536, 16, 48)}
+ n_embd, n_head, n_layer = MODELS[model_size] # set model hyperparameters
+ config = ImageGPTConfig(n_embd=n_embd, n_layer=n_layer, n_head=n_head)
+ model = ImageGPTForCausalLM(config)
+
+ # Load weights from numpy
+ load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path)
+
+ # Save pytorch-model
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
+ print(f"Save PyTorch model to {pytorch_weights_dump_path}")
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {pytorch_config_dump_path}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
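+# Example invocation (a sketch; the checkpoint and output paths are placeholders, the flags mirror the argparse
+# arguments defined below):
+#
+#   python convert_imagegpt_original_tf2_to_pytorch.py \
+#       --imagegpt_checkpoint_path /path/to/tf_checkpoint \
+#       --model_size small \
+#       --pytorch_dump_folder_path /path/to/pytorch_dump
+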
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--imagegpt_checkpoint_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the TensorFlow checkpoint path.",
+ )
+ parser.add_argument(
+ "--model_size",
+ default=None,
+ type=str,
+ required=True,
+ help="Size of the model (can be either 'small', 'medium' or 'large').",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_imagegpt_checkpoint_to_pytorch(
+ args.imagegpt_checkpoint_path, args.model_size, args.pytorch_dump_folder_path
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/feature_extraction_imagegpt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/feature_extraction_imagegpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..1780926bbf24c0ac6408e4734050afc35069a6aa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/feature_extraction_imagegpt.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for ImageGPT."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_imagegpt import ImageGPTImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use ImageGPTImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/image_processing_imagegpt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/image_processing_imagegpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..fecdd061d4e40e0daebb3f89011056490e598200
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/image_processing_imagegpt.py
@@ -0,0 +1,314 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for ImageGPT."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import rescale, resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+def squared_euclidean_distance(a, b):
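+ # Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`, expanded as
+ # ||a||^2 - 2 * a.b + ||b||^2 so the full (len(a), len(b), channels) difference tensor is never built.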
+ b = b.T
+ a2 = np.sum(np.square(a), axis=1)
+ b2 = np.sum(np.square(b), axis=0)
+ ab = np.matmul(a, b)
+ d = a2[:, None] - 2 * ab + b2[None, :]
+ return d
+
+
+def color_quantize(x, clusters):
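+ # Flatten pixels to RGB triplets and assign each one the index of its nearest color cluster.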
+ x = x.reshape(-1, 3)
+ d = squared_euclidean_distance(x, clusters)
+ return np.argmin(d, axis=1)
+
+
+class ImageGPTImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs an ImageGPT image processor. This image processor can be used to resize images to a smaller resolution
+ (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values"
+ (color clusters).
+
+ Args:
+ clusters (`np.ndarray` or `List[List[int]]`, *optional*):
+ The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by `clusters`
+ in `preprocess`.
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by
+ `do_resize` in `preprocess`.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
+ Size of the image after resizing. Can be overridden by `size` in `preprocess`.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image pixel values to the range [-1, 1]. Can be overridden by `do_normalize` in
+ `preprocess`.
+ do_color_quantize (`bool`, *optional*, defaults to `True`):
+ Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`.
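+
+ Example (a minimal sketch; it assumes the `openai/imagegpt-small` checkpoint, which ships its color clusters,
+ and that Pillow is installed):
+
+ ```python
+ >>> import numpy as np
+ >>> from PIL import Image
+ >>> from transformers import ImageGPTImageProcessor
+
+ >>> image_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
+ >>> image = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
+ >>> encoding = image_processor(images=image, return_tensors="np")
+ >>> # encoding["input_ids"] holds one color-cluster index per pixel of the resized image
+ ```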
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ # clusters is a first argument to maintain backwards compatibility with the old ImageGPTImageProcessor
+ clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_normalize: bool = True,
+ do_color_quantize: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 256, "width": 256}
+ size = get_size_dict(size)
+ self.clusters = np.array(clusters) if clusters is not None else None
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_normalize = do_normalize
+ self.do_color_quantize = do_color_quantize
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_normalize",
+ "do_color_quantize",
+ "clusters",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+ output_size = (size["height"], size["width"])
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def normalize(
+ self,
+ image: np.ndarray,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Normalizes an image's pixel values to the range [-1, 1].
+
+ Args:
+ image (`np.ndarray`):
+ Image to normalize.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)
+ image = image - 1
+ return image
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_normalize: bool = None,
+ do_color_quantize: Optional[bool] = None,
+ clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_normalize=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`):
+ Whether to color quantize the image.
+ clusters (`np.ndarray` or `List[List[int]]`, *optional*, defaults to `self.clusters`):
+ Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if
+ `do_color_quantize` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ Only has an effect if `do_color_quantize` is set to `False`.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(size)
+ resample = resample if resample is not None else self.resample
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
+ clusters = clusters if clusters is not None else self.clusters
+ # Keep `clusters` as `None` when unset so the `do_color_quantize` check below can raise a clear error.
+ clusters = np.array(clusters) if clusters is not None else None
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ # Here, normalize() uses a constant factor to divide pixel values,
+ # hence the method does not need image_mean and image_std.
+ validate_preprocess_arguments(
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ if do_color_quantize and clusters is None:
+ raise ValueError("Clusters must be specified if do_color_quantize is True.")
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_normalize:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If you wish to do this, "
+ "make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].",
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
+
+ if do_color_quantize:
+ images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images]
+ # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
+ images = np.array(images)
+ images = color_quantize(images, clusters).reshape(images.shape[:-1])
+
+ # flatten to (batch_size, height*width)
+ batch_size = images.shape[0]
+ images = images.reshape(batch_size, -1)
+
+ # We need to convert back to a list of images to keep consistent behaviour across processors.
+ images = list(images)
+ else:
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ for image in images
+ ]
+
+ data = {"input_ids": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/modeling_imagegpt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/modeling_imagegpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b9be17246e81e078af881a3e90d8b8c8c7839d9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/modeling_imagegpt.py
@@ -0,0 +1,1200 @@
+# coding=utf-8
+# Copyright 2021 The OpenAI Team Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch OpenAI ImageGPT model."""
+
+import math
+import os
+import warnings
+from typing import Any, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.cuda.amp import autocast
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ SequenceClassifierOutputWithPast,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_imagegpt import ImageGPTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "openai/imagegpt-small"
+_CONFIG_FOR_DOC = "ImageGPTConfig"
+
+
+from ..deprecated._archive_maps import IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path):
+ """
+ Load tf checkpoints in a pytorch model
+ """
+ try:
+ import re
+
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(imagegpt_checkpoint_path)
+ logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+
+ for name, shape in init_vars:
+ logger.info("Loading TF weight {} with shape {}".format(name, shape))
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array.squeeze())
+
+ for name, array in zip(names, arrays):
+ name = name[6:] # skip "model/"
+ name = name.split("/")
+
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+ # which are not required for using the pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ) or name[-1] in ["_step"]:
+ logger.info("Skipping {}".format("/".join(name)))
+ continue
+
+ pointer = model
+ if name[-1] not in ["wtet"]:
+ pointer = getattr(pointer, "transformer")
+
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
+ scope_names = re.split(r"(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+
+ if scope_names[0] == "w" or scope_names[0] == "g":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "b":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
+ pointer = getattr(pointer, scope_names[0])
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] in ["q_proj", "k_proj", "v_proj"]:
+ pointer = getattr(pointer, "c_attn")
+ pointer = getattr(pointer, "weight")
+ elif len(name) == 3 and name[1] == "attn" and scope_names[0] == "c_proj":
+ pointer = getattr(pointer, scope_names[0])
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "wtet":
+ pointer = getattr(pointer, "lm_head")
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "sos":
+ pointer = getattr(pointer, "wte")
+ pointer = getattr(pointer, "weight")
+ else:
+ pointer = getattr(pointer, scope_names[0])
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+
+ if len(name) > 1 and name[1] == "attn" or name[-1] == "wtet" or name[-1] == "sos" or name[-1] == "wte":
+ pass # array is used to initialize only part of the pointer so sizes won't match
+ else:
+ try:
+ assert pointer.shape == array.shape
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+
+ logger.info("Initialize PyTorch weight {}".format(name))
+
+ if name[-1] == "q_proj":
+ pointer.data[:, : config.n_embd] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T
+ elif name[-1] == "k_proj":
+ pointer.data[:, config.n_embd : 2 * config.n_embd] = torch.from_numpy(
+ array.reshape(config.n_embd, config.n_embd)
+ ).T
+ elif name[-1] == "v_proj":
+ pointer.data[:, 2 * config.n_embd :] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T
+ elif len(name) == 3 and name[1] == "attn" and name[2] == "c_proj":
+ pointer.data = torch.from_numpy(array.reshape(config.n_embd, config.n_embd))
+ elif name[-1] == "wtet":
+ pointer.data = torch.from_numpy(array)
+ elif name[-1] == "wte":
+ pointer.data[: config.vocab_size - 1, :] = torch.from_numpy(array)
+ elif name[-1] == "sos":
+ pointer.data[-1] = torch.from_numpy(array)
+ else:
+ pointer.data = torch.from_numpy(array)
+
+ return model
+
+
+class ImageGPTLayerNorm(nn.Module):
+ def __init__(self, hidden_size: Tuple[int], eps: float = 1e-5):
+ super().__init__()
+ self.eps = eps
+ self.weight = nn.Parameter(torch.Tensor(hidden_size))
+
+ def forward(self, tensor: torch.Tensor) -> tuple:
+ # input is not mean centered
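+ # RMS-style norm: divide by sqrt(mean(x**2) + eps) over the last dim and scale by the learned weight (no bias).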
+ return (
+ tensor
+ / torch.sqrt(torch.mean(torch.square(tensor), axis=-1, keepdim=True) + self.eps)
+ * self.weight.data[..., :]
+ )
+
+
+class ImageGPTAttention(nn.Module):
+ def __init__(self, config, is_cross_attention: Optional[bool] = False, layer_idx: Optional[int] = None):
+ super().__init__()
+
+ max_positions = config.max_position_embeddings
+ self.register_buffer(
+ "bias",
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
+ 1, 1, max_positions, max_positions
+ ),
+ persistent=False,
+ )
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
+
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ self.split_size = self.embed_dim
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+
+ self.scale_attn_weights = config.scale_attn_weights
+ self.is_cross_attention = is_cross_attention
+
+ # Layer-wise attention scaling, reordering, and upcasting
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
+ self.layer_idx = layer_idx
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
+
+ if self.is_cross_attention:
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
+ else:
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
+
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
+
+ # Prune conv1d layers
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
+
+ # Update hyper params
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
+ self.num_heads = self.num_heads - len(heads)
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+ if self.scale_attn_weights:
+ attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
+
+ # Layer-wise attention scaling
+ if self.scale_attn_by_inverse_layer_idx:
+ attn_weights = attn_weights / float(self.layer_idx + 1)
+
+ if not self.is_cross_attention:
+ # if only "normal" attention layer implements causal mask
+ query_length, key_length = query.size(-2), key.size(-2)
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
+ mask_value = torch.finfo(attn_weights.dtype).min
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
+
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
+ attn_weights = attn_weights.type(value.dtype)
+ attn_weights = self.attn_dropout(attn_weights)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_output = torch.matmul(attn_weights, value)
+
+ return attn_output, attn_weights
+
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
+ bsz, num_heads, q_seq_len, dk = query.size()
+ _, _, k_seq_len, _ = key.size()
+
+ # Preallocate attn_weights for `baddbmm`
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
+
+ # Compute Scale Factor
+ scale_factor = 1.0
+ if self.scale_attn_weights:
+ scale_factor /= float(value.size(-1)) ** 0.5
+
+ if self.scale_attn_by_inverse_layer_idx:
+ scale_factor /= float(self.layer_idx + 1)
+
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
+ with autocast(enabled=False):
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
+
+ if not self.is_cross_attention:
+ # if only "normal" attention layer implements causal mask
+ query_length, key_length = query.size(-2), key.size(-2)
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
+ mask_value = torch.finfo(attn_weights.dtype).min
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
+
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
+ if attn_weights.dtype != torch.float32:
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
+ attn_weights = attn_weights.type(value.dtype)
+ attn_weights = self.attn_dropout(attn_weights)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_output = torch.matmul(attn_weights, value)
+
+ return attn_output, attn_weights
+
+ def _split_heads(self, tensor, num_heads, attn_head_size):
+ """
+ Splits hidden_size dim into attn_head_size and num_heads
+ """
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
+ tensor = tensor.view(*new_shape)
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
+
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
+ """
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
+ """
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
+ return tensor.view(new_shape)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> tuple:
+ if encoder_hidden_states is not None:
+ if not hasattr(self, "q_attn"):
+ raise ValueError(
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
+ "Please make sure to instantiate class with `ImageGPTAttention(..., is_cross_attention=True)`."
+ )
+
+ query = self.q_attn(hidden_states)
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
+ attention_mask = encoder_attention_mask
+ else:
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
+
+ query = self._split_heads(query, self.num_heads, self.head_dim)
+ key = self._split_heads(key, self.num_heads, self.head_dim)
+ value = self._split_heads(value, self.num_heads, self.head_dim)
+
+ if layer_past is not None:
+ past_key, past_value = layer_past
+ key = torch.cat((past_key, key), dim=-2)
+ value = torch.cat((past_value, value), dim=-2)
+
+ if use_cache is True:
+ present = (key, value)
+ else:
+ present = None
+
+ if self.reorder_and_upcast_attn:
+ attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
+ else:
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
+
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
+ attn_output = self.c_proj(attn_output)
+ attn_output = self.resid_dropout(attn_output)
+
+ outputs = (attn_output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs # a, present, (attentions)
+
+
+class ImageGPTMLP(nn.Module):
+ def __init__(self, intermediate_size, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
+ self.act = ACT2FN[config.activation_function]
+ self.dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.c_fc(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.c_proj(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class ImageGPTBlock(nn.Module):
+ def __init__(self, config, layer_idx=None):
+ super().__init__()
+ hidden_size = config.hidden_size
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
+
+ self.ln_1 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+ self.attn = ImageGPTAttention(config, layer_idx=layer_idx)
+ self.ln_2 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+
+ if config.add_cross_attention:
+ self.crossattention = ImageGPTAttention(config, is_cross_attention=True, layer_idx=layer_idx)
+ self.ln_cross_attn = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+
+ self.mlp = ImageGPTMLP(inner_dim, config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> tuple:
+ residual = hidden_states
+ hidden_states = self.ln_1(hidden_states)
+ attn_outputs = self.attn(
+ hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
+ outputs = attn_outputs[1:]
+ # residual connection
+ hidden_states = attn_output + residual
+
+ if encoder_hidden_states is not None:
+ # add one self-attention block for cross-attention
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
+ "cross-attention layers by setting `config.add_cross_attention=True`"
+ )
+ residual = hidden_states
+ hidden_states = self.ln_cross_attn(hidden_states)
+ cross_attn_outputs = self.crossattention(
+ hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+ attn_output = cross_attn_outputs[0]
+ # residual connection
+ hidden_states = residual + attn_output
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
+
+ residual = hidden_states
+ hidden_states = self.ln_2(hidden_states)
+ feed_forward_hidden_states = self.mlp(hidden_states)
+ # residual connection
+ hidden_states = residual + feed_forward_hidden_states
+
+ outputs = (hidden_states,) + (outputs if use_cache else outputs[1:])
+
+ return outputs # hidden_states, present, (attentions, cross_attentions)
+
+
+class ImageGPTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ImageGPTConfig
+ load_tf_weights = load_tf_weights_in_imagegpt
+ base_model_prefix = "transformer"
+ main_input_name = "input_ids"
+ supports_gradient_checkpointing = True
+
+ def __init__(self, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, Conv1D)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, ImageGPTLayerNorm):
+ module.weight.data.fill_(1.0)
+
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+ #
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+ for name, p in module.named_parameters():
+ if "c_proj" in name and "weight" in name:
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
+
+
+IMAGEGPT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+ heads, etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`ImageGPTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+IMAGEGPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
+ `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
+ sequence tokens in the vocabulary.
+
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
+ `input_ids`.
+
+ Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
+
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
+ `past_key_values`).
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ImageGPT Model transformer outputting raw hidden-states without any specific head on top.",
+ IMAGEGPT_START_DOCSTRING,
+)
+class ImageGPTModel(ImageGPTPreTrainedModel):
+ def __init__(self, config: ImageGPTConfig):
+ super().__init__(config)
+
+ self.embed_dim = config.hidden_size
+
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
+
+ self.drop = nn.Dropout(config.embd_pdrop)
+ self.h = nn.ModuleList([ImageGPTBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
+ self.ln_f = ImageGPTLayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.wte
+
+ def set_input_embeddings(self, new_embeddings):
+ self.wte = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
+ """
+ for layer, heads in heads_to_prune.items():
+ self.h[layer].attn.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs: Any,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, ImageGPTModel
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
+ >>> model = ImageGPTModel.from_pretrained("openai/imagegpt-small")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+
+ if "pixel_values" in kwargs:
+ warnings.warn(
+ "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
+ " instead.",
+ FutureWarning,
+ )
+
+ if input_ids is not None:
+ raise ValueError(
+ "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`."
+ )
+
+ input_ids = kwargs.pop("pixel_values")
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ batch_size = input_ids.shape[0]
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ batch_size = inputs_embeds.shape[0]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+ if past_key_values is None:
+ past_length = 0
+ past_key_values = tuple([None] * len(self.h))
+ else:
+ past_length = past_key_values[0][0].size(-2)
+ if position_ids is None:
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0)
+
+ # ImageGPTAttention mask.
+ if attention_mask is not None:
+ if batch_size <= 0:
+ raise ValueError("batch_size has to be defined and > 0")
+ attention_mask = attention_mask.view(batch_size, -1)
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is more simple than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask = attention_mask[:, None, None, :]
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and the dtype's smallest value for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
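+ # Illustrative example (hypothetical values): a 2D mask [[1, 1, 0]] is broadcast to shape (1, 1, 1, 3)
+ # and becomes [0.0, 0.0, torch.finfo(self.dtype).min], so the masked position is effectively removed
+ # from the softmax inside each attention block.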
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # head_mask has shape n_layer x batch x n_heads x N x N
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.wte(input_ids)
+ position_embeds = self.wpe(position_ids)
+ hidden_states = inputs_embeds + position_embeds
+
+ if token_type_ids is not None:
+ token_type_embeds = self.wte(token_type_ids)
+ hidden_states = hidden_states + token_type_embeds
+
+ hidden_states = self.drop(hidden_states)
+
+ output_shape = input_shape + (hidden_states.size(-1),)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+ # Model parallel
+ if self.model_parallel:
+ torch.cuda.set_device(hidden_states.device)
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
+ if layer_past is not None:
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
+ # Ensure that attention_mask is always on the same device as hidden_states
+ if attention_mask is not None:
+ attention_mask = attention_mask.to(hidden_states.device)
+ if isinstance(head_mask, torch.Tensor):
+ head_mask = head_mask.to(hidden_states.device)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ None,
+ attention_mask,
+ head_mask[i],
+ encoder_hidden_states,
+ encoder_attention_mask,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ outputs = block(
+ hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ head_mask=head_mask[i],
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
+
+ # Model Parallel: If it's the last layer for that device, put things on the next device
+ if self.model_parallel:
+ for k, v in self.device_map.items():
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
+
+ hidden_states = self.ln_f(hidden_states)
+
+ hidden_states = hidden_states.view(*output_shape)
+ # Add last hidden state
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
+ if v is not None
+ )
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ IMAGEGPT_START_DOCSTRING,
+)
+class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config: ImageGPTConfig):
+ super().__init__(config)
+ self.transformer = ImageGPTModel(config)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size - 1, bias=False)
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, **kwargs):
+ token_type_ids = kwargs.get("token_type_ids", None)
+ # Omit tokens covered by past_key_values
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
+
+ attention_mask = kwargs.get("attention_mask", None)
+ position_ids = kwargs.get("position_ids", None)
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+ else:
+ position_ids = None
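+ # Illustrative example (hypothetical left-padded batch): attention_mask = [[0, 1, 1, 1]]
+ # -> cumsum(-1) - 1 = [[-1, 0, 1, 2]] -> masked_fill_(mask == 0, 1) = [[1, 0, 1, 2]],
+ # i.e. real tokens are numbered from 0 while the padded position gets a dummy position id.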
+ return {
+ "input_ids": input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "position_ids": position_ids,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ }
+
+ @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs: Any,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling
+ >>> import torch
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy as np
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
+ >>> model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ >>> model.to(device) # doctest: +IGNORE_RESULT
+
+ >>> # unconditional generation of 4 images
+ >>> batch_size = 4
+ >>> context = torch.full((batch_size, 1), model.config.vocab_size - 1) # initialize with SOS token
+ >>> context = context.to(device)
+ >>> output = model.generate(
+ ... input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40
+ ... )
+
+ >>> clusters = image_processor.clusters
+ >>> height = image_processor.size["height"]
+ >>> width = image_processor.size["width"]
+
+ >>> samples = output[:, 1:].cpu().detach().numpy()
+ >>> samples_img = [
+ ... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples
+ ... ] # convert color cluster tokens back to pixels
+ >>> f, axes = plt.subplots(1, batch_size, dpi=300)
+
+ >>> for img, ax in zip(samples_img, axes): # doctest: +IGNORE_RESULT
+ ... ax.axis("off")
+ ... ax.imshow(img)
+ ```"""
+
+ if "pixel_values" in kwargs:
+ warnings.warn(
+ "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
+ " instead.",
+ FutureWarning,
+ )
+
+ if input_ids is not None:
+ raise ValueError(
+ "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`."
+ )
+
+ input_ids = kwargs.pop("pixel_values")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+
+ lm_logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
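+ # Illustrative alignment (hypothetical sequence of length 4): shift_logits keeps positions 0..2 and
+ # shift_labels keeps positions 1..3, so the logits at position t are scored against labels[t + 1];
+ # the first token is never a prediction target.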
+
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ cross_attentions=transformer_outputs.cross_attentions,
+ )
+
+ @staticmethod
+ def _reorder_cache(
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+ ) -> Tuple[Tuple[torch.Tensor]]:
+ """
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+ beam_idx at every generation step.
+ """
+ return tuple(
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
+ for layer_past in past_key_values
+ )
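+ # Illustrative example (hypothetical beams): with beam_idx = tensor([1, 1, 0]), every cached key/value
+ # tensor of shape (num_beams, num_heads, seq_len, head_dim) is re-indexed along dim 0, so beams 0 and 1
+ # both continue from what was previously beam 1, and beam 2 continues from what was previously beam 0.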
+
+
+@add_start_docstrings(
+ """
+ The ImageGPT Model transformer with an image classification head on top (linear layer).
+ [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification.
+ """,
+ IMAGEGPT_START_DOCSTRING,
+)
+class ImageGPTForImageClassification(ImageGPTPreTrainedModel):
+ def __init__(self, config: ImageGPTConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = ImageGPTModel(config)
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs: Any,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Squared loss); if
+ `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, ImageGPTForImageClassification
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
+ >>> model = ImageGPTForImageClassification.from_pretrained("openai/imagegpt-small")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ ```"""
+
+ if "pixel_values" in kwargs:
+ warnings.warn(
+ "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
+ " instead.",
+ FutureWarning,
+ )
+
+ if input_ids is not None:
+ raise ValueError(
+ "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`."
+ )
+
+ input_ids = kwargs.pop("pixel_values")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ # average-pool the hidden states along the sequence dimension
+ pooled_hidden_states = hidden_states.mean(dim=1)
+ # project from (batch_size, hidden_size) to (batch_size, num_labels)
+ logits = self.score(pooled_hidden_states)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
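+ # Illustrative example (hypothetical labels): with num_labels=3, integer labels such as tensor([0, 2])
+ # resolve to "single_label_classification" (CrossEntropyLoss), whereas float multi-hot labels of shape
+ # (batch_size, 3), e.g. tensor([[1.0, 0.0, 1.0]]), resolve to "multi_label_classification" (BCEWithLogitsLoss).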
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..478ad56a72ba3c8c67814879979536c514d4b389
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__init__.py
@@ -0,0 +1,60 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_informer": [
+ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "InformerConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_informer"] = [
+ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "InformerForPrediction",
+ "InformerModel",
+ "InformerPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_informer import (
+ INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ InformerForPrediction,
+ InformerModel,
+ InformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..973b60a9f44305fa431ddf03a9508f82aa5bad29
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/configuration_informer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/configuration_informer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82811beb9912f9790468029a7905b3b23694e87d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/configuration_informer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/modeling_informer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/modeling_informer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f26ef556d76f13952654a6b95f45fa47f9b05ce
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/modeling_informer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/configuration_informer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/configuration_informer.py
new file mode 100644
index 0000000000000000000000000000000000000000..93b3f3556c97fe5c89e37a5c1ee92de5e149cac9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/configuration_informer.py
@@ -0,0 +1,249 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Informer model configuration"""
+
+from typing import List, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class InformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of an [`InformerModel`]. It is used to instantiate an
+ Informer model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Informer
+ [huggingface/informer-tourism-monthly](https://huggingface.co/huggingface/informer-tourism-monthly) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ prediction_length (`int`):
+ The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
+ typically dictated by the dataset, and we recommend setting it appropriately.
+ context_length (`int`, *optional*, defaults to `prediction_length`):
+ The context length for the encoder. If `None`, the context length will be the same as the
+ `prediction_length`.
+ distribution_output (`string`, *optional*, defaults to `"student_t"`):
+ The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
+ loss (`string`, *optional*, defaults to `"nll"`):
+ The loss function for the model corresponding to the `distribution_output` head. For parametric
+ distributions it is the negative log likelihood (nll) - which currently is the only supported one.
+ input_size (`int`, *optional*, defaults to 1):
+ The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
+ multivariate targets.
+ scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
+ Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
+ scaler is set to "mean".
+ lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
+ The lags of the input time series, used as covariates, often dictated by the frequency of the data. Defaults
+ to `[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it based on the characteristics of the dataset.
+ num_time_features (`int`, *optional*, defaults to 0):
+ The number of time features in the input time series.
+ num_dynamic_real_features (`int`, *optional*, defaults to 0):
+ The number of dynamic real valued features.
+ num_static_categorical_features (`int`, *optional*, defaults to 0):
+ The number of static categorical features.
+ num_static_real_features (`int`, *optional*, defaults to 0):
+ The number of static real valued features.
+ cardinality (`list[int]`, *optional*):
+ The cardinality (number of different values) for each of the static categorical features. Should be a list
+ of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ embedding_dimension (`list[int]`, *optional*):
+ The dimension of the embedding for each of the static categorical features. Should be a list of integers,
+ having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ d_model (`int`, *optional*, defaults to 64):
+ Dimensionality of the transformer layers.
+ encoder_layers (`int`, *optional*, defaults to 2):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 2):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
+ `"relu"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the encoder and decoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each encoder layer.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each decoder layer.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability used between the two layers of the feed-forward networks.
+ num_parallel_samples (`int`, *optional*, defaults to 100):
+ The number of samples to generate in parallel for each time step of inference.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated normal weight initialization distribution.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
+ attention_type (`str`, *optional*, defaults to `"prob"`):
+ Attention used in the encoder. This can be set to `"prob"` (Informer's ProbAttention) or `"full"` (vanilla
+ transformer's canonical self-attention).
+ sampling_factor (`int`, *optional*, defaults to 5):
+ ProbSparse sampling factor (only has an effect when `attention_type="prob"`). It is used to control the
+ reduced query matrix (Q_reduce) input length.
+ distil (`bool`, *optional*, defaults to `True`):
+ Whether to use distilling in the encoder.
+
+ Example:
+
+ ```python
+ >>> from transformers import InformerConfig, InformerModel
+
+ >>> # Initializing an Informer configuration with 12 time steps for prediction
+ >>> configuration = InformerConfig(prediction_length=12)
+
+ >>> # Randomly initializing a model (with random weights) from the configuration
+ >>> model = InformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "informer"
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "encoder_attention_heads",
+ "num_hidden_layers": "encoder_layers",
+ }
+
+ def __init__(
+ self,
+ prediction_length: Optional[int] = None,
+ context_length: Optional[int] = None,
+ distribution_output: str = "student_t",
+ loss: str = "nll",
+ input_size: int = 1,
+ lags_sequence: Optional[List[int]] = None,
+ scaling: Optional[Union[str, bool]] = "mean",
+ num_dynamic_real_features: int = 0,
+ num_static_real_features: int = 0,
+ num_static_categorical_features: int = 0,
+ num_time_features: int = 0,
+ cardinality: Optional[List[int]] = None,
+ embedding_dimension: Optional[List[int]] = None,
+ d_model: int = 64,
+ encoder_ffn_dim: int = 32,
+ decoder_ffn_dim: int = 32,
+ encoder_attention_heads: int = 2,
+ decoder_attention_heads: int = 2,
+ encoder_layers: int = 2,
+ decoder_layers: int = 2,
+ is_encoder_decoder: bool = True,
+ activation_function: str = "gelu",
+ dropout: float = 0.05,
+ encoder_layerdrop: float = 0.1,
+ decoder_layerdrop: float = 0.1,
+ attention_dropout: float = 0.1,
+ activation_dropout: float = 0.1,
+ num_parallel_samples: int = 100,
+ init_std: float = 0.02,
+ use_cache=True,
+ # Informer arguments
+ attention_type: str = "prob",
+ sampling_factor: int = 5,
+ distil: bool = True,
+ **kwargs,
+ ):
+ # time series specific configuration
+ self.prediction_length = prediction_length
+ self.context_length = context_length or prediction_length
+ self.distribution_output = distribution_output
+ self.loss = loss
+ self.input_size = input_size
+ self.num_time_features = num_time_features
+ self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
+ self.scaling = scaling
+ self.num_dynamic_real_features = num_dynamic_real_features
+ self.num_static_real_features = num_static_real_features
+ self.num_static_categorical_features = num_static_categorical_features
+
+ # set cardinality
+ if cardinality and num_static_categorical_features > 0:
+ if len(cardinality) != num_static_categorical_features:
+ raise ValueError(
+ "The cardinality should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.cardinality = cardinality
+ else:
+ self.cardinality = [0]
+
+ # set embedding_dimension
+ if embedding_dimension and num_static_categorical_features > 0:
+ if len(embedding_dimension) != num_static_categorical_features:
+ raise ValueError(
+ "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.embedding_dimension = embedding_dimension
+ else:
+ self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
+
+ self.num_parallel_samples = num_parallel_samples
+
+ # Transformer architecture configuration
+ self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
+ self.d_model = d_model
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_attention_heads = decoder_attention_heads
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.decoder_layers = decoder_layers
+
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+
+ self.activation_function = activation_function
+ self.init_std = init_std
+
+ self.use_cache = use_cache
+
+ # Informer
+ self.attention_type = attention_type
+ self.sampling_factor = sampling_factor
+ self.distil = distil
+
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+ @property
+ def _number_of_features(self) -> int:
+ return (
+ sum(self.embedding_dimension)
+ + self.num_dynamic_real_features
+ + self.num_time_features
+ + self.num_static_real_features
+ + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
+ )
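+ # Illustrative sketch (hypothetical InformerConfig() with the defaults shown above): with input_size=1,
+ # lags_sequence=[1, 2, 3, 4, 5, 6, 7] and no categorical/real/time features, cardinality=[0] gives
+ # embedding_dimension=[0], so _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2 and
+ # feature_size = 1 * 7 + 2 = 9.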
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/modeling_informer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/modeling_informer.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf20477f375dd96c4931d90b996fd9cf8329ef18
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/informer/modeling_informer.py
@@ -0,0 +1,2046 @@
+# coding=utf-8
+# Copyright 2023 Amazon and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Informer model."""
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ SampleTSPredictionOutput,
+ Seq2SeqTSModelOutput,
+ Seq2SeqTSPredictionOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_informer import InformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "InformerConfig"
+
+
+from ..deprecated._archive_maps import INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Informer
+class InformerFeatureEmbedder(nn.Module):
+ """
+ Embed a sequence of categorical features.
+
+ Args:
+ cardinalities (`list[int]`):
+ List of cardinalities of the categorical features.
+ embedding_dims (`list[int]`):
+ List of embedding dimensions of the categorical features.
+ """
+
+ def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:
+ super().__init__()
+
+ self.num_features = len(cardinalities)
+ self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ if self.num_features > 1:
+ # we slice the last dimension, giving an array of length
+ # self.num_features with shape (N,T) or (N)
+ cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
+ else:
+ cat_feature_slices = [features]
+
+ return torch.cat(
+ [
+ embed(cat_feature_slice.squeeze(-1))
+ for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)
+ ],
+ dim=-1,
+ )
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer
+class InformerStdScaler(nn.Module):
+ """
+ Standardizes features by computing the mean and standard deviation along the first dimension, and then
+ normalizes the data by subtracting the mean and dividing by the standard deviation.
+ """
+
+ def __init__(self, config: InformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ Input data from which the scaling statistics are computed.
+ observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ Boolean mask indicating which entries of `data` are observed; the statistics are computed over these only.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
+ denominator = denominator.clamp_min(1.0)
+ loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
+
+ variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
+ scale = torch.sqrt(variance + self.minimum_scale)
+ return (data - loc) / scale, loc, scale
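+ # Illustrative example (hypothetical values): for a fully observed series with values [1., 2., 3.] along
+ # the scaled dimension, loc = 2.0 and scale = sqrt(2/3 + minimum_scale) ≈ 0.8165, so the scaled output is
+ # approximately [-1.22, 0.0, 1.22].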
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer
+class InformerMeanScaler(nn.Module):
+ """
+ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
+ accordingly.
+ """
+
+ def __init__(self, config: InformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10
+ self.default_scale = config.default_scale if hasattr(config, "default_scale") else None
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ Input data from which the scaling factor is computed.
+ observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ Boolean mask indicating which entries of `data` are observed; the scale is computed over these only.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
+ num_observed = observed_indicator.sum(self.dim, keepdim=True)
+
+ scale = ts_sum / torch.clamp(num_observed, min=1)
+
+ # If `default_scale` is provided, we use it, otherwise we use the scale
+ # of the batch.
+ if self.default_scale is None:
+ batch_sum = ts_sum.sum(dim=0)
+ batch_observations = torch.clamp(num_observed.sum(0), min=1)
+ default_scale = torch.squeeze(batch_sum / batch_observations)
+ else:
+ default_scale = self.default_scale * torch.ones_like(scale)
+
+ # apply default scale where there are no observations
+ scale = torch.where(num_observed > 0, scale, default_scale)
+
+ # ensure the scale is at least `self.minimum_scale`
+ scale = torch.clamp(scale, min=self.minimum_scale)
+ scaled_data = data / scale
+
+ if not self.keepdim:
+ scale = scale.squeeze(dim=self.dim)
+
+ return scaled_data, torch.zeros_like(scale), scale
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer
+class InformerNOPScaler(nn.Module):
+ """
+ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
+ """
+
+ def __init__(self, config: InformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor = None
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ Input data, returned unchanged (no scaling is applied).
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ return data, loc, scale
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average
+def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
+ """
+ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
+ meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
+
+ Args:
+ input_tensor (`torch.FloatTensor`):
+ Input tensor, of which the average must be computed.
+ weights (`torch.FloatTensor`, *optional*):
+ Weights tensor, of the same shape as `input_tensor`.
+ dim (`int`, *optional*):
+ The dim along which to average `input_tensor`.
+
+ Returns:
+ `torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
+ """
+ if weights is not None:
+ weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
+ sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
+ return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
+ else:
+ return input_tensor.mean(dim=dim)
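+ # Illustrative example (hypothetical values): weighted_average(torch.tensor([1.0, 2.0, 3.0]),
+ # weights=torch.tensor([1.0, 0.0, 1.0])) masks out the middle value and returns (1 + 3) / 2 = 2.0.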
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll
+def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
+ """
+ Computes the negative log likelihood loss from input distribution with respect to target.
+ """
+ return -input.log_prob(target)
+
+
+# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Informer
+class InformerSinusoidalPositionalEmbedding(nn.Embedding):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
+ super().__init__(num_positions, embedding_dim)
+ self.weight = self._init_weight(self.weight)
+
+ @staticmethod
+ def _init_weight(out: nn.Parameter) -> nn.Parameter:
+ """
+ Identical to XLM's create_sinusoidal_embeddings, except that features are not interleaved: the cos features
+ are in the second half of the vector, `[dim // 2 :]`.
+ """
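+ # angle for position `pos` and feature index `j`: pos / 10000 ** (2 * (j // 2) / dim); the sin
+ # terms fill the first half of each embedding vector and the cos terms fill the second half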
+ n_pos, dim = out.shape
+ position_enc = np.array(
+ [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
+ )
+ out.requires_grad = False # set early to avoid an error in pytorch-1.8+
+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
+ out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+ return out
+
+ @torch.no_grad()
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ bsz, seq_len = input_ids_shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ )
+ return super().forward(positions)
+
+
+ # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesValueEmbedding with TimeSeries->Informer
+class InformerValueEmbedding(nn.Module):
+ def __init__(self, feature_size, d_model):
+ super().__init__()
+ self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
+
+ def forward(self, x):
+ return self.value_projection(x)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Informer
+class InformerAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[InformerConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+class InformerProbSparseAttention(nn.Module):
+ """Probabilistic Attention mechanism to select the "active"
+ queries rather than the "lazy" queries and provides a sparse Transformer thus mitigating the quadratic compute and
+ memory requirements of vanilla attention"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ sampling_factor: int = 5,
+ bias: bool = True,
+ ):
+ super().__init__()
+ self.factor = sampling_factor
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ key_states_time_length = key_states.size(1) # L_K
+ log_key_states_time_length = np.ceil(np.log1p(key_states_time_length)).astype("int").item() # log_L_K
+
+ query_states_time_length = query_states.size(1) # L_Q
+ log_query_states_time_length = np.ceil(np.log1p(query_states_time_length)).astype("int").item() # log_L_Q
+
+ u_part = min(self.factor * query_states_time_length * log_key_states_time_length, key_states_time_length)
+ u = min(self.factor * log_query_states_time_length, query_states_time_length)
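+ # ProbSparse sampling sizes: all L_Q queries are scored against u_part ~ factor * L_Q * log(L_K)
+ # randomly sampled keys, and only the u ~ factor * log(L_Q) most "active" queries are kept.
+ # Illustrative example (numbers not from the source): with L_Q = L_K = 96 and sampling_factor = 5,
+ # ceil(log1p(96)) = 5, so u_part = min(5 * 96 * 5, 96) = 96 and u = min(5 * 5, 96) = 25.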
+
+ if key_states_time_length > 0:
+ index_sample = torch.randint(0, key_states_time_length, (u_part,))
+ k_sample = key_states[:, index_sample, :]
+ else:
+ k_sample = key_states
+
+ queries_keys_sample = torch.bmm(query_states, k_sample.transpose(1, 2)) # Q_K_sampled
+
+ # find the Top_k query with sparsity measurement
+ if u > 0:
+ sparsity_measurement = queries_keys_sample.max(dim=-1)[0] - torch.div(
+ queries_keys_sample.sum(dim=-1), key_states_time_length
+ ) # M
+ top_u_sparsity_measurement = sparsity_measurement.topk(u, sorted=False)[1] # M_top
+
+ # calculate q_reduce: query_states[:, top_u_sparsity_measurement]
+ dim_for_slice = torch.arange(query_states.size(0)).unsqueeze(-1)
+ q_reduce = query_states[dim_for_slice, top_u_sparsity_measurement]
+ else:
+ q_reduce = query_states
+ top_u_sparsity_measurement = None
+
+ # Use q_reduce to calculate attention weights
+ attn_weights = torch.bmm(q_reduce, key_states.transpose(1, 2))
+
+ src_len = key_states.size(1)
+ if attn_weights.size() != (bsz * self.num_heads, u, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, u, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ prob_mask = attention_mask.expand(bsz, self.num_heads, tgt_len, src_len).reshape(
+ bsz * self.num_heads, tgt_len, src_len
+ )
+
+ if top_u_sparsity_measurement is not None:
+ dim_for_slice = torch.arange(prob_mask.size(0)).unsqueeze(-1)
+ prob_mask = prob_mask[dim_for_slice, top_u_sparsity_measurement, :]
+
+ attn_weights = attn_weights.view(bsz, self.num_heads, u, src_len) + prob_mask.view(
+ bsz, self.num_heads, u, src_len
+ )
+ attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, u, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, u, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, u, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ # calculate context for updating the attn_output, based on:
+ # https://github.com/zhouhaoyi/Informer2020/blob/ac59c7447135473fb2aafeafe94395f884d5c7a5/models/attn.py#L74
+ if self.is_decoder:
+ # cast to float32 before operation to avoid overflow
+ context = value_states.cumsum(dim=-2, dtype=torch.float32).to(value_states.dtype)
+ else:
+ v_mean_dim_time = value_states.mean(dim=-2)
+ context = (
+ v_mean_dim_time.unsqueeze(dim=1)
+ .expand(bsz * self.num_heads, query_states_time_length, v_mean_dim_time.size(-1))
+ .clone()
+ )
+
+ if top_u_sparsity_measurement is not None:
+ # update context: copy the attention output to the context at top_u_sparsity_measurement index
+ dim_for_slice = torch.arange(context.size(0)).unsqueeze(-1)
+ context[dim_for_slice, top_u_sparsity_measurement, :] = attn_output
+ attn_output = context
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+# source: https://github.com/zhouhaoyi/Informer2020/blob/main/models/encoder.py
+class InformerConvLayer(nn.Module):
+ def __init__(self, c_in):
+ super().__init__()
+ self.downConv = nn.Conv1d(
+ in_channels=c_in,
+ out_channels=c_in,
+ kernel_size=3,
+ padding=1,
+ padding_mode="circular",
+ )
+ self.norm = nn.BatchNorm1d(c_in)
+ self.activation = nn.ELU()
+ self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
+
+ def forward(self, x):
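+ # x: (batch_size, seq_len, d_model); Conv1d expects channels first, so permute to
+ # (batch_size, d_model, seq_len). The stride-2 max pool roughly halves the time dimension
+ # before transposing back to (batch_size, ~seq_len / 2, d_model).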
+ x = self.downConv(x.permute(0, 2, 1))
+ x = self.norm(x)
+ x = self.activation(x)
+ x = self.maxPool(x)
+ x = x.transpose(1, 2)
+ return x
+
+
+class InformerEncoderLayer(nn.Module):
+ def __init__(self, config: InformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ if config.attention_type == "prob":
+ self.self_attn = InformerProbSparseAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ sampling_factor=config.sampling_factor,
+ )
+ else:
+ self.self_attn = InformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ attention_mask: torch.FloatTensor,
+ layer_head_mask: torch.FloatTensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
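+ # if fp16 activations overflowed (inf/nan), clamp the tensor back into the representable range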
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class InformerDecoderLayer(nn.Module):
+ def __init__(self, config: InformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ if config.attention_type == "prob":
+ self.self_attn = InformerProbSparseAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ sampling_factor=config.sampling_factor,
+ is_decoder=True,
+ )
+ else:
+ self.self_attn = InformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = InformerAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(decoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class InformerPreTrainedModel(PreTrainedModel):
+ config_class = InformerConfig
+ base_model_prefix = "model"
+ main_input_name = "past_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding) and not isinstance(module, InformerSinusoidalPositionalEmbedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+INFORMER_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`InformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+INFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series, that serve as context in order to predict the future. The sequence size of
+ this tensor must be larger than the `context_length` of the model, since the model will use the larger size
+ to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
+ context".
+
+ The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
+ `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
+ look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
+ the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
+ `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+ Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
+ variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things like
+ "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
+ could also be so-called "age" features, which basically help the model know "at which point in life" a
+ time-series is. Age features have small values for distant past time steps and increase monotonically the
+ more we approach the current time step. Holiday features are also a good example of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Informer
+ requires you to provide additional time features. The Informer only learns additional embeddings for
+ `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
+ must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to the
+ values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
+ Future values of the time series, that serve as labels for the model. The `future_values` is what the
+ Transformer needs during training to learn to output, given the `past_values`.
+
+ The sequence length here is equal to `prediction_length`.
+
+ See the demo notebook and code snippets for details.
+
+ Optionally, during training any missing values need to be replaced with zeros and indicated via the
+ `future_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
+ variates in the time series per time step.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to `future_values`.
+ These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
+ Fourier features). These could also be so-called "age" features, which basically help the model know "at
+ which point in life" a time-series is. Age features have small values for distant past time steps and
+ increase monotonically the more we approach the current time step. Holiday features are also a good example
+ of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Informer
+ requires you to provide additional time features. The Informer only learns additional embeddings for
+ `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
+ must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ This mask is used to filter out missing values for the final loss calculation.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to
+ make sure the model can only look at previous inputs in order to predict the future.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class InformerEncoder(InformerPreTrainedModel):
+ """
+ Informer encoder consisting of *config.encoder_layers* self-attention layers with distillation layers. Each
+ attention layer is an [`InformerEncoderLayer`].
+
+ Args:
+ config: InformerConfig
+ """
+
+ def __init__(self, config: InformerConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ self.gradient_checkpointing = False
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = InformerSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([InformerEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
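+ # with distillation enabled, every encoder layer except the last is followed by a
+ # convolutional "distilling" layer that roughly halves the temporal dimension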
+ if config.distil:
+ self.conv_layers = nn.ModuleList(
+ [InformerConvLayer(config.d_model) for _ in range(config.encoder_layers - 1)]
+ )
+ self.conv_layers.append(None)
+ else:
+ self.conv_layers = [None] * config.encoder_layers
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(inputs_embeds.size())
+
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, (encoder_layer, conv_layer) in enumerate(zip(self.layers, self.conv_layers)):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ if conv_layer is not None:
+ output = self._gradient_checkpointing_func(conv_layer, layer_outputs[0])
+ layer_outputs = (output,) + layer_outputs[1:]
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+ if conv_layer is not None:
+ output = conv_layer(layer_outputs[0])
+ layer_outputs = (output,) + layer_outputs[1:]
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerDecoder with TimeSeriesTransformer->Informer,TimeSeriesTransformerConfig->InformerConfig,time-series-transformer->informer,Transformer->Informer,TimeSeries->Informer
+class InformerDecoder(InformerPreTrainedModel):
+ """
+ Informer decoder consisting of *config.decoder_layers* layers. Each layer is an
+ [`InformerDecoderLayer`].
+
+ Args:
+ config: InformerConfig
+ """
+
+ def __init__(self, config: InformerConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = InformerSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([InformerDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_shape = inputs_embeds.size()[:-1]
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ hidden_states = self.value_embedding(inputs_embeds)
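+ # decoder positions start at `config.context_length` (right after the encoder window), which is
+ # why `past_key_values_length` is set to the context length below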
+ embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length)
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {attn_mask.size()[0]}."
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Informer Model outputting raw hidden-states without any specific head on top.",
+ INFORMER_START_DOCSTRING,
+)
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerModel with TimeSeriesTransformer->Informer,TIME_SERIES_TRANSFORMER->INFORMER,time-series-transformer->informer,TimeSeries->Informer
+class InformerModel(InformerPreTrainedModel):
+ def __init__(self, config: InformerConfig):
+ super().__init__(config)
+
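+ # choose how past values are normalized: mean absolute scaling (default), standard scaling, or no scaling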
+ if config.scaling == "mean" or config.scaling is True:
+ self.scaler = InformerMeanScaler(config)
+ elif config.scaling == "std":
+ self.scaler = InformerStdScaler(config)
+ else:
+ self.scaler = InformerNOPScaler(config)
+
+ if config.num_static_categorical_features > 0:
+ self.embedder = InformerFeatureEmbedder(
+ cardinalities=config.cardinality,
+ embedding_dims=config.embedding_dimension,
+ )
+
+ # transformer encoder-decoder and mask initializer
+ self.encoder = InformerEncoder(config)
+ self.decoder = InformerDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @property
+ def _past_length(self) -> int:
+ return self.config.context_length + max(self.config.lags_sequence)
+
+ def get_lagged_subsequences(
+ self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0
+ ) -> torch.Tensor:
+ """
+ Returns lagged subsequences of a given sequence as a tensor of shape (N, S, C, I), where S =
+ subsequences_length and I = len(indices). Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
+
+ Args:
+ sequence: Tensor
+ The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
+ subsequences_length : int
+ Length of the subsequences to be extracted.
+ shift: int
+ Shift the lags by this amount back.
+ """
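+ # Illustrative example (numbers not from the source): with lags_sequence = [1, 2], shift = 0 and
+ # subsequences_length = 3, indices = [1, 2], so the stacked output contains sequence[:, -4:-1, ...]
+ # and sequence[:, -5:-2, ...] along the last dimension (assuming the history is long enough).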
+ sequence_length = sequence.shape[1]
+ indices = [lag - shift for lag in self.config.lags_sequence]
+
+ if max(indices) + subsequences_length > sequence_length:
+ raise ValueError(
+ f"lags cannot go further than history length, found lag {max(indices)} "
+ f"while history length is only {sequence_length}"
+ )
+
+ lagged_values = []
+ for lag_index in indices:
+ begin_index = -lag_index - subsequences_length
+ end_index = -lag_index if lag_index > 0 else None
+ lagged_values.append(sequence[:, begin_index:end_index, ...])
+ return torch.stack(lagged_values, dim=-1)
+
+ def create_network_inputs(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ ):
+ # time feature
+ time_feat = (
+ torch.cat(
+ (
+ past_time_features[:, self._past_length - self.config.context_length :, ...],
+ future_time_features,
+ ),
+ dim=1,
+ )
+ if future_values is not None
+ else past_time_features[:, self._past_length - self.config.context_length :, ...]
+ )
+
+ # target
+ if past_observed_mask is None:
+ past_observed_mask = torch.ones_like(past_values)
+
+ context = past_values[:, -self.config.context_length :]
+ observed_context = past_observed_mask[:, -self.config.context_length :]
+ _, loc, scale = self.scaler(context, observed_context)
+
+ inputs = (
+ (torch.cat((past_values, future_values), dim=1) - loc) / scale
+ if future_values is not None
+ else (past_values - loc) / scale
+ )
+
+ # static features
+ log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()
+ log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()
+ static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
+
+ if static_real_features is not None:
+ static_feat = torch.cat((static_real_features, static_feat), dim=1)
+ if static_categorical_features is not None:
+ embedded_cat = self.embedder(static_categorical_features)
+ static_feat = torch.cat((embedded_cat, static_feat), dim=1)
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
+
+ # all features
+ features = torch.cat((expanded_static_feat, time_feat), dim=-1)
+
+ # lagged features
+ subsequences_length = (
+ self.config.context_length + self.config.prediction_length
+ if future_values is not None
+ else self.config.context_length
+ )
+ lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+ if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
+ raise ValueError(
+ f"input length {reshaped_lagged_sequence.shape[1]} and time feature length {time_feat.shape[1]} do not match"
+ )
+
+ # transformer inputs
+ transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1)
+
+ return transformer_inputs, loc, scale, static_feat
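+
+ # Shape note (illustrative): `transformer_inputs` covers `subsequences_length` time steps; in `forward` below, its
+ # first `config.context_length` steps feed the encoder and the remaining `config.prediction_length` steps (present
+ # only when `future_values` is given) feed the decoder. Its last dimension concatenates the reshaped lagged values
+ # with the expanded static features and the time features built above.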
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(INFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import InformerModel
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly")
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_inputs, loc, scale, static_feat = self.create_network_inputs(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ )
+
+ if encoder_outputs is None:
+ enc_input = transformer_inputs[:, : self.config.context_length, ...]
+ encoder_outputs = self.encoder(
+ inputs_embeds=enc_input,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ dec_input = transformer_inputs[:, self.config.context_length :, ...]
+ decoder_outputs = self.decoder(
+ inputs_embeds=dec_input,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
+
+ return Seq2SeqTSModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ loc=loc,
+ scale=scale,
+ static_features=static_feat,
+ )
+
+
+@add_start_docstrings(
+ "The Informer Model with a distribution head on top for time-series forecasting.",
+ INFORMER_START_DOCSTRING,
+)
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerForPrediction with TimeSeriesTransformer->Informer,TIME_SERIES_TRANSFORMER->INFORMER,time-series-transformer->informer
+class InformerForPrediction(InformerPreTrainedModel):
+ def __init__(self, config: InformerConfig):
+ super().__init__(config)
+ self.model = InformerModel(config)
+ if config.distribution_output == "student_t":
+ self.distribution_output = StudentTOutput(dim=config.input_size)
+ elif config.distribution_output == "normal":
+ self.distribution_output = NormalOutput(dim=config.input_size)
+ elif config.distribution_output == "negative_binomial":
+ self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
+ else:
+ raise ValueError(f"Unknown distribution output {config.distribution_output}")
+
+ self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
+ self.target_shape = self.distribution_output.event_shape
+
+ if config.loss == "nll":
+ self.loss = nll
+ else:
+ raise ValueError(f"Unknown loss function {config.loss}")
+
+ # Initialize weights of distribution_output and apply final processing
+ self.post_init()
+
+ def output_params(self, dec_output):
+ return self.parameter_projection(dec_output)
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ @torch.jit.ignore
+ def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
+ sliced_params = params
+ if trailing_n is not None:
+ sliced_params = [p[:, -trailing_n:] for p in params]
+ return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
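+
+ # Usage note (illustrative): with the default `trailing_n=None` the full parameter tensors are used, while e.g.
+ # `output_distribution(params, loc=loc, scale=scale, trailing_n=1)` would slice each parameter tensor down to its
+ # final time step before building the distribution.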
+
+ @add_start_docstrings_to_model_forward(INFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ future_observed_mask: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import InformerForPrediction
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = InformerForPrediction.from_pretrained(
+ ... "huggingface/informer-tourism-monthly"
+ ... )
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> loss = outputs.loss
+ >>> loss.backward()
+
+ >>> # during inference, one only provides past values
+ >>> # as well as possible additional features
+ >>> # the model autoregressively generates future values
+ >>> outputs = model.generate(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> mean_prediction = outputs.sequences.mean(dim=1)
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if future_values is not None:
+ use_cache = False
+
+ outputs = self.model(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ prediction_loss = None
+ params = None
+ if future_values is not None:
+ params = self.output_params(outputs[0]) # outputs.last_hidden_state
+ # loc is 3rd last and scale is 2nd last output
+ distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
+
+ loss = self.loss(distribution, future_values)
+
+ if future_observed_mask is None:
+ future_observed_mask = torch.ones_like(future_values)
+
+ if len(self.target_shape) == 0:
+ loss_weights = future_observed_mask
+ else:
+ loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
+
+ prediction_loss = weighted_average(loss, weights=loss_weights)
+
+ if not return_dict:
+ outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:]
+ return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs
+
+ return Seq2SeqTSPredictionOutput(
+ loss=prediction_loss,
+ params=params,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ loc=outputs.loc,
+ scale=outputs.scale,
+ static_features=outputs.static_features,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ future_time_features: torch.Tensor,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) -> SampleTSPredictionOutput:
+ r"""
+ Greedily generate sequences of sample predictions from a model with a probability distribution head.
+
+ Parameters:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series that serve as context in order to predict the future. The sequence size
+ of this tensor must be larger than the `context_length` of the model, since the model will use the
+ larger size to construct lag features, i.e. additional values from the past which are added in order to
+ serve as "extra context".
+
+ The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which, if
+ no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
+ look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
+ of the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features,
+ such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+ Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
+ of variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things
+ like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
+ These could also be so-called "age" features, which basically help the model know "at which point in
+ life" a time-series is. Age features have small values for distant past time steps and increase
+ monotonically the more we approach the current time step. Holiday features are also a good example of
+ time features.
+
+ These features serve as the "positional encodings" of the inputs. So, contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires these additional time features to be provided. The Time Series Transformer
+ only learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to sampled
+ predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
+ (for instance as Fourier features). These could also be so-called "age" features, which basically help
+ the model know "at which point in life" a time-series is. Age features have small values for distant
+ past time steps and increase monotonically the more we approach the current time step. Holiday features
+ are also a good example of time features.
+
+ These features serve as the "positional encodings" of the inputs. So, contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires these additional time features to be provided. The Time Series Transformer
+ only learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to
+ the values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over
+ time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers.
+
+ Return:
+ [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
+ samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
+ multivariate predictions.
+ """
+ outputs = self(
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ past_time_features=past_time_features,
+ past_values=past_values,
+ past_observed_mask=past_observed_mask,
+ future_time_features=future_time_features,
+ future_values=None,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ use_cache=True,
+ )
+
+ decoder = self.model.get_decoder()
+ enc_last_hidden = outputs.encoder_last_hidden_state
+ loc = outputs.loc
+ scale = outputs.scale
+ static_feat = outputs.static_features
+
+ num_parallel_samples = self.config.num_parallel_samples
+ repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
+ repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_past_values = (
+ past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc
+ ) / repeated_scale
+
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1)
+ features = torch.cat((expanded_static_feat, future_time_features), dim=-1)
+ repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ future_samples = []
+
+ # greedy decoding
+ for k in range(self.config.prediction_length):
+ lagged_sequence = self.model.get_lagged_subsequences(
+ sequence=repeated_past_values,
+ subsequences_length=1 + k,
+ shift=1,
+ )
+
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+ decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1)
+
+ dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
+ dec_last_hidden = dec_output.last_hidden_state
+
+ params = self.parameter_projection(dec_last_hidden[:, -1:])
+ distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
+ next_sample = distr.sample()
+
+ repeated_past_values = torch.cat(
+ (repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1
+ )
+ future_samples.append(next_sample)
+
+ concat_future_samples = torch.cat(future_samples, dim=1)
+
+ return SampleTSPredictionOutput(
+ sequences=concat_future_samples.reshape(
+ (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
+ )
+ )
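+
+ # Shape note (illustrative): each `next_sample` covers one time step for every parallel sample, so
+ # `concat_future_samples` has shape (batch_size * num_parallel_samples, prediction_length) + self.target_shape and
+ # is reshaped to (batch_size, num_parallel_samples, prediction_length) + self.target_shape before being returned.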
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..93b9121c33f3932a86813cf5d47b102c503a86d8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__init__.py
@@ -0,0 +1,84 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_longt5"] = [
+ "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "LongT5EncoderModel",
+ "LongT5ForConditionalGeneration",
+ "LongT5Model",
+ "LongT5PreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_longt5"] = [
+ "FlaxLongT5ForConditionalGeneration",
+ "FlaxLongT5Model",
+ "FlaxLongT5PreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_longt5 import (
+ LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
+ LongT5EncoderModel,
+ LongT5ForConditionalGeneration,
+ LongT5Model,
+ LongT5PreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_longt5 import (
+ FlaxLongT5ForConditionalGeneration,
+ FlaxLongT5Model,
+ FlaxLongT5PreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e75a1843b2b0ed1a382a249f68f19ab5d8193f6f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c86ff1e503701b2080811c70297b141ed6caf5c1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09d183796458970068dde716bd8be4eb474f2925
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33397857ea5d68805d15643fb047f047dd6ed919
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..739dda4937b4528a95748150530c27cc9df143fb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6e8284ed0af84ec7d661885a39de6cd19c6371f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py
@@ -0,0 +1,174 @@
+# coding=utf-8
+# Copyright 2022, The LongT5 Authors and HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" LongT5 model configuration"""
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxSeq2SeqConfigWithPast
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class LongT5Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is
+ used to instantiate a LongT5 model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5
+ [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Arguments:
+ vocab_size (`int`, *optional*, defaults to 32128):
+ Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`LongT5Model`].
+ d_model (`int`, *optional*, defaults to 512):
+ Size of the encoder layers and the pooler layer.
+ d_kv (`int`, *optional*, defaults to 64):
+ Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
+ num_heads`.
+ d_ff (`int`, *optional*, defaults to 2048):
+ Size of the intermediate feed forward layer in each `LongT5Block`.
+ num_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer encoder.
+ num_decoder_layers (`int`, *optional*):
+ Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
+ num_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ local_radius (`int`, *optional*, defaults to 127):
+ Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism.
+ global_block_size (`int`, *optional*, defaults to 16):
+ Length of blocks into which an input sequence is divided for a global token representation. Used only for
+ `encoder_attention_type = "transient-global"`.
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer.
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
+ The maximum distance of the longer sequences for the bucket separation.
+ dropout_rate (`float`, *optional*, defaults to 0.1):
+ The ratio for all dropout layers.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the layer normalization layers.
+ initializer_factor (`float`, *optional*, defaults to 1):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+ feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
+ Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the
+ `"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`.
+ encoder_attention_type (`string`, *optional*, defaults to `"local"`):
+ Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are
+ supported by LongT5 implementation.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ """
+
+ model_type = "longt5"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
+
+ def __init__(
+ self,
+ vocab_size=32128,
+ d_model=512,
+ d_kv=64,
+ d_ff=2048,
+ num_layers=6,
+ num_decoder_layers=None,
+ num_heads=8,
+ local_radius=127,
+ global_block_size=16,
+ relative_attention_num_buckets=32,
+ relative_attention_max_distance=128,
+ dropout_rate=0.1,
+ layer_norm_epsilon=1e-6,
+ initializer_factor=1.0,
+ feed_forward_proj="relu",
+ is_encoder_decoder=True,
+ encoder_attention_type="local",
+ use_cache=True,
+ pad_token_id=0,
+ eos_token_id=1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.d_kv = d_kv
+ self.d_ff = d_ff
+ self.num_layers = num_layers
+ # default = symmetry
+ self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
+ self.num_heads = num_heads
+ self.local_radius = local_radius
+ self.global_block_size = global_block_size
+ self.relative_attention_num_buckets = relative_attention_num_buckets
+ self.relative_attention_max_distance = relative_attention_max_distance
+ self.dropout_rate = dropout_rate
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_factor = initializer_factor
+ self.feed_forward_proj = feed_forward_proj
+ self.encoder_attention_type = encoder_attention_type
+ self.use_cache = use_cache
+
+ act_info = self.feed_forward_proj.split("-")
+ self.dense_act_fn = act_info[-1]
+ self.is_gated_act = act_info[0] == "gated"
+
+ if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
+ raise ValueError(
+ f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
+ "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
+ "'gated-gelu' or 'relu'"
+ )
+
+ # for backwards compatibility
+ if feed_forward_proj == "gated-gelu":
+ self.dense_act_fn = "gelu_new"
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ **kwargs,
+ )
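+
+ # Example (illustrative): `LongT5Config(feed_forward_proj="gated-gelu")` yields `is_gated_act=True` and, for
+ # backwards compatibility, `dense_act_fn="gelu_new"`, while the default `feed_forward_proj="relu"` yields
+ # `is_gated_act=False` and `dense_act_fn="relu"`.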
+
+
+class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ common_inputs = {
+ "input_ids": {0: "batch", 1: "encoder_sequence"},
+ "attention_mask": {0: "batch", 1: "encoder_sequence"},
+ }
+ if self.use_past:
+ common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+
+ return common_inputs
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 13
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a1394c719d2d836ebc59693755671b936291be5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
@@ -0,0 +1,215 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert T5/LongT5X checkpoints from the original repository to a JAX/FLAX model. This script is an extension of
+'src/transformers/models/t5/convert_t5x_checkpoint_to_flax'.
+"""
+
+import argparse
+
+from t5x import checkpoints
+
+from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
+
+
+def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
+ config = AutoConfig.from_pretrained(config_name)
+ flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
+ t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
+
+ split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]
+
+ if config.model_type == "t5":
+ encoder_attn_name = "SelfAttention"
+ elif config.model_type == "longt5" and config.encoder_attention_type == "local":
+ encoder_attn_name = "LocalSelfAttention"
+ elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ encoder_attn_name = "TransientGlobalSelfAttention"
+ else:
+ raise ValueError(
+ "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
+ " attribute with a value from ['local', 'transient-global']."
+ )
+
+ # Encoder
+ for layer_index in range(config.num_layers):
+ layer_name = f"layers_{str(layer_index)}"
+
+ # Self-Attention
+ t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
+ t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
+ t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
+ t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
+
+ # Global input layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
+
+ # Layer Normalization
+ t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
+
+ if split_mlp_wi:
+ t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+ t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+ else:
+ t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+ t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+ # Layer Normalization
+ t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+ # Assigning
+ flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
+
+ flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm
+
+ # Global input layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
+ "weight"
+ ] = t5x_global_layer_norm
+
+ if split_mlp_wi:
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+ else:
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+ flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm
+
+ flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
+
+ # Only for layer 0:
+ t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
+ flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
+ "embedding"
+ ] = t5x_encoder_rel_embedding
+
+ # Side/global relative position_bias + layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
+ flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
+ "embedding"
+ ] = t5x_encoder_global_rel_embedding
+
+ # Assigning
+ t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
+ flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
+
+ # Decoder
+ for layer_index in range(config.num_layers):
+ layer_name = f"layers_{str(layer_index)}"
+
+ # Self-Attention
+ t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
+ t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
+ t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
+ t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
+
+ # Layer Normalization
+ t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
+ "scale"
+ ]
+
+ # Encoder-Decoder-Attention
+ t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
+ t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
+ t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
+ t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
+ t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]
+
+ # Layer Normalization
+ t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
+
+ # MLP
+ if split_mlp_wi:
+ t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+ t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+ else:
+ t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+ t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+ # Layer Normalization
+ tx5_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+ # Assigning
+ flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
+
+ flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm
+
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
+
+ flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm
+
+ if split_mlp_wi:
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+ else:
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+
+ flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tx5_mlp_layer_norm
+
+ flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
+
+ # Decoder Normalization
+ tx5_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
+ flax_model.params["decoder"]["final_layer_norm"]["weight"] = tx5_decoder_norm
+
+ # Only for layer 0:
+ t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
+ flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
+ "embedding"
+ ] = t5x_decoder_rel_embedding
+
+ # Token Embeddings
+ tx5_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
+ flax_model.params["shared"]["embedding"] = tx5_token_embeddings
+
+ # LM Head (only in v1.1 and LongT5 checkpoints)
+ if "logits_dense" in t5x_model["target"]["decoder"]:
+ flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]
+
+ flax_model.save_pretrained(flax_dump_folder_path)
+ print("T5X Model was successfully converted!")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
+ )
+ parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
+ parser.add_argument(
+ "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
+ )
+ args = parser.parse_args()
+ convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..d47f644ba37da0383732874ca3634ec9088cd6ca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py
@@ -0,0 +1,2447 @@
+# coding=utf-8
+# Copyright 2022 LongT5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Flax LongT5 model."""
+
+
+import copy
+from typing import Any, Callable, List, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen import partitioning as nn_partitioning
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxSeq2SeqLMOutput,
+ FlaxSeq2SeqModelOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_longt5 import LongT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
+_CONFIG_FOR_DOC = "LongT5Config"
+
+remat = nn_partitioning.remat
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
+def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = jnp.zeros_like(input_ids)
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
+
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
+ return shifted_input_ids
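+
+# Example (illustrative): `shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=2)`
+# returns [[2, 5, 6]]: the sequence is shifted one position to the right, the decoder start token is prepended, and
+# any -100 label placeholders would be replaced by the pad token id.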
+
+
+def _pad_to_multiple(x: jnp.ndarray, block_len: int, axis: int, pad_value: int = 0) -> jnp.ndarray:
+ """Pad an array so that its sequence length will be a multiple of `block_len`."""
+ pad_len = -x.shape[axis] % block_len
+ pad = [(0, 0)] * x.ndim
+ pad[axis] = (0, pad_len)
+ x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value)
+ return x
+
+
+def _split_into_blocks(x: jnp.ndarray, block_len: int, axis: int) -> jnp.ndarray:
+ """Split an input array into blocks of a given `block_len` along the given `axis`. If the dimension length
+ is not a multiple of `block_len`, it will first be padded with the selected `pad_value`.
+ """
+ # pad tensor to multiple of block_len
+ if x.shape[axis] % block_len != 0:
+ x = _pad_to_multiple(x, block_len, axis, pad_value=0)
+ num_blocks = x.shape[axis] // block_len
+ output_shape = x.shape[:axis] + (num_blocks, block_len) + x.shape[(axis + 1) :]
+ return x.reshape(output_shape)
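+
+# Example (illustrative): for `x` of shape (2, 10, 512) with `block_len=4` and `axis=1`, `_pad_to_multiple` pads the
+# sequence axis by -10 % 4 = 2 positions and `_split_into_blocks` reshapes the padded (2, 12, 512) array into
+# (2, 3, 4, 512), i.e. 3 blocks of 4 tokens each.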
+
+
+def _concatenate_3_blocks(x: jnp.ndarray, block_axis: int, sequence_axis: int, pad_value: int = 0) -> jnp.ndarray:
+ """Concatenate three consecutive blocks for each input block for local attention.
+ For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
+ """
+ num_blocks = x.shape[block_axis]
+
+ pad = [(0, 0)] * x.ndim
+ pad[block_axis] = (1, 1)
+ # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
+ x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value)
+
+ blocks_list: List[jnp.ndarray] = []
+ for i in range(3):
+ # We use indexing approach here:
+ # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
+ indices = [slice(0, None)] * x.ndim
+ indices[block_axis] = slice(i, i + num_blocks)
+ indices = tuple(indices)
+ blocks_list.append(x[indices])
+ return jnp.concatenate(blocks_list, axis=sequence_axis) # [batch_size, num_blocks, 3 * block_len, ...]
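+
+# Example (illustrative): for blocked keys/values of shape (2, 3, 4, 512) with `block_axis=1` and `sequence_axis=2`,
+# the block axis is padded to 5, the three shifted views [0:3], [1:4] and [2:5] are taken, and concatenating them
+# along the sequence axis gives (2, 3, 12, 512): each block is paired with its left neighbour, itself and its right
+# neighbour.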
+
+
+def _make_3block_relative_position_ids(block_len: int) -> jnp.ndarray:
+ """Makes 3-blocked relative position ids for local attention."""
+ position_ids = jnp.arange(3 * block_len, dtype=jnp.int32)
+ center_position_ids = position_ids[block_len:-block_len]
+ relative_position_ids = position_ids[None, :] - center_position_ids[:, None] # [block_len, 3 * block_len]
+ return relative_position_ids
+
+
+def _mask_local_attention_mask(local_attention_mask: np.ndarray, block_len: int) -> jnp.ndarray:
+ """Mask local attention mask to enforce that tokens are not allowed to attend to tokens farther than `local_radius`."""
+ relative_position_ids = _make_3block_relative_position_ids(block_len)
+ locality_mask = jnp.abs(relative_position_ids) < block_len
+ locality_mask = locality_mask[None, None, :, :]
+ return jnp.logical_and(local_attention_mask, locality_mask)
+
+
+def _get_local_attention_mask(attention_mask: np.ndarray, block_len: int) -> jnp.ndarray:
+ """Prepare the attention mask to be applied for local attention."""
+ # [batch_size, num_blocks, block_len]
+ _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, axis=1)
+ # [batch_size, num_block, 3 * block_len]
+ _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_axis=1, sequence_axis=2)
+
+ _blocked_attention_mask = _blocked_attention_mask[..., None]
+ _3blocked_attention_mask = _3blocked_attention_mask[..., None, :]
+ # [batch_size, num_block, block_len, 3 * block_len]
+ local_attention_mask = jnp.logical_and(_blocked_attention_mask, _3blocked_attention_mask)
+ local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
+ # [batch_size, 1, num_block, block_len, 3 * block_len]
+ return local_attention_mask[:, None, ...]
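+
+# Shape walk-through (illustrative): an `attention_mask` of shape (batch_size, seq_len) is blocked into
+# (batch_size, num_blocks, block_len), expanded to the 3-block view (batch_size, num_blocks, 3 * block_len), combined
+# into a pairwise mask (batch_size, num_blocks, block_len, 3 * block_len), restricted to key positions at most
+# block_len - 1 (= `local_radius`) away from each query, and returned with a broadcastable head dimension as
+# (batch_size, 1, num_blocks, block_len, 3 * block_len).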
+
+
+def _make_global_fixed_block_ids(attention_mask: np.ndarray, global_block_size: int) -> Tuple[jnp.ndarray, np.ndarray]:
+ """Obtain the "fixed block" global id corresponding to each input token.
+
+ This implementation is a simplified version of the original Flaxformer implementation adapted from:
+ https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
+
+ In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make up
+ a whole fixed block, are assigned to the preceding block.
+
+ Padding tokens from the original sequence are represented by -1.
+ """
+ batch_size, seq_len = attention_mask.shape[:2]
+
+ def handle_orphan_tokens(block_ids: np.ndarray) -> jnp.ndarray:
+ block_ends = (jnp.arange(seq_len) % global_block_size) == global_block_size - 1
+ true_block_ends = jnp.logical_and(block_ends, block_ids >= 0)
+ full_blocks = true_block_ends.sum(-1)[..., None]
+ block_ids = jnp.minimum(block_ids, full_blocks - 1)
+ return block_ids
+
+ fixed_block_mask = jnp.ones_like(attention_mask) / global_block_size
+ fixed_block_mask = jnp.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
+ mask = jnp.where(attention_mask != 0.0, 1.0, -1000.0)
+ global_block_ids = jnp.maximum(
+ jnp.floor(mask + fixed_block_mask - 1.0), jnp.array(-1.0, dtype=attention_mask.dtype)
+ )
+ # set padding tokens to -1
+ global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
+ # [batch_size, seq_len]
+ global_block_ids = handle_orphan_tokens(global_block_ids)
+ num_globals = seq_len // global_block_size
+
+ # [batch_size, seq_len // global_block_size]
+ if num_globals > 0:
+ _sequence_block_ids_max = jnp.repeat(global_block_ids.max(axis=-1)[:, None], repeats=num_globals, axis=1)
+ else:
+ _sequence_block_ids_max = jnp.zeros((batch_size, 0), dtype=global_block_ids.dtype)
+ global_segment_ids = jnp.cumsum(jnp.ones((batch_size, num_globals)), axis=-1) - 1
+ global_segment_ids = jnp.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
+ return global_block_ids, global_segment_ids
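+
+# Worked example (illustrative): with attention_mask = [[1, 1, 1, 1, 1, 0, 0, 0]] and global_block_size = 4, the raw
+# block ids are [0, 0, 0, 0, 1, -1, -1, -1] (padding -> -1). Only one block is fully observed, so the orphan token at
+# position 4 is folded into the preceding block, giving block_ids = [[0, 0, 0, 0, 0, -1, -1, -1]] and
+# global_segment_ids = [[1, 0]] (the second global block is entirely padding).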
+
+
+def _make_side_relative_position_ids(attention_mask: np.ndarray, global_block_size: int) -> np.ndarray:
+ """Create the relative position tensor for local -> global attention."""
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
+ global_seq_len = global_segment_ids.shape[-1]
+ global_positions = jnp.arange(global_seq_len)
+ side_relative_position = global_positions - block_ids[..., None]
+ return side_relative_position
+
+
+def _create_global_aggregates(hidden_states: np.ndarray, block_ids: np.ndarray, global_seq_len: int) -> np.ndarray:
+ """Compute individual block aggregates by summing over individual blocks."""
+ # (batch..., seq_len, global_seq_len)
+ one_hot_block_ids = jax.nn.one_hot(block_ids, global_seq_len)
+ return jnp.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids)
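+
+# Note (illustrative): for `hidden_states` of shape (batch_size, seq_len, d_model) and `block_ids` of shape
+# (batch_size, seq_len), the einsum sums the hidden states of all tokens sharing a block id into one vector per global
+# block, producing (batch_size, global_seq_len, d_model); padding tokens (block id -1) one-hot encode to all zeros and
+# therefore contribute nothing.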
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerNorm with T5->LongT5
+class FlaxLongT5LayerNorm(nn.Module):
+ hidden_size: int
+ dtype: jnp.dtype = jnp.float32
+ eps: float = 1e-6
+ weight_init: Callable[..., np.ndarray] = jax.nn.initializers.ones
+
+ def setup(self):
+ self.weight = self.param("weight", self.weight_init, (self.hidden_size,))
+
+ def __call__(self, hidden_states):
+ """
+ Construct a layernorm module in the LongT5 style: no bias and no subtraction of mean.
+ """
+ # layer norm should always be calculated in float32
+ variance = jnp.power(hidden_states.astype("f4"), 2).mean(axis=-1, keepdims=True)
+ hidden_states = hidden_states / jnp.sqrt(variance + self.eps)
+
+ return self.weight * hidden_states
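+
+ # Formula note (illustrative): this is an RMS-style norm, y = weight * x / sqrt(mean(x**2) + eps); only the
+ # variance term is explicitly computed in float32.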
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseActDense with T5->LongT5
+class FlaxLongT5DenseActDense(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5)
+ wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5)
+
+ self.wi = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wo = nn.Dense(
+ self.config.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wo_init_std),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+ self.act = ACT2FN[self.config.dense_act_fn]
+
+ def __call__(self, hidden_states, deterministic=True):
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseGatedActDense with T5->LongT5
+class FlaxLongT5DenseGatedActDense(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5)
+ wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5)
+
+ self.wi_0 = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wi_1 = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wo = nn.Dense(
+ self.config.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wo_init_std),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+ self.act = ACT2FN[self.config.dense_act_fn]
+
+ def __call__(self, hidden_states, deterministic):
+ hidden_gelu = self.act(self.wi_0(hidden_states))
+ hidden_linear = self.wi_1(hidden_states)
+ hidden_states = hidden_gelu * hidden_linear
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerFF with T5->LongT5
+class FlaxLongT5LayerFF(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.is_gated_act:
+ self.DenseReluDense = FlaxLongT5DenseGatedActDense(self.config, dtype=self.dtype)
+ else:
+ self.DenseReluDense = FlaxLongT5DenseActDense(self.config, dtype=self.dtype)
+
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(self, hidden_states, deterministic=True):
+ forwarded_states = self.layer_norm(hidden_states)
+ forwarded_states = self.DenseReluDense(forwarded_states, deterministic=deterministic)
+ hidden_states = hidden_states + self.dropout(forwarded_states, deterministic=deterministic)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention with T5->LongT5
+class FlaxLongT5Attention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ causal: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, query_length, key_length):
+ """Compute binned relative position bias"""
+ context_position = jnp.arange(query_length, dtype="i4")[:, None]
+ memory_position = jnp.arange(key_length, dtype="i4")[None, :]
+
+ relative_position = memory_position - context_position
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=(not self.causal),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, :, :, :]
+ return values
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.inner_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = jax.lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = jax.lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions
+ # that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def _create_position_bias(
+ self, key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
+ ):
+ cache_is_filled = self.causal and self.has_variable("cache", "cached_key") and (not init_cache)
+ key_length = key_states.shape[1]
+ query_length = key_length if cache_is_filled else query_states.shape[1]
+
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(query_length, key_length)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, self.n_heads, query_length, key_length), dtype=self.dtype)
+
+ # if key and values are already calculated, only the last query position bias should be taken
+ if cache_is_filled:
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ position_bias = jax.lax.dynamic_slice(
+ position_bias,
+ (0, 0, causal_attention_mask_shift, 0),
+ (1, self.n_heads, seq_length, max_decoder_length),
+ )
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ use_cache=False,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # q, k, v projections
+ query_states = self.q(hidden_states) # (batch_size, seq_length, n_heads * dim_per_head)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # counteract the 1/sqrt(head_dim) scaling applied inside dot_product_attention_weights (T5-style attention is unscaled)
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ # for fast decoding causal attention mask should be shifted
+ causal_attention_mask_shift = (
+ self.variables["cache"]["cache_index"] if (self.has_variable("cache", "cached_key") and self.causal) else 0
+ )
+ # create causal attention_mask; attention_mask has to be defined when model is causal
+ if self.causal:
+ causal_attention_mask = make_causal_mask(attention_mask, dtype="bool")
+
+ # fast decoding for generate requires special attention_mask
+ if self.has_variable("cache", "cached_key"):
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_attention_mask = jax.lax.dynamic_slice(
+ causal_attention_mask,
+ (0, 0, causal_attention_mask_shift, 0),
+ (1, 1, seq_length, max_decoder_length),
+ )
+
+ # broadcast causal attention mask & attention mask to fit for merge
+ causal_attention_mask = jnp.broadcast_to(
+ causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:]
+ )
+ attention_mask = jnp.broadcast_to(
+ jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape
+ )
+ attention_mask = combine_masks(attention_mask, causal_attention_mask)
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # replace masked positions with the most negative finite value of the dtype
+ if attention_mask is not None:
+ mask_value = jnp.finfo(self.dtype).min
+ attention_mask = jax.lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, mask_value).astype(self.dtype),
+ )
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(
+ key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
+ )
+
+ if attention_mask is not None:
+ position_bias = position_bias + attention_mask
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5LocalAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.local_radius = self.config.local_radius
+ self.block_len = self.local_radius + 1
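+ # Note: block_len = local_radius + 1, so after the previous/current/next blocks are
+ # concatenated below, every token is guaranteed to see at least `local_radius` tokens
+ # on each side.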
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ memory_position = jnp.arange(3 * block_length, dtype="i4")
+ context_position = memory_position[block_length:-block_length]
+
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, None, :, :, :]
+ return values
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim)
+
+ def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(block_len)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype)
+
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # q, k, v projections
+ query_states = self.q(hidden_states) # (batch_size, seq_length, n_heads * dim_per_head)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim)
+ query_states = _split_into_blocks(query_states, self.block_len, axis=1)
+ key_states = _split_into_blocks(key_states, self.block_len, axis=1)
+ value_states = _split_into_blocks(value_states, self.block_len, axis=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2)
+ value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2)
+
+ # counteract the scaling in the dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ if attention_mask is not None:
+ attention_mask = _get_local_attention_mask(attention_mask, self.block_len)
+
+ # replace masked positions with a large negative bias (-1e10)
+ attention_mask = jax.lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, -1e10).astype(self.dtype),
+ )
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(self.block_len, attention_mask)
+
+ if attention_mask is not None:
+ position_bias = position_bias + attention_mask.swapaxes(1, 2)
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = attn_output[:, :seq_length, :]
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5TransientGlobalAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.local_radius = self.config.local_radius
+ self.block_len = self.local_radius + 1
+ self.global_block_size = self.config.global_block_size
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+
+ # Relative attention bias & layer norm for global attention
+ if self.has_relative_attention_bias:
+ self.global_relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+ self.global_input_layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ memory_position = jnp.arange(3 * block_length, dtype="i4")
+ context_position = memory_position[block_length:-block_length]
+
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, None, :, :, :]
+ return values
+
+ def compute_side_bias(self, attention_mask: np.ndarray, global_segment_ids: np.ndarray) -> np.ndarray:
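+ # A short sketch of what this returns: a bias of shape (batch_size, num_heads, seq_len, global_seq_len)
+ # combining (i) a hard mask that blocks attention between real tokens and padded global blocks and
+ # (ii) a learned relative-position bias between every token and every global block.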
+ # (batch_size, 1, seq_len, global_seq_len)
+ side_attention_mask = jnp.equal(attention_mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
+ attention_side_bias = jax.lax.select(
+ side_attention_mask > 0,
+ jnp.full(side_attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(side_attention_mask.shape, -1e10).astype(self.dtype),
+ )
+ # (batch_size, seq_len, global_seq_len)
+ side_relative_position = _make_side_relative_position_ids(attention_mask, self.global_block_size)
+ side_relative_position_bucket = self._relative_position_bucket(
+ side_relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (batch_size, seq_len, global_seq_len, num_heads)
+ side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
+
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ side_bias = jnp.transpose(side_bias, (0, 3, 1, 2))
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ attention_side_bias = attention_side_bias + side_bias
+ return attention_side_bias
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim)
+
+ def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(block_len)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype)
+
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # Prepare components for transient-global attention
+ # Obtain block_ids and global_segment_ids
+ # global_seq_len := seq_len // self.global_block_size
+ # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(
+ attention_mask if attention_mask is not None else jnp.ones((batch_size, seq_length)),
+ self.global_block_size,
+ )
+ # Create global inputs
+ _global_seq_len = global_segment_ids.shape[-1]
+ global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
+ global_inputs = self.global_input_layer_norm(global_inputs)
+
+ # q, k, v projections
+ query_states = self.q(hidden_states) # (batch_size, seq_length, n_heads * dim_per_head)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # Get global/side key/value_states
+ side_key_states = self.k(global_inputs)
+ side_value_states = self.v(global_inputs)
+
+ # reshape to (batch_size, global_seq_len, n_heads, head_dim)
+ side_key_states = self._split_heads(side_key_states)
+ side_value_states = self._split_heads(side_value_states)
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim)
+ query_states = _split_into_blocks(query_states, self.block_len, axis=1)
+ key_states = _split_into_blocks(key_states, self.block_len, axis=1)
+ value_states = _split_into_blocks(value_states, self.block_len, axis=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2)
+ value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2)
+
+ # Tile side inputs across local key/value blocks
+ # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
+ reps = [1] * (side_key_states.ndim + 1)
+ reps[1] = key_states.shape[1]
+ side_key_states = jnp.tile(side_key_states[:, None, ...], reps)
+ side_value_states = jnp.tile(side_value_states[:, None, ...], reps)
+
+ # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
+ # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
+ key_states = jnp.concatenate((key_states, side_key_states), axis=2)
+ value_states = jnp.concatenate((value_states, side_value_states), axis=2)
+
+ # counteract the scaling in the dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ if attention_mask is not None:
+ local_attention_mask = _get_local_attention_mask(attention_mask, self.block_len)
+ local_attention_mask = jax.lax.select(
+ local_attention_mask > 0,
+ jnp.full(local_attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(local_attention_mask.shape, -1e10).astype(self.dtype),
+ )
+ else:
+ local_attention_mask = None
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(self.block_len, attention_mask)
+ if local_attention_mask is not None:
+ position_bias = position_bias + local_attention_mask.swapaxes(1, 2)
+
+ # Calculate global/side bias - shape: (batch_size, num_heads, seq_len, global_seq_len)
+ if attention_mask is None:
+ attention_mask = jnp.ones((batch_size, seq_length))
+ side_position_bias = self.compute_side_bias(attention_mask, global_segment_ids)
+ side_position_bias = _split_into_blocks(side_position_bias, self.block_len, axis=-2)
+ side_position_bias = jnp.swapaxes(side_position_bias, 1, 2)
+ position_bias = jnp.concatenate((position_bias, side_position_bias), axis=-1)
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = attn_output[:, :seq_length, :]
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5LayerLocalSelfAttention(nn.Module):
+ """Local self attention used in encoder"""
+
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.LocalSelfAttention = FlaxLongT5LocalAttention(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ **kwargs: Any, # to accept init_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.LocalSelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class FlaxLongT5LayerTransientGlobalSelfAttention(nn.Module):
+ """Transient-Global self attention used in encoder"""
+
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.TransientGlobalSelfAttention = FlaxLongT5TransientGlobalAttention(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ **kwargs: Any, # to accept init_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.TransientGlobalSelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerSelfAttention with T5->LongT5
+class FlaxLongT5LayerSelfAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.SelfAttention = FlaxLongT5Attention(
+ self.config,
+ has_relative_attention_bias=self.has_relative_attention_bias,
+ causal=self.config.causal,
+ dtype=self.dtype,
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.SelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCrossAttention with T5->LongT5
+class FlaxLongT5LayerCrossAttention(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.EncDecAttention = FlaxLongT5Attention(
+ self.config, has_relative_attention_bias=False, causal=False, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ key_value_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.EncDecAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ key_value_states=key_value_states,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class FlaxLongT5Block(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.causal = self.config.causal
+ if self.causal:
+ attention_layer = FlaxLongT5LayerSelfAttention
+ elif self.config.encoder_attention_type == "local":
+ attention_layer = FlaxLongT5LayerLocalSelfAttention
+ elif self.config.encoder_attention_type == "transient-global":
+ attention_layer = FlaxLongT5LayerTransientGlobalSelfAttention
+ else:
+ raise ValueError(
+ "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
+ f"but got {self.config.encoder_attention_type}."
+ )
+ self.layer = (
+ attention_layer(
+ self.config,
+ has_relative_attention_bias=self.has_relative_attention_bias,
+ name=str(0),
+ dtype=self.dtype,
+ ),
+ )
+ feed_forward_index = 1
+ if self.causal:
+ self.layer += (FlaxLongT5LayerCrossAttention(self.config, name=str(1), dtype=self.dtype),)
+ feed_forward_index += 1
+
+ self.layer += (FlaxLongT5LayerFF(self.config, name=str(feed_forward_index), dtype=self.dtype),)
+
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Block.__call__ with T5->LongT5
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ output_attentions=False,
+ return_dict=True,
+ deterministic=True,
+ init_cache=False,
+ ):
+ self_attention_outputs = self.layer[0](
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+ hidden_states = self_attention_outputs[0]
+ attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights
+
+ do_cross_attention = self.causal and encoder_hidden_states is not None
+ if do_cross_attention:
+ cross_attention_outputs = self.layer[1](
+ hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ position_bias=encoder_decoder_position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = cross_attention_outputs[0]
+
+ # Keep cross-attention outputs and relative position weights
+ attention_outputs = attention_outputs + cross_attention_outputs[1:]
+
+ # Apply Feed Forward layer
+ hidden_states = self.layer[-1](hidden_states, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ outputs = outputs + attention_outputs
+
+ # returns hidden-states, (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCollection with T5->LongT5
+class FlaxLongT5LayerCollection(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxLongT5Block(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ return self.layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5BlockCollection with T5->LongT5
+class FlaxLongT5BlockCollection(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.causal = self.config.causal
+ if self.gradient_checkpointing:
+ FlaxLongT5CheckpointLayer = remat(FlaxLongT5LayerCollection, static_argnums=(6, 7, 8))
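+ # static_argnums (6, 7, 8) refer to the positional arguments output_attentions,
+ # deterministic and init_cache of FlaxLongT5LayerCollection.__call__; they are plain
+ # Python bools and must remain static for rematerialization.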
+ self.blocks = [
+ FlaxLongT5CheckpointLayer(
+ self.config,
+ has_relative_attention_bias=(i == 0),
+ dtype=self.dtype,
+ name=str(i),
+ )
+ for i in range(self.config.num_layers)
+ ]
+ else:
+ self.blocks = [
+ FlaxLongT5LayerCollection(
+ self.config,
+ has_relative_attention_bias=(i == 0),
+ dtype=self.dtype,
+ name=str(i),
+ )
+ for i in range(self.config.num_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ ):
+ # Prepare output containers
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and self.causal) else None
+ position_bias = None
+ encoder_decoder_position_bias = None
+
+ for i, layer_module in enumerate(self.blocks):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ position_bias,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ encoder_decoder_position_bias,
+ output_attentions,
+ deterministic,
+ init_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ # We share the position biases between the layers - the first layer stores them
+ # layer_outputs = hidden-states, (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ position_bias = layer_outputs[1]
+
+ if self.causal and encoder_hidden_states is not None:
+ encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[2],)
+ if self.causal:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[4],)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Stack with T5->LongT5
+class FlaxLongT5Stack(nn.Module):
+ config: LongT5Config
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.causal = self.config.causal
+
+ self.block = FlaxLongT5BlockCollection(
+ self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+ self.final_layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ ):
+ hidden_states = self.embed_tokens(input_ids)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+
+ outputs = self.block(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+
+ hidden_states = outputs[0]
+
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+
+ # Add last layer
+ all_hidden_states = None
+
+ if output_hidden_states:
+ all_hidden_states = outputs.hidden_states
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ if output_hidden_states:
+ return (
+ hidden_states,
+ all_hidden_states,
+ ) + outputs[2:]
+ return (hidden_states,) + outputs[1:]
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+LONGT5_ENCODE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+LONGT5_DECODE_INPUTS_DOCSTRING = r"""
+ Args:
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For training, `decoder_input_ids` should be provided.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+LONGT5_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ To know more about how to prepare `decoder_input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(jnp.ndarray))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class FlaxLongT5PreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongT5Config
+ base_model_prefix = "transformer"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: LongT5Config,
+ input_shape: Tuple[int] = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def enable_gradient_checkpointing(self):
+ self._module = self.module_class(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=True,
+ )
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+
+ attention_mask = jnp.ones_like(input_ids)
+ decoder_input_ids = jnp.ones_like(input_ids)
+ decoder_attention_mask = jnp.ones_like(input_ids)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: jnp.ndarray = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if decoder_input_ids is None:
+ raise ValueError(
+ "Make sure to provide both `input_ids` and `decoder_input_ids`. `decoder_input_ids` is not passed"
+ " here."
+ )
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # prepare decoder inputs
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ )
+
+ def init_cache(self, batch_size, max_length, encoder_outputs):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
+                `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
+                `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a
+                sequence of hidden-states at the output of the last layer of the encoder. Used in the
+                cross-attention of the decoder.
+ """
+ # init input variables to retrieve cache
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0),
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ init_cache=True,
+ method=_decoder_forward, # we only need to call the decoder to init the cache
+ )
+ return unfreeze(init_variables["cache"])
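+
+    # Illustrative usage of `init_cache` (not part of the library source): the cache returned above is
+    # what `decode` consumes as `past_key_values` during incremental decoding, e.g.
+    #   cache = model.init_cache(batch_size, max_length, encoder_outputs)
+    #   outputs = model.decode(decoder_input_ids, encoder_outputs, past_key_values=cache)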
+
+ @add_start_docstrings(LONGT5_ENCODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=LongT5Config)
+ def encode(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ def _encoder_forward(module, input_ids, attention_mask, **kwargs):
+ encode_module = module._get_encoder_module()
+ return encode_module(input_ids, attention_mask, **kwargs)
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ method=_encoder_forward,
+ )
+
+ @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=LongT5Config)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+ >>> import jax.numpy as jnp
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+        # If past_key_values are passed, the cache is already initialized and a private flag `init_cache`
+        # has to be passed down to make sure the cache is used. The cache also has to be marked as mutable
+        # so that it can be updated by the FlaxLongT5Attention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past = outputs
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past = outputs
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+
+LONGT5_START_DOCSTRING = r"""
+    The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
+    Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo
+    Ni, Yun-Hsuan Sung and Yinfei Yang. It is an encoder-decoder transformer pre-trained in a text-to-text denoising
+    generative setting. LongT5 is an extension of the T5 model and enables the use of one of two efficient attention
+    mechanisms: (1) Local attention or (2) Transient-Global attention.
+
+    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads, etc.).
+
+    This model is also a Flax Linen
+    [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+    regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
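+# Minimal half-precision sketch based on the `dtype` documentation above (illustrative, not part of
+# the library source; the checkpoint name is the documented "google/long-t5-local-base"):
+#   import jax.numpy as jnp
+#   model = FlaxLongT5Model.from_pretrained("google/long-t5-local-base", dtype=jnp.bfloat16)
+#   model.params = model.to_bf16(model.params)  # optionally cast the parameters as well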
+
+@add_start_docstrings(
+    "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Module with T5->LongT5
+class FlaxLongT5Module(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def setup(self):
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0),
+ dtype=self.dtype,
+ )
+
+ encoder_config = copy.deepcopy(self.config)
+ encoder_config.causal = False
+ self.encoder = FlaxLongT5Stack(
+ encoder_config,
+ embed_tokens=self.shared,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ decoder_config = copy.deepcopy(self.config)
+ decoder_config.causal = True
+ decoder_config.num_layers = self.config.num_decoder_layers
+ self.decoder = FlaxLongT5Stack(
+ decoder_config,
+ embed_tokens=self.shared,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ encoder_outputs=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ deterministic: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Encode if needed (training, first prediction pass)
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return FlaxSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Model with T5->LongT5
+class FlaxLongT5Model(FlaxLongT5PreTrainedModel):
+ module_class = FlaxLongT5Module
+
+
+append_call_sample_docstring(FlaxLongT5Model, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
+
+FLAX_LONGT5_MODEL_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5Model
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5Model.from_pretrained("google/long-t5-local-base")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="np"
+ ... ).input_ids
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```
+"""
+
+
+overwrite_call_docstring(FlaxLongT5Model, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_MODEL_DOCSTRING)
+append_replace_return_docstrings(FlaxLongT5Model, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+
+
+@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5ForConditionalGenerationModule with T5->LongT5
+class FlaxLongT5ForConditionalGenerationModule(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def setup(self):
+ self.model_dim = self.config.d_model
+
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.initializer_factor),
+ dtype=self.dtype,
+ )
+
+ encoder_config = copy.deepcopy(self.config)
+ encoder_config.causal = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = FlaxLongT5Stack(
+ encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+
+ decoder_config = copy.deepcopy(self.config)
+ decoder_config.causal = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = self.config.num_decoder_layers
+ self.decoder = FlaxLongT5Stack(
+ decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+
+ self.lm_head = nn.Dense(
+ self.config.vocab_size,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_factor),
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ encoder_outputs=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ deterministic: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Encode
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.model_dim**-0.5)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.shared.variables["params"]["embedding"]
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output)
+ else:
+ lm_logits = self.lm_head(sequence_output)
+
+ if not return_dict:
+ return (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+
+ return FlaxSeq2SeqLMOutput(
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+class FlaxLongT5ForConditionalGeneration(FlaxLongT5PreTrainedModel):
+ module_class = FlaxLongT5ForConditionalGenerationModule
+
+ @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=LongT5Config)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+ >>> import jax.numpy as jnp
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "summarize: My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+        # If past_key_values are passed, the cache is already initialized and a private flag `init_cache`
+        # has to be passed down to make sure the cache is used. The cache also has to be marked as mutable
+        # so that it can be updated by the FlaxLongT5Attention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ decoder_outputs = decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.config.d_model**-0.5)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = module.shared.variables["params"]["embedding"]
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output)
+ else:
+ lm_logits = module.lm_head(sequence_output)
+
+ return lm_logits, decoder_outputs
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ if past_key_values is None:
+ lm_logits, decoder_outputs = outputs
+ else:
+ (lm_logits, decoder_outputs), past = outputs
+
+ if return_dict:
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
+ logits=lm_logits,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+ else:
+ outputs = (lm_logits,) + decoder_outputs[1:]
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ max_length,
+ attention_mask: Optional[jax.Array] = None,
+ decoder_attention_mask: Optional[jax.Array] = None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # initializing the cache
+ batch_size, seq_length = decoder_input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
+        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+        # But since the decoder uses a causal mask, those positions are masked anyway.
+        # Thus, we can create a single static attention_mask here, which is more efficient for compilation.
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if decoder_attention_mask is not None:
+ extended_attention_mask = jax.lax.dynamic_update_slice(
+ extended_attention_mask, decoder_attention_mask, (0, 0)
+ )
+
+ return {
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "encoder_attention_mask": attention_mask,
+ "decoder_attention_mask": extended_attention_mask,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ return model_kwargs
+
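+# Note (illustrative, describing the generic Flax generation loop rather than code in this file):
+# `generate()` calls `prepare_inputs_for_generation` once to build the static-length cache and the
+# decoder attention mask, and `update_inputs_for_generation` after every step to thread the updated
+# `past_key_values` into the next forward pass.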
+
+FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors="np")
+
+ >>> # Generate Summary
+ >>> summary_ids = model.generate(inputs["input_ids"]).sequences
+ >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False))
+ ```
+"""
+
+
+overwrite_call_docstring(
+ FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING
+)
+append_replace_return_docstrings(
+ FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
+)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..e16e0951208f774e17b951bc7d83120b7c68404f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py
@@ -0,0 +1,2236 @@
+# coding=utf-8
+# Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch LongT5 model."""
+
+
+import copy
+import math
+import warnings
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ DUMMY_INPUTS,
+ DUMMY_MASK,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_torch_fx_proxy,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_longt5 import LongT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "LongT5Config"
+_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
+
+# TODO: Update before the merge
+
+from ..deprecated._archive_maps import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor:
+    """Pad a tensor so that its sequence length will be a multiple of `block_len`."""
+ pad_len = -x.shape[dim] % block_len
+ # Handle cases when an empty input sequence is given
+ if not all(x.shape):
+ new_shape = list(x.shape)
+ new_shape[dim] += pad_len
+ return torch.zeros(new_shape, dtype=x.dtype)
+
+ pad = [(0, 0)] * x.ndim
+ pad[dim] = (0, pad_len)
+ pad = sum(pad[::-1], ())
+ x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
+ return x
+
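+# Example for `_pad_to_multiple` (illustrative): a tensor of shape (2, 10, 64) with block_len=4 and
+# dim=1 is padded with 2 rows of `pad_value` to shape (2, 12, 64), so the length becomes a multiple of 4.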
+
+def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor:
+    """Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length
+    is not a multiple of `block_len`, it will be zero-padded first.
+    """
+ # pad tensor to multiple of block_len
+ if x.shape[dim] % block_len != 0:
+ x = _pad_to_multiple(x, block_len, dim, pad_value=0)
+ num_blocks = x.shape[dim] // block_len
+ output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :]
+ # If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion
+ if 0 in output_shape:
+ return torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ return x.reshape(output_shape)
+
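+# Example for `_split_into_blocks` (illustrative): with block_len=4 and dim=1, a (2, 10, 64) tensor is
+# first zero-padded to (2, 12, 64) and then reshaped to (2, 3, 4, 64), i.e.
+# (batch_size, num_blocks, block_len, hidden_size).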
+
+def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor:
+    """Concatenate three consecutive blocks for each input block for local attention.
+
+ For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
+ """
+ num_blocks = x.shape[block_dim]
+
+ pad = [(0, 0)] * x.ndim
+ pad[block_dim] = (1, 1)
+ pad = sum(pad[::-1], ())
+ # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
+ x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
+
+ blocks_list: List[torch.Tensor] = []
+ for i in range(3):
+ # We use indexing approach here:
+ # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
+ indices = [slice(0, None)] * x.ndim
+ indices[block_dim] = slice(i, i + num_blocks)
+ indices = tuple(indices)
+ blocks_list.append(x[indices])
+ # [batch_size, num_blocks, 3 * block_len, ...]
+ return torch.cat(blocks_list, dim=sequence_dim)
+
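+# Example for `_concatenate_3_blocks` (illustrative): blocked keys of shape (2, 3, 4, 64) become
+# (2, 3, 12, 64); output block i is the concatenation of input blocks i-1, i and i+1 along the
+# sequence dimension, with padding standing in for the missing neighbours of the first and last block.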
+
+def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor:
+ """Makes 3-blocked relative position ids for local attention."""
+ position_ids = torch.arange(3 * block_len, dtype=torch.int32)
+ center_position_ids = position_ids[block_len:-block_len]
+ # [block_len, 3 * block_len]
+ relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1)
+ return relative_position_ids
+
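+# Example for `_make_3block_relative_position_ids` (illustrative): for block_len=2 the result has
+# shape (2, 6) and its first row is [-2, -1, 0, 1, 2, 3], i.e. the signed distance from the first
+# center token to every position in its 3-block window.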
+
+def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor:
+    """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than `local_radius`."""
+ relative_position_ids = _make_3block_relative_position_ids(block_len)
+ locality_mask = torch.abs(relative_position_ids) < block_len
+ locality_mask = locality_mask[None, None, :, :]
+ locality_mask = locality_mask.to(local_attention_mask.device)
+ return torch.logical_and(local_attention_mask, locality_mask)
+
+
+def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor:
+ """Prepare attention mask to be applied for a local attention."""
+ # [batch_size, num_blocks, block_len]
+ _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1)
+ # [batch_size, num_block, 3 * block_len]
+ _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2)
+
+ _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1)
+ _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2)
+ # [batch_size, num_block, block_len, 3 * block_len]
+ local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask)
+ local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
+ # [batch_size, 1, num_block, block_len, 3 * block_len]
+ return local_attention_mask.unsqueeze(1).to(device)
+
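+# Shape walk-through for `_get_local_attention_mask` (illustrative): an attention mask of shape
+# (1, 10) with block_len=4 is blocked to (1, 3, 4), paired with its 3-block key window to give
+# (1, 3, 4, 12), and returned as (1, 1, 3, 4, 12) after the final unsqueeze.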
+
+def _make_global_fixed_block_ids(
+ attention_mask: torch.Tensor, global_block_size: int
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Obtain the "fixed block" global id corresponding to each input token.
+
+    This implementation is a simplified version of the original Flaxformer implementation adopted from:
+    https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
+
+    In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make
+    up a whole fixed block, are assigned to the preceding block.
+
+ Padding tokens from the original sequence are represented by -1.
+ """
+ batch_size, seq_len = attention_mask.shape[:2]
+
+ def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor:
+ block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1
+ block_ends = block_ends.to(block_ids.device)
+ true_block_ends = torch.logical_and(block_ends, block_ids >= 0)
+ full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1
+ block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks)
+ return block_ids
+
+ fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size
+ fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
+ mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype)
+ global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype)
+ _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device)
+ global_block_ids = torch.where(
+ global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound
+ )
+ # set padding tokens to -1
+ global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
+ # [batch_size, seq_len]
+ global_block_ids = handle_orphan_tokens(global_block_ids)
+ num_globals = seq_len // global_block_size
+ # [batch_size, seq_len // global_block_size]
+ if num_globals > 0:
+ _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1)
+ else:
+ _sequence_block_ids_max = torch.zeros(
+ batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device
+ )
+ global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1
+ global_segment_ids = global_segment_ids.to(attention_mask.device)
+ global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
+ return global_block_ids.type(torch.int), global_segment_ids.type(torch.int)
+
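+# Example for `_make_global_fixed_block_ids` (illustrative): for an all-ones attention mask of shape
+# (1, 8) and global_block_size=4, the returned block ids are [[0, 0, 0, 0, 1, 1, 1, 1]] and the
+# global segment ids are [[1, 1]].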
+
+def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor:
+ """Create the relative position tensor for local -> global attention."""
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
+ global_seq_len = global_segment_ids.shape[-1]
+ global_positions = torch.arange(global_seq_len, device=block_ids.device)
+ side_relative_position = global_positions - block_ids[..., None]
+ return side_relative_position.type(torch.int64)
+
+
+def _create_global_aggregates(
+ hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int
+) -> torch.Tensor:
+ """Compute individual block aggregates by summing over individual blocks."""
+ # (batch..., seq_len, global_seq_len))
+ block_ids = block_ids.where(
+ block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device)
+ )
+ one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1]
+ return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype))
+
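+# Example for `_create_global_aggregates` (illustrative): with hidden_states of shape (1, 8, 64),
+# block_ids [[0, 0, 0, 0, 1, 1, 1, 1]] and global_seq_len=2, the (1, 2, 64) output holds, per block,
+# the sum of the four token states assigned to it; padding tokens (block id -1) are routed to an
+# extra one-hot slot that is dropped before the einsum.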
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5
+class LongT5LayerNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean.
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+        # LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
+        # Square Layer Normalization https://arxiv.org/abs/1910.07467, thus the variance is calculated
+        # w/o mean and there is no bias. Additionally, we want to make sure that the accumulation for
+        # half-precision inputs is done in fp32.
+
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ # convert into half-precision if necessary
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
+ hidden_states = hidden_states.to(self.weight.dtype)
+
+ return self.weight * hidden_states
+
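+# In formula form (illustrative): LongT5LayerNorm computes y = weight * x / sqrt(mean(x**2, dim=-1) + eps),
+# i.e. RMSNorm, with the mean of squares accumulated in fp32 before casting back to half precision if needed.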
+
+try:
+ from apex.normalization import FusedRMSNorm
+
+ LongT5LayerNorm = FusedRMSNorm # noqa
+
+ logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm")
+except ImportError:
+ # using the normal LongT5LayerNorm
+ pass
+except Exception:
+    logger.warning("Discovered apex but it failed to load, falling back to LongT5LayerNorm")
+ pass
+
+ALL_LAYERNORM_LAYERS.append(LongT5LayerNorm)
+
+
+# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5
+class LongT5DenseActDense(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ if (
+ isinstance(self.wo.weight, torch.Tensor)
+ and hidden_states.dtype != self.wo.weight.dtype
+ and self.wo.weight.dtype != torch.int8
+ ):
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+class LongT5DenseGatedActDense(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_gelu = self.act(self.wi_0(hidden_states))
+ hidden_linear = self.wi_1(hidden_states)
+ hidden_states = hidden_gelu * hidden_linear
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
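+# LongT5DenseGatedActDense in formula form (illustrative): y = wo(dropout(act(wi_0(x)) * wi_1(x))),
+# i.e. the gated feed-forward variant selected by `config.is_gated_act` in LongT5LayerFF below.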
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5
+class LongT5LayerFF(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ if config.is_gated_act:
+ self.DenseReluDense = LongT5DenseGatedActDense(config)
+ else:
+ self.DenseReluDense = LongT5DenseActDense(config)
+
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(self, hidden_states):
+ forwarded_states = self.layer_norm(hidden_states)
+ forwarded_states = self.DenseReluDense(forwarded_states)
+ hidden_states = hidden_states + self.dropout(forwarded_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5
+class LongT5Attention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias=False):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+ self.gradient_checkpointing = False
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
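+    # Worked example for `_relative_position_bucket` (illustrative): with the defaults
+    # (bidirectional=True, num_buckets=32, max_distance=128), a relative position of +5 maps to bucket
+    # 16 + 5 = 21: the upper 16 buckets are reserved for positive offsets, and |5| < max_exact (= 8)
+    # keeps it in the exact (non-logarithmic) range.
+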
+ def compute_bias(self, query_length, key_length, device=None):
+ """Compute binned relative position bias"""
+ if device is None:
+ device = self.relative_attention_bias.weight.device
+ context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
+ memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
+ relative_position = memory_position - context_position # shape (query_length, key_length)
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # shape (query_length, key_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
+ values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
+ return values
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ key_value_states=None,
+ position_bias=None,
+ past_key_value=None,
+ layer_head_mask=None,
+ query_length=None,
+ use_cache=False,
+ output_attentions=False,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ # Input is (batch_size, seq_length, dim)
+ # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
+ # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ real_seq_length = seq_length
+
+ if past_key_value is not None:
+ if len(past_key_value) != 2:
+ raise ValueError(
+                    f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states"
+ )
+ real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
+
+ key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+ def unshape(states):
+ """reshape"""
+ return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
+
+ def project(hidden_states, proj_layer, key_value_states, past_key_value):
+ """projects hidden states correctly to key/query states"""
+ if key_value_states is None:
+ # self-attn
+ # (batch_size, n_heads, seq_length, dim_per_head)
+ hidden_states = shape(proj_layer(hidden_states))
+ elif past_key_value is None:
+ # cross-attn
+ # (batch_size, n_heads, seq_length, dim_per_head)
+ hidden_states = shape(proj_layer(key_value_states))
+
+ if past_key_value is not None:
+ if key_value_states is None:
+ # self-attn
+ # (batch_size, n_heads, key_length, dim_per_head)
+ hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
+ elif past_key_value.shape[2] != key_value_states.shape[1]:
+ # checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ # cross-attn
+ # (batch_size, n_heads, seq_length, dim_per_head)
+ hidden_states = shape(proj_layer(key_value_states))
+ else:
+ # cross-attn
+ hidden_states = past_key_value
+ return hidden_states
+
+ # get query states
+ query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
+
+ # get key/value states
+ key_states = project(
+ hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
+ )
+ value_states = project(
+ hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
+ )
+
+ # compute scores
+ scores = torch.matmul(
+ query_states, key_states.transpose(3, 2)
+ ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
+
+ if position_bias is None:
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
+
+ # if key and values are already calculated
+ # we want only the last query position bias
+ if past_key_value is not None:
+ position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
+
+ if mask is not None:
+ position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
+
+ if self.pruned_heads:
+ mask = torch.ones(position_bias.shape[1])
+ mask[list(self.pruned_heads)] = 0
+ position_bias_masked = position_bias[:, mask.bool()]
+ else:
+ position_bias_masked = position_bias
+
+ scores += position_bias_masked
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
+ scores
+ ) # (batch_size, n_heads, seq_length, key_length)
+ attn_weights = nn.functional.dropout(
+ attn_weights, p=self.dropout, training=self.training
+ ) # (batch_size, n_heads, seq_length, key_length)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+
+ attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+class LongT5LocalAttention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+ self.gradient_checkpointing = False
+
+ # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ target_device = (
+ self.relative_attention_bias.weight.device
+ if self.relative_attention_bias.weight.device.type != "meta"
+ else None
+ )
+ memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device)
+ context_position = memory_position[block_length:-block_length]
+
+ # (block_length, 3 * block_length)
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # (block_length, 3 * block_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (block_length, 3 * block_length, num_heads)
+ values = self.relative_attention_bias(relative_position_bucket)
+ # (1, 1, num_heads, block_length, 3 * block_length)
+ values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
+ return values
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ ):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
+
+ def unshape(states):
+ """reshape"""
+ return states.contiguous().view(batch_size, -1, self.inner_dim)
+
+ # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)
+ query_states = shape(self.q(hidden_states))
+ key_states = shape(self.k(hidden_states))
+ value_states = shape(self.v(hidden_states))
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
+ query_states = _split_into_blocks(query_states, self.block_len, dim=1)
+ key_states = _split_into_blocks(key_states, self.block_len, dim=1)
+ value_states = _split_into_blocks(value_states, self.block_len, dim=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
+ value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
+
+ # Compute scores
+ scores = torch.einsum(
+ "...qhd,...khd->...hqk", query_states, key_states
+ ) # (batch_size, num_block, n_heads, block_len, 3 * block_len)
+
+ if position_bias is None:
+            # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(self.block_len)
+
+ if mask is not None:
+ # Replace masked positions with -1e10 (according to the original implementation)
+ mask = torch.where(mask > 0, 0.0, -1e10)
+                # We need to adjust the position bias shape so that it can be summed with the mask
+ position_bias = position_bias + mask.transpose(1, 2)
+
+ scores += position_bias
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+ attn_weights = attn_weights.type(value_states.dtype)
+ attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
+ attn_output = attn_output[:, :seq_length, :]
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+class LongT5TransientGlobalAttention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+ self.global_block_size = config.global_block_size
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+
+ # Relative attention bias & layer norm for global attention
+ if self.has_relative_attention_bias:
+ self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+
+ # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ target_device = (
+ self.relative_attention_bias.weight.device
+ if self.relative_attention_bias.weight.device.type != "meta"
+ else None
+ )
+ memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device)
+ context_position = memory_position[block_length:-block_length]
+
+ # (block_length, 3 * block_length)
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # (block_length, 3 * block_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (block_length, 3 * block_length, num_heads)
+ values = self.relative_attention_bias(relative_position_bucket)
+ # (1, 1, num_heads, block_length, 3 * block_length)
+ values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
+ return values
+
+ def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor:
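+ """Compute the additive bias (attention mask + relative position bias) for attending to the global/side tokens."""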
+ # (batch_size, 1, seq_len, global_seq_len)
+ side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
+ attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10)
+ # (batch_size, seq_len, global_seq_len)
+ side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size)
+ side_relative_position_bucket = self._relative_position_bucket(
+ side_relative_position,
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (batch_size, seq_len, global_seq_len, num_heads)
+ side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
+
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ side_bias = side_bias.permute([0, 3, 1, 2])
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ attention_side_bias = attention_side_bias + side_bias
+ return attention_side_bias
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ ):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
+
+ def unshape(states):
+ """reshape"""
+ return states.contiguous().view(batch_size, -1, self.inner_dim)
+
+ # Prepare components for transient-global attention
+ # Obtain block_ids and global_segment_ids
+ # global_seq_len := seq_len // self.global_block_size
+ # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(
+ mask if mask is not None else torch.ones(hidden_states.shape[:-1]),
+ self.global_block_size,
+ )
+ # Create global inputs
+ _global_seq_len = global_segment_ids.shape[-1]
+ global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
+ global_inputs = self.global_input_layer_norm(global_inputs)
+
+ # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)
+ query_states = shape(self.q(hidden_states))
+ key_states = shape(self.k(hidden_states))
+ value_states = shape(self.v(hidden_states))
+ # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head)
+ side_key_states = shape(self.k(global_inputs))
+ side_value_states = shape(self.v(global_inputs))
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
+ query_states = _split_into_blocks(query_states, self.block_len, dim=1)
+ key_states = _split_into_blocks(key_states, self.block_len, dim=1)
+ value_states = _split_into_blocks(value_states, self.block_len, dim=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
+ value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
+
+ # Tile side inputs across local key/value blocks
+ # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
+ reps = [1] * (side_key_states.ndim + 1)
+ reps[1] = key_states.shape[1]
+ side_key_states = side_key_states.unsqueeze(1).repeat(reps)
+ side_value_states = side_value_states.unsqueeze(1).repeat(reps)
+
+ # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
+ # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
+ key_states = torch.cat([key_states, side_key_states], dim=2)
+ value_states = torch.cat([value_states, side_value_states], dim=2)
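+ # e.g. with seq_len = 512, block_len = 128 and global_block_size = 16 this gives 4 blocks whose queries
+ # each attend to 3 * 128 + 512 // 16 = 416 key/value positions (the local window plus the global tokens)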
+
+ # Compute scores -> (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
+ scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states)
+
+ if mask is not None:
+ # We need to adjust the position bias shape so it can be summed with the mask
+ local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device)
+ # Replace masked positions with -1e10 (according to the original implementation)
+ local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10)
+ else:
+ local_attention_mask = None
+
+ if position_bias is None:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, 1, self.n_heads, self.block_len, 3 * self.block_len),
+ device=scores.device,
+ dtype=scores.dtype,
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(self.block_len)
+
+ if local_attention_mask is not None:
+ # (batch_size, 1, n_heads, block_len, 3 * block_len)
+ position_bias = position_bias + local_attention_mask.transpose(1, 2)
+ position_bias = position_bias.type(scores.dtype)
+
+ # Calculate global/side bias - shape: (batch_size, num_heads, seq_len, global_seq_len)
+ if mask is None:
+ mask = torch.ones(batch_size, seq_length)
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ side_position_bias = self.compute_side_bias(mask, global_segment_ids)
+ # (batch_size, num_blocks, num_heads, block_len, global_seq_len)
+ side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2)
+ side_position_bias = side_position_bias.type(scores.dtype).to(scores.device)
+ # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len)
+ position_bias = torch.cat([position_bias, side_position_bias], dim=-1)
+
+ scores += position_bias
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+ attn_weights = attn_weights.type(value_states.dtype)
+ attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
+ attn_output = attn_output[:, :seq_length, :]
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5
+class LongT5LayerSelfAttention(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.SelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5LayerLocalSelfAttention(nn.Module):
+ """Local self attention used in encoder"""
+
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ **kwargs: Any, # to accept past_key_value and use_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.LocalSelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5LayerTransientGlobalSelfAttention(nn.Module):
+ """Transient-Global self attention used in encoder"""
+
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(
+ config, has_relative_attention_bias=has_relative_attention_bias
+ )
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ **kwargs: Any, # to accept past_key_value and use_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.TransientGlobalSelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5
+class LongT5LayerCrossAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ query_length=None,
+ output_attentions=False,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.EncDecAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ key_value_states=key_value_states,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ query_length=query_length,
+ output_attentions=output_attentions,
+ )
+ layer_output = hidden_states + self.dropout(attention_output[0])
+ outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5Block(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ if config.is_decoder:
+ attention_layer = LongT5LayerSelfAttention
+ elif config.encoder_attention_type == "local":
+ attention_layer = LongT5LayerLocalSelfAttention
+ elif config.encoder_attention_type == "transient-global":
+ attention_layer = LongT5LayerTransientGlobalSelfAttention
+ else:
+ raise ValueError(
+ "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
+ f"but got {config.encoder_attention_type}."
+ )
+ self.layer = nn.ModuleList()
+ self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias))
+ if self.is_decoder:
+ self.layer.append(LongT5LayerCrossAttention(config))
+
+ self.layer.append(LongT5LayerFF(config))
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ return_dict=True,
+ ):
+ if past_key_value is not None:
+ if not self.is_decoder:
+ logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.")
+ expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
+
+ if len(past_key_value) != expected_num_past_key_values:
+ raise ValueError(
+ f"There should be {expected_num_past_key_values} past states. "
+ f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
+ f"Got {len(past_key_value)} past key / value states"
+ )
+
+ self_attn_past_key_value = past_key_value[:2]
+ cross_attn_past_key_value = past_key_value[2:]
+ else:
+ self_attn_past_key_value, cross_attn_past_key_value = None, None
+
+ self_attention_outputs = self.layer[0](
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=self_attn_past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states, present_key_value_state = self_attention_outputs[:2]
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ do_cross_attention = self.is_decoder and encoder_hidden_states is not None
+ if do_cross_attention:
+ # the actual query length is unknown for cross attention
+ # if using past key value states. Need to inject it here
+ if present_key_value_state is not None:
+ query_length = present_key_value_state[0].shape[2]
+ else:
+ query_length = None
+
+ cross_attention_outputs = self.layer[1](
+ hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ position_bias=encoder_decoder_position_bias,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ query_length=query_length,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states = cross_attention_outputs[0]
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ # Combine self attn and cross attn key value states
+ if present_key_value_state is not None:
+ present_key_value_state = present_key_value_state + cross_attention_outputs[1]
+
+ # Keep cross-attention outputs and relative position weights
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
+
+ # Apply Feed Forward layer
+ hidden_states = self.layer[-1](hidden_states)
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if use_cache:
+ outputs = outputs + (present_key_value_state,) + attention_outputs
+ else:
+ outputs = outputs + attention_outputs
+
+ return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+
+
+class LongT5PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongT5Config
+ base_model_prefix = "transformer"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["LongT5Block"]
+
+ @property
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs
+ def dummy_inputs(self):
+ input_ids = torch.tensor(DUMMY_INPUTS)
+ input_mask = torch.tensor(DUMMY_MASK)
+ dummy_inputs = {
+ "decoder_input_ids": input_ids,
+ "input_ids": input_ids,
+ "decoder_attention_mask": input_mask,
+ }
+ return dummy_inputs
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor # Used for testing weights initialization
+ if isinstance(module, LongT5LayerNorm):
+ module.weight.data.fill_(factor * 1.0)
+ elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)):
+ # Mesh TensorFlow embeddings initialization
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
+ module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ elif isinstance(module, LongT5DenseActDense):
+ # Mesh TensorFlow FF initialization
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
+ module.wi.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, LongT5DenseGatedActDense):
+ module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
+ module.wi_0.bias.data.zero_()
+ module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
+ module.wi_1.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)):
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
+ d_model = self.config.d_model
+ key_value_proj_dim = self.config.d_kv
+ n_heads = self.config.num_heads
+ module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
+ module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
+ if module.has_relative_attention_bias:
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
+ if isinstance(module, LongT5TransientGlobalAttention):
+ module.global_relative_attention_bias.weight.data.normal_(
+ mean=0.0, std=factor * ((d_model) ** -0.5)
+ )
+
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ if decoder_start_token_id is None:
+ raise ValueError(
+ "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. "
+ "See LongT5 docs for more information."
+ )
+
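+ # For example, labels [x1, x2, x3] become decoder inputs [decoder_start_token_id, x1, x2]; any -100
+ # entries (positions ignored by the loss) are then replaced by `pad_token_id` below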
+ # shift inputs to the right
+ if is_torch_fx_proxy(input_ids):
+ # Item assignment is not supported natively for proxies.
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
+ else:
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
+
+class LongT5Stack(LongT5PreTrainedModel):
+ def __init__(self, config, embed_tokens=None):
+ super().__init__(config)
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+ self.is_decoder = config.is_decoder
+
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+
+ self.block = nn.ModuleList(
+ [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
+ )
+ self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings
+ def set_input_embeddings(self, new_embeddings):
+ self.embed_tokens = new_embeddings
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ inputs_embeds=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(
+ f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
+
+ if inputs_embeds is None:
+ assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ batch_size, seq_length = input_shape
+
+ # required mask seq length can be calculated via length of past
+ mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
+
+ if use_cache is True:
+ assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder"
+
+ if attention_mask is None:
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
+
+ # initialize past_key_values with `None` if past does not exist
+ if past_key_values is None:
+ past_key_values = [None] * len(self.block)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used
+ if self.is_decoder:
+ extended_attention_mask = self.get_extended_attention_mask(
+ attention_mask, input_shape, inputs_embeds.device
+ )
+ elif self.config.encoder_attention_type == "local":
+ extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device)
+ else: # we need to use both local attention mask and standard extended mask for transient-global attention
+ extended_attention_mask = attention_mask
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
+ present_key_value_states = () if use_cache else None
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and self.is_decoder) else None
+ position_bias = None
+ encoder_decoder_position_bias = None
+
+ hidden_states = self.dropout(inputs_embeds)
+
+ for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
+ layer_head_mask = head_mask[i]
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.forward,
+ hidden_states,
+ extended_attention_mask,
+ position_bias,
+ encoder_hidden_states,
+ encoder_extended_attention_mask,
+ encoder_decoder_position_bias,
+ layer_head_mask,
+ cross_attn_layer_head_mask,
+ None, # past_key_value is always None with gradient checkpointing
+ use_cache,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ position_bias=position_bias,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
+ layer_head_mask=layer_head_mask,
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ # layer_outputs is a tuple with:
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+ if use_cache is False:
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
+
+ hidden_states, present_key_value_state = layer_outputs[:2]
+
+ # We share the position biases between the layers - the first layer stores them
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ position_bias = layer_outputs[2]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
+ # append next layer key value states
+ if use_cache:
+ present_key_value_states = present_key_value_states + (present_key_value_state,)
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[3],)
+ if self.is_decoder:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
+
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ present_key_value_states,
+ all_hidden_states,
+ all_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_value_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+LONGT5_START_DOCSTRING = r"""
+
+ The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
+ Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo
+ Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising
+ generative setting. The LongT5 model is an extension of the T5 model, and it enables using one of two different
+ efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LONGT5_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ To know more about how to prepare `decoder_input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+ `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+LONGT5_ENCODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+__HEAD_MASK_WARNING_MSG = """
+The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
+`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
+If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
+num_heads)`.
+"""
+
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+class LongT5Model(LongT5PreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [
+ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
+ ]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = LongT5Stack(decoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
+ class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongT5Model
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
+ >>> model = LongT5Model.from_pretrained("google/long-t5-local-base")
+
+ >>> # Let's try a very long encoder input.
+ >>> input_ids = tokenizer(
+ ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
+class LongT5ForConditionalGeneration(LongT5PreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [
+ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
+ ]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.model_dim = config.d_model
+
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = LongT5Stack(decoder_config, self.shared)
+
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
+ labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
+ >>> model = LongT5ForConditionalGeneration.from_pretrained(
+ ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps"
+ ... )
+
+ >>> # Let's try a very long input.
+ >>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt")
+ >>> input_ids = inputs.input_ids
+
+ >>> outputs = model.generate(input_ids)
+ >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ abstractthe aim of this article is to provide an overview of the literature on the role of dog
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ # Convert encoder inputs in embeddings if needed
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.model_dim**-0.5)
+
+ lm_logits = self.lm_head(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
+
+ labels = labels.to(lm_logits.device)
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+ # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
+
+ if not return_dict:
+ output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {
+ "decoder_input_ids": input_ids,
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache,
+ }
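+    # Illustrative note (not part of the model code): if the cached keys cover past_length == 12
+    # tokens and `input_ids` has shape (1, 13), only the newest token, input_ids[:, 12:], is
+    # forwarded as `decoder_input_ids` for the next decoding step.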
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ # if decoder past is not included in output
+ # speedy decoding is disabled and no need to reorder
+ if past_key_values is None:
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
+ return past_key_values
+
+ reordered_decoder_past = ()
+ for layer_past_states in past_key_values:
+            # get the correct batch idx from the layer past batch dim
+            # the batch dim of each `past` key / value state is its first dimension (dim 0)
+ reordered_layer_past_states = ()
+ for layer_past_state in layer_past_states:
+ # need to set correct `past` for each of the four key / value states
+ reordered_layer_past_states = reordered_layer_past_states + (
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
+ )
+
+ assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
+ assert len(reordered_layer_past_states) == len(layer_past_states)
+
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
+ return reordered_decoder_past
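+    # Beam-search sketch (illustration only): with beam_idx = torch.tensor([1, 1, 0]), every cached
+    # key / value tensor of shape (batch, num_heads, seq_len, head_dim) is re-gathered along dim 0,
+    # so rows 0 and 1 now hold beam 1's cache and row 2 holds beam 0's cache.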
+
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+class LongT5EncoderModel(LongT5PreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight"]
+ _keys_to_ignore_on_load_unexpected = [r"decoder"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+        >>> from transformers import AutoTokenizer, LongT5EncoderModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
+ >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base")
+ >>> input_ids = tokenizer(
+        ...     100 * "Studies have shown that owning a dog is good for you ", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return encoder_outputs
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44a00ff39c5435a7e9efa55a981ea7b662880825
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f501a46da10e682a4d09bf0d0eccc1b46cd6aff
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ebc9ce964caefe0de40bd787925c59243f45681
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7df9837dcc9097139613c559cfdb0ac04e974815
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cead944521b41c60f024b0b81481f90b7f09c4b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__init__.py
@@ -0,0 +1,59 @@
+# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_olmo": ["OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP", "OlmoConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_olmo"] = [
+ "OlmoForCausalLM",
+ "OlmoModel",
+ "OlmoPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_olmo import OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP, OlmoConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_olmo import (
+ OlmoForCausalLM,
+ OlmoModel,
+ OlmoPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
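+
+# Usage sketch (assuming the standard `_LazyModule` behavior shared by the other model packages):
+#
+#     from transformers.models.olmo import OlmoConfig        # resolved lazily via `_import_structure`
+#     from transformers.models.olmo import OlmoForCausalLM   # only available when torch is installed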
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53b8afe72a44c363ed8b563f53b0a640cf014a27
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/configuration_olmo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/configuration_olmo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b852256c226071ecb869754396d224ce5c53a89
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/configuration_olmo.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/convert_olmo_weights_to_hf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/convert_olmo_weights_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f16b2787672f9b9f2f42bc288ccc616313aea49
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/convert_olmo_weights_to_hf.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/modeling_olmo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/modeling_olmo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..207b271a3952890ca540bf36b43a6564ad0bdf1e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/modeling_olmo.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/configuration_olmo.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/configuration_olmo.py
new file mode 100644
index 0000000000000000000000000000000000000000..17a790227683bfe50b7f0320a875d4871dcfc2ca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/configuration_olmo.py
@@ -0,0 +1,183 @@
+# coding=utf-8
+# Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" OLMo model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..deprecated._archive_maps import OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+logger = logging.get_logger(__name__)
+
+
+class OlmoConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`OlmoModel`]. It is used to instantiate an OLMo
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the [allenai/OLMo-7B-hf](https://huggingface.co/allenai/OLMo-7B-hf).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50304):
+ Vocabulary size of the OLMo model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`OlmoModel`]
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 11008):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by mean-pooling all the original heads within that group. For more details, check out [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+ `num_attention_heads`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ Padding token id.
+ bos_token_id (`int`, *optional*):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 50279):
+ End of stream token id.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the input and output word embeddings.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ clip_qkv (`float`, *optional*):
+ If not `None`, elements of query, key and value attention states are clipped so that their
+ absolute value does not exceed this value.
+
+ ```python
+ >>> from transformers import OlmoModel, OlmoConfig
+
+ >>> # Initializing a OLMo 7B style configuration
+ >>> configuration = OlmoConfig()
+
+ >>> # Initializing a model from the OLMo 7B style configuration
+ >>> model = OlmoModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "olmo"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=50304,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ hidden_act="silu",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ use_cache=True,
+ pad_token_id=1,
+ bos_token_id=None,
+ eos_token_id=50279,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ attention_bias=False,
+ attention_dropout=0.0,
+ clip_qkv=None,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self._rope_scaling_validation()
+ self.attention_bias = attention_bias
+ self.attention_dropout = attention_dropout
+ self.clip_qkv = clip_qkv
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
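+
+
+# Validation sketch (illustration only, not part of the library): how `_rope_scaling_validation`
+# treats a few `rope_scaling` values.
+#
+#     OlmoConfig(rope_scaling={"type": "linear", "factor": 2.0})   # accepted
+#     OlmoConfig(rope_scaling={"type": "yarn", "factor": 2.0})     # ValueError: type must be 'linear' or 'dynamic'
+#     OlmoConfig(rope_scaling={"type": "linear", "factor": 0.5})   # ValueError: factor must be a float > 1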
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/convert_olmo_weights_to_hf.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/convert_olmo_weights_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e77bdc69e7a0ca713a1696a486576dfd051f059
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/convert_olmo_weights_to_hf.py
@@ -0,0 +1,248 @@
+# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import gc
+import json
+import os
+import shutil
+from pathlib import Path
+
+import torch
+import yaml
+from tokenizers import Tokenizer
+
+from transformers import OlmoConfig, OlmoForCausalLM
+from transformers.models.gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
+
+
+"""
+Sample usage:
+
+```
+python src/transformers/models/olmo/convert_olmo_weights_to_hf.py \
+    --input_dir /path/to/downloaded/olmo/weights --output_dir /output/path
+```
+
+Thereafter, models can be loaded via:
+
+```py
+from transformers import OlmoForCausalLM, AutoTokenizer
+
+model = OlmoForCausalLM.from_pretrained("/output/path")
+tokenizer = AutoTokenizer.from_pretrained("/output/path")
+```
+
+Important note: you need to be able to host the whole model in RAM to execute this script (even though the biggest
+versions come in several checkpoints, each checkpoint contains a part of every weight of the model, so they all need to
+be loaded in RAM).
+"""
+
+
+def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
+ return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
+
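+# Worked example (illustration only; this helper does not appear to be used elsewhere in this script):
+# compute_intermediate_size(4096) == 11008, since int(8 * 4096 / 3) == 10922 is rounded up to the
+# next multiple of 256.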
+
+def read_json(path):
+ with open(path, "r") as f:
+ return json.load(f)
+
+
+def write_json(text, path):
+ with open(path, "w") as f:
+ json.dump(text, f)
+
+
+def write_model(model_path, input_base_path, tokenizer_path=None, safe_serialization=True, fix_eos_token_id=True):
+ os.makedirs(model_path, exist_ok=True)
+ tmp_model_path = os.path.join(model_path, "tmp")
+ os.makedirs(tmp_model_path, exist_ok=True)
+
+ config_path = Path(input_base_path) / "config.yaml"
+ olmo_config = yaml.safe_load(config_path.read_text())["model"]
+
+ n_layers = olmo_config["n_layers"]
+ n_heads = olmo_config["n_heads"]
+ dim = olmo_config["d_model"]
+ dims_per_head = dim // n_heads
+ base = 10000.0
+ inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
+ max_position_embeddings = olmo_config["max_sequence_length"]
+
+ vocab_size = olmo_config.get("embedding_size", olmo_config["vocab_size"])
+
+ if olmo_config.get("n_kv_heads", None) is not None:
+ num_key_value_heads = olmo_config["n_kv_heads"] # for GQA / MQA
+ elif olmo_config["multi_query_attention"]: # compatibility with other checkpoints
+ num_key_value_heads = 1
+ else:
+ num_key_value_heads = n_heads
+
+ print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
+
+ # Not sharded
+ # (The sharded implementation would also work, but this is simpler.)
+ loaded = torch.load(os.path.join(input_base_path, "model.pt"), map_location="cpu")
+
+ param_count = 0
+ index_dict = {"weight_map": {}}
+ for layer_i in range(n_layers):
+ filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
+ # Unsharded
+ # TODO: Layernorm stuff
+ # TODO: multi query attention
+ fused_dims = [dim, dims_per_head * num_key_value_heads, dims_per_head * num_key_value_heads]
+ q_proj_weight, k_proj_weight, v_proj_weight = torch.split(
+ loaded[f"transformer.blocks.{layer_i}.att_proj.weight"], fused_dims, dim=0
+ )
+ up_proj_weight, gate_proj_weight = torch.chunk(
+ loaded[f"transformer.blocks.{layer_i}.ff_proj.weight"], 2, dim=0
+ )
+ state_dict = {
+ f"model.layers.{layer_i}.self_attn.q_proj.weight": q_proj_weight,
+ f"model.layers.{layer_i}.self_attn.k_proj.weight": k_proj_weight,
+ f"model.layers.{layer_i}.self_attn.v_proj.weight": v_proj_weight,
+ f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"transformer.blocks.{layer_i}.attn_out.weight"],
+ f"model.layers.{layer_i}.mlp.gate_proj.weight": gate_proj_weight,
+ f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"transformer.blocks.{layer_i}.ff_out.weight"],
+ f"model.layers.{layer_i}.mlp.up_proj.weight": up_proj_weight,
+ }
+
+ state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
+
+ for k, v in state_dict.items():
+ index_dict["weight_map"][k] = filename
+ param_count += v.numel()
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
+
+ filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
+
+ # Unsharded
+ # TODO: Deal with weight-tying
+ state_dict = {
+ "model.embed_tokens.weight": loaded["transformer.wte.weight"],
+ "lm_head.weight": loaded["transformer.ff_out.weight"]
+ if "transformer.ff_out.weight" in loaded
+ else loaded["transformer.wte.weight"],
+ }
+
+ for k, v in state_dict.items():
+ index_dict["weight_map"][k] = filename
+ param_count += v.numel()
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
+
+ # Write configs
+ index_dict["metadata"] = {"total_size": param_count * 2}
+ write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
+
+ if olmo_config.get("mlp_hidden_size", None) is not None:
+ intermediate_size = olmo_config["mlp_hidden_size"] // 2
+ else:
+ intermediate_size = (dim * olmo_config["mlp_ratio"]) // 2
+
+ config = OlmoConfig(
+ vocab_size=vocab_size,
+ hidden_size=dim,
+ intermediate_size=intermediate_size,
+ num_hidden_layers=n_layers,
+ num_attention_heads=n_heads,
+ num_key_value_heads=num_key_value_heads,
+ max_position_embeddings=max_position_embeddings,
+ pad_token_id=olmo_config["pad_token_id"],
+ bos_token_id=None,
+ eos_token_id=olmo_config["eos_token_id"],
+ tie_word_embeddings=olmo_config["weight_tying"],
+ rope_theta=base,
+ clip_qkv=olmo_config.get("clip_qkv"),
+ )
+ config.save_pretrained(tmp_model_path)
+
+ # Make space so we can load the model properly now.
+ del state_dict
+ del loaded
+ gc.collect()
+
+ if tokenizer_path is not None:
+ _write_tokenizer(model_path, config, tokenizer_path, fix_eos_token_id)
+
+    print("Loading the checkpoint in an OLMo model.")
+ model = OlmoForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float32, low_cpu_mem_usage=True)
+ # Avoid saving this as part of the config.
+ del model.config._name_or_path
+ print("Saving in the Transformers format.")
+ model.save_pretrained(model_path, safe_serialization=safe_serialization)
+ shutil.rmtree(tmp_model_path)
+
+
+def _write_tokenizer(
+ output_path: Path, config: OlmoConfig, input_tokenizer_path: Path, fix_eos_token_id: bool = True
+) -> None:
+ print(f"Saving a {GPTNeoXTokenizerFast.__name__} to {output_path}.")
+
+ base_tokenizer = Tokenizer.from_file(str(input_tokenizer_path))
+
+ eos_token_id = config.eos_token_id if config.eos_token_id is not None else base_tokenizer.get_vocab_size() - 1
+ pad_token_id = config.pad_token_id if config.pad_token_id is not None else eos_token_id
+
+ if fix_eos_token_id and eos_token_id == 0:
+ # Fixing a bug in OLMo where eos token id was incorrectly set
+ print("Changing eos_token_id from 0 to 50279.")
+ eos_token_id = 50279
+
+ tokenizer = GPTNeoXTokenizerFast(
+ tokenizer_object=base_tokenizer,
+ eos_token=base_tokenizer.decode([eos_token_id], skip_special_tokens=False),
+ pad_token=base_tokenizer.decode([pad_token_id], skip_special_tokens=False),
+ unk_token=None,
+ bos_token=None,
+ )
+
+ tokenizer.save_pretrained(output_path)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--input_dir",
+ required=True,
+ help="Location of OLMo weights, which contains config.yaml and model.pt.",
+ )
+ parser.add_argument(
+ "--tokenizer_json_path",
+ default=None,
+ help="Location of OLMo tokenizer json file.",
+ )
+ parser.add_argument(
+ "--output_dir",
+ required=True,
+ help="Location to write HF model and tokenizer",
+ )
+ parser.add_argument(
+ "--no_fix_eos_token_id",
+ action="store_false",
+ dest="fix_eos_token_id",
+ help="If set, does not change eos token id from 0 to 50279 if it is 0. Changing 0 to 50279 is a bug fix, so use this option with care.",
+ )
+    # `type=bool` would treat any non-empty string as True, so use an explicit boolean flag instead.
+    parser.add_argument("--safe_serialization", default=True, action=argparse.BooleanOptionalAction, help="Whether or not to save using `safetensors`.")
+ # Different OLMo versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used.
+ args = parser.parse_args()
+ write_model(
+ model_path=args.output_dir,
+ input_base_path=args.input_dir,
+ safe_serialization=args.safe_serialization,
+ tokenizer_path=args.tokenizer_json_path,
+ fix_eos_token_id=args.fix_eos_token_id,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/modeling_olmo.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/modeling_olmo.py
new file mode 100644
index 0000000000000000000000000000000000000000..83637536a12531a42e4dae6a26b37028da116a0e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/olmo/modeling_olmo.py
@@ -0,0 +1,1325 @@
+# coding=utf-8
+# Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch OLMo model."""
+
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, StaticCache
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_olmo import OlmoConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "OlmoConfig"
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
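+# Worked example (illustration only): for attention_mask = [[1, 1, 0], [1, 1, 1]] this returns
+# indices = [0, 1, 3, 4, 5] (flattened positions of the non-padding tokens),
+# cu_seqlens = [0, 2, 5] and max_seqlen_in_batch = 3.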
+
+class OlmoLayerNorm(nn.Module):
+ """LayerNorm but with no learnable weight or bias."""
+
+ def __init__(self, hidden_size: int) -> None:
+ super().__init__()
+ self.normalized_shape = (hidden_size,)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ orig_dtype = hidden_states.dtype
+ return F.layer_norm(hidden_states.to(dtype=torch.float32), self.normalized_shape, None, None, eps=1e-5).to(
+ orig_dtype
+ )
+
+
+ALL_LAYERNORM_LAYERS.append(OlmoLayerNorm)
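+# For reference (illustration only): OlmoLayerNorm(hidden_size) behaves like
+# nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-5) evaluated in float32 and cast back
+# to the input dtype.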
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Olmo
+class OlmoRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ super().__init__()
+ self.scaling_factor = scaling_factor
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ # For BC we register cos and sin cached
+ self.max_seq_len_cached = max_position_embeddings
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+ t = t / self.scaling_factor
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False)
+ self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False)
+
+ @property
+ def sin_cached(self):
+ logger.warning_once(
+ "The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
+ "the forward method of RoPE from now on instead. It is not used in the `OlmoAttention` class"
+ )
+ return self._sin_cached
+
+ @property
+ def cos_cached(self):
+ logger.warning_once(
+ "The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
+ "the forward method of RoPE from now on instead. It is not used in the `OlmoAttention` class"
+ )
+ return self._cos_cached
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 since bfloat16 loses precision on long contexts
+ # See https://github.com/huggingface/transformers/pull/29285
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
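+# Shape note (illustration only): for x of shape (bs, num_heads, seq_len, head_dim) and position_ids
+# of shape (bs, seq_len), forward returns cos and sin of shape (bs, seq_len, head_dim) in x's dtype.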
+
+# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->Olmo
+class OlmoLinearScalingRotaryEmbedding(OlmoRotaryEmbedding):
+ """OlmoRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+ def forward(self, x, position_ids):
+        # difference to the original RoPE: a scaling factor is applied to the position ids
+ position_ids = position_ids.float() / self.scaling_factor
+ cos, sin = super().forward(x, position_ids)
+ return cos, sin
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->Olmo
+class OlmoDynamicNTKScalingRotaryEmbedding(OlmoRotaryEmbedding):
+ """OlmoRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+ def forward(self, x, position_ids):
+ # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length
+ seq_len = torch.max(position_ids) + 1
+ if seq_len > self.max_position_embeddings:
+ base = self.base * (
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+ ) ** (self.dim / (self.dim - 2))
+ inv_freq = 1.0 / (
+ base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim)
+ )
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: this may break with compilation
+
+ cos, sin = super().forward(x, position_ids)
+ return cos, sin
+
+
+# Copied from transformers.models.llama.modeling_llama.rotate_half
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
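+# Example (illustration only): rotate_half of a tensor whose last dimension holds [x1, x2, x3, x4]
+# returns [-x3, -x4, x1, x2].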
+
+# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
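+# Broadcast sketch (illustration only): with q, k of shape (batch, num_heads, seq_len, head_dim) and
+# cos, sin of shape (batch, seq_len, head_dim), the default unsqueeze_dim=1 reshapes cos and sin to
+# (batch, 1, seq_len, head_dim) so they broadcast across the heads dimension.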
+
+class OlmoMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, x):
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
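+# Computation sketch (illustration only): with the default hidden_act="silu", OlmoMLP is the
+# SwiGLU-style feed-forward down_proj(silu(gate_proj(x)) * up_proj(x)).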
+
+# Copied from transformers.models.llama.modeling_llama.repeat_kv
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
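+# Example (illustration only): repeat_kv on a (2, 4, 128, 64) tensor with n_rep=8 returns a
+# (2, 32, 128, 64) tensor, matching torch.repeat_interleave(hidden_states, repeats=8, dim=1).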
+
+class OlmoAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaAttention.__init__ with Llama->Olmo
+ def __init__(self, config: OlmoConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.attention_dropout = config.attention_dropout
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
+ self._init_rope()
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->Olmo
+ def _init_rope(self):
+ if self.config.rope_scaling is None:
+ self.rotary_emb = OlmoRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+ else:
+ scaling_type = self.config.rope_scaling["type"]
+ scaling_factor = self.config.rope_scaling["factor"]
+ if scaling_type == "linear":
+ self.rotary_emb = OlmoLinearScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ elif scaling_type == "dynamic":
+ self.rotary_emb = OlmoDynamicNTKScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ else:
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ if self.config.clip_qkv is not None:
+ query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+ key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+ value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attention_mask is not None: # no matter the length, we just slice it
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+class OlmoFlashAttention2(OlmoAttention):
+ """
+    OLMo flash attention module. This module inherits from `OlmoAttention` as the weights of the module stay
+    untouched. The only required change would be on the forward pass, where it needs to correctly call the public API
+    of flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ if self.config.clip_qkv is not None:
+ query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+ key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+ value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+
+        # Flash attention requires the input to have the shape
+        # batch_size x seq_length x num_heads x head_dim. The states are temporarily transposed to
+        # (batch, num_heads, seq_len, head_dim) for RoPE and the KV cache, then transposed back below.
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.attention_dropout if self.training else 0.0
+
+        # In PEFT, the layer norms are usually cast to float32 for training stability reasons,
+        # so the input hidden states get silently cast to float32. Hence, we need to
+        # cast them back to the correct dtype just to be sure everything works as expected.
+        # This might slow down training and inference, so it is recommended to not cast the LayerNorms
+        # to fp32. (OlmoLayerNorm handles it correctly)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+                f"The input hidden states seem to have been silently cast to float32; this might be related to"
+                f" the fact that you have upcast embedding or layer norm layers to float32. We will cast the input back"
+                f" to {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with Llama->Olmo
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        first unpad the input, then compute the attention scores and pad the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in OlmoFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class OlmoSdpaAttention(OlmoAttention):
+ """
+ OLMo attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+    `OlmoAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
+    the SDPA API.
+ """
+
+ # Adapted from OlmoAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "OlmoModel is using OlmoSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ if self.config.clip_qkv is not None:
+ query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+ key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+ value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ # In case static cache is used, it is an instance attribute.
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ causal_mask = attention_mask
+ # if attention_mask is not None and cache_position is not None:
+ if attention_mask is not None:
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and causal_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=causal_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ is_causal=causal_mask is None and q_len > 1,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+OLMO_ATTENTION_CLASSES = {
+ "eager": OlmoAttention,
+ "flash_attention_2": OlmoFlashAttention2,
+ "sdpa": OlmoSdpaAttention,
+}
+
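+# Selection sketch (illustration only): the attention class is picked from config._attn_implementation,
+# which is typically set when loading the model, e.g.
+#     OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-hf", attn_implementation="sdpa")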
+
+class OlmoDecoderLayer(nn.Module):
+ def __init__(self, config: OlmoConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = OLMO_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
+
+ self.mlp = OlmoMLP(config)
+ self.input_layernorm = OlmoLayerNorm(config.hidden_size)
+ self.post_attention_layernorm = OlmoLayerNorm(config.hidden_size)
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*):
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
+ query_sequence_length, key_sequence_length)` if default attention is used.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+OLMO_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`OlmoConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Olmo Model outputting raw hidden-states without any specific head on top.",
+ OLMO_START_DOCSTRING,
+)
+# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->Olmo
+class OlmoPreTrainedModel(PreTrainedModel):
+ config_class = OlmoConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["OlmoDecoderLayer"]
+ _skip_keys_device_placement = ["past_key_values"]
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
+ raise ValueError(
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
+ )
+
+ for layer in self.model.layers:
+ device = layer.input_layernorm.weight.device
+ if hasattr(self.config, "_pre_quantization_dtype"):
+ dtype = self.config._pre_quantization_dtype
+ else:
+ dtype = layer.self_attn.o_proj.weight.dtype
+ layer.self_attn.past_key_value = cache_cls(
+ self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
+ )
+
+ def _reset_cache(self):
+ for layer in self.model.layers:
+ layer.self_attn.past_key_value = None
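+
+ # A minimal, illustrative sketch of how these two helpers are used around generation with a
+ # static cache (argument values are arbitrary examples; not executed here):
+ #
+ #     model._setup_cache(StaticCache, 1, max_cache_len=512)  # pre-allocate per-layer caches
+ #     ...                                                     # run decoding, passing `cache_position`
+ #     model._reset_cache()                                    # free the per-layer caches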
+
+
+OLMO_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance;
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
+
+@add_start_docstrings(
+ "The bare Olmo Model outputting raw hidden-states without any specific head on top.",
+ OLMO_START_DOCSTRING,
+)
+class OlmoModel(OlmoPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OlmoDecoderLayer`]
+
+ Args:
+ config: OlmoConfig
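+
+ Example (an illustrative sketch; instantiating from a config gives a randomly initialized model,
+ no pretrained weights are loaded):
+
+ ```python
+ >>> from transformers import OlmoConfig, OlmoModel
+
+ >>> configuration = OlmoConfig()
+ >>> model = OlmoModel(configuration)
+ ```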
+ """
+
+ def __init__(self, config: OlmoConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [OlmoDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = OlmoLayerNorm(config.hidden_size)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(OLMO_INPUTS_DOCSTRING)
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel.forward
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError(
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+ )
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ past_seen_tokens = 0
+ if use_cache: # kept for BC (cache positions)
+ if not isinstance(past_key_values, StaticCache):
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_seen_tokens = past_key_values.get_seq_length()
+
+ if cache_position is None:
+ if isinstance(past_key_values, StaticCache):
+ raise ValueError("cache_position is a required argument when using StaticCache.")
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_seen_tokens)
+
+ # embed positions
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ causal_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ cache_position,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = (
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
+ )
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
+ def _update_causal_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_tensor: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_seen_tokens: int,
+ ):
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
+
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+ if self.config._attn_implementation == "sdpa":
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument,
+ # in order to dispatch on Flash Attention 2.
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
+ attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens
+ ):
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ min_dtype = torch.finfo(dtype).min
+ sequence_length = input_tensor.shape[1]
+ if hasattr(getattr(self.layers[0], "self_attn", {}), "past_key_value"): # static cache
+ target_length = self.config.max_position_embeddings
+ else: # dynamic cache
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else past_seen_tokens + sequence_length + 1
+ )
+
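+ # Build an additive causal mask: positions that may be attended to hold 0, masked positions hold
+ # `min_dtype` (the most negative representable value, acting as -inf after the softmax).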
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ if attention_mask.dim() == 2:
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
+ elif attention_mask.dim() == 4:
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
+ offset = cache_position[0]
+ else:
+ offset = 0
+ mask_shape = attention_mask.shape
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
+ causal_mask[
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+ ] = mask_slice
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->OLMO,Llama->Olmo
+class OlmoForCausalLM(OlmoPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = OlmoModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(OLMO_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ # Ignore copy
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, OlmoForCausalLM
+
+ >>> model = OlmoForCausalLM.from_pretrained("allenai/OLMo-1B-hf")
+ >>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ 'Hey, are you conscious? Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m'
+ ```
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+ ):
+ # With static cache, the `past_key_values` is None
+ # TODO joao: standardize interface for the different Cache classes and remove of this if
+ has_static_cache = False
+ if past_key_values is None:
+ past_key_values = getattr(getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None)
+ has_static_cache = past_key_values is not None
+
+ past_length = 0
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
+ max_cache_length = (
+ torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
+ if past_key_values.get_max_length() is not None
+ else None
+ )
+ cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
+ # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
+ # TODO: use `next_tokens` directly instead.
+ model_inputs = {"input_ids": input_ids.contiguous()}
+
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
+ if cache_position is None:
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
+ else:
+ cache_position = cache_position[-input_length:]
+
+ if has_static_cache:
+ past_key_values = None
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3167311a5a6ef7df2ae198fe93a68647a9654ffe
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/__init__.py
@@ -0,0 +1,111 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_seamless_m4t": ["SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4TConfig"],
+ "feature_extraction_seamless_m4t": ["SeamlessM4TFeatureExtractor"],
+ "processing_seamless_m4t": ["SeamlessM4TProcessor"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_seamless_m4t"] = ["SeamlessM4TTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_seamless_m4t_fast"] = ["SeamlessM4TTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_seamless_m4t"] = [
+ "SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "SeamlessM4TForTextToSpeech",
+ "SeamlessM4TForSpeechToSpeech",
+ "SeamlessM4TForTextToText",
+ "SeamlessM4TForSpeechToText",
+ "SeamlessM4TModel",
+ "SeamlessM4TPreTrainedModel",
+ "SeamlessM4TCodeHifiGan",
+ "SeamlessM4THifiGan",
+ "SeamlessM4TTextToUnitForConditionalGeneration",
+ "SeamlessM4TTextToUnitModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_seamless_m4t import SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4TConfig
+ from .feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor
+ from .processing_seamless_m4t import SeamlessM4TProcessor
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_seamless_m4t import SeamlessM4TTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_seamless_m4t_fast import SeamlessM4TTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_seamless_m4t import (
+ SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST,
+ SeamlessM4TCodeHifiGan,
+ SeamlessM4TForSpeechToSpeech,
+ SeamlessM4TForSpeechToText,
+ SeamlessM4TForTextToSpeech,
+ SeamlessM4TForTextToText,
+ SeamlessM4THifiGan,
+ SeamlessM4TModel,
+ SeamlessM4TPreTrainedModel,
+ SeamlessM4TTextToUnitForConditionalGeneration,
+ SeamlessM4TTextToUnitModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..a90a30f5795f5f368841b2b3d9b3288aa4cf5c1a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py
@@ -0,0 +1,397 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Converting Meta SeamlessM4T checkpoints from seamless_communication to HF."""
+
+
+import argparse
+import os
+from pathlib import Path
+
+import torch
+from accelerate.utils.modeling import find_tied_parameters
+from seamless_communication.models.inference.translator import Translator
+
+from transformers import (
+ SeamlessM4TConfig,
+ SeamlessM4TFeatureExtractor,
+ SeamlessM4TModel,
+ SeamlessM4TProcessor,
+ SeamlessM4TTokenizer,
+)
+from transformers.utils import logging
+
+
+UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ] # fmt: skip
+VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",] # fmt: skip
+MEDIUM_SUPPORTED_LANGUAGES = ["ace","ace_Latn","acm","acq","aeb","afr","ajp","aka","amh","apc","arb","ars","ary","arz","asm","ast","awa","ayr","azb","azj","bak","bam","ban","bel","bem","ben","bho","bjn","bjn_Latn","bod","bos","bug","bul","cat","ceb","ces","cjk","ckb","crh","cym","dan","deu","dik","dyu","dzo","ell","eng","epo","est","eus","ewe","fao","pes","fij","fin","fon","fra","fur","fuv","gla","gle","glg","grn","guj","hat","hau","heb","hin","hne","hrv","hun","hye","ibo","ilo","ind","isl","ita","jav","jpn","kab","kac","kam","kan","kas","kas_Deva","kat","knc","knc_Latn","kaz","kbp","kea","khm","kik","kin","kir","kmb","kon","kor","kmr","lao","lvs","lij","lim","lin","lit","lmo","ltg","ltz","lua","lug","luo","lus","mag","mai","mal","mar","min","mkd","plt","mlt","mni","khk","mos","mri","zsm","mya","nld","nno","nob","npi","nso","nus","nya","oci","gaz","ory","pag","pan","pap","pol","por","prs","pbt","quy","ron","run","rus","sag","san","sat","scn","shn","sin","slk","slv","smo","sna","snd","som","sot","spa","als","srd","srp","ssw","sun","swe","swh","szl","tam","tat","tel","tgk","tgl","tha","tir","taq","taq_Tfng","tpi","tsn","tso","tuk","tum","tur","twi","tzm","uig","ukr","umb","urd","uzn","vec","vie","war","wol","xho","ydd","yor","yue","cmn","cmn_Hant","zul",] # fmt: skip
+LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",] # fmt: skip
+
+
+def assert_param_count(model_1, model_2):
+ count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0])
+ count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0])
+ assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
+
+
+def param_count(model):
+ return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
+
+
+def _grab_best_device(use_gpu=True):
+ if torch.cuda.device_count() > 0 and use_gpu:
+ device = "cuda"
+ else:
+ device = "cpu"
+ return torch.device(device)
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+vocoder_convert_list = [
+ ("ups", "hifi_gan.upsampler"),
+ ("conv_pre", "hifi_gan.conv_pre"),
+ ("resblocks", "hifi_gan.resblocks"),
+ ("conv_post", "hifi_gan.conv_post"),
+ ("lang", "language_embedding"),
+ ("spkr", "speaker_embedding"),
+ ("dict.", "unit_embedding."),
+ ("dur_predictor.conv1.0", "dur_predictor.conv1"),
+ ("dur_predictor.conv2.0", "dur_predictor.conv2"),
+]
+
+# order is important
+wav2vec_convert_list = [
+ ("speech_encoder_frontend.model_dim_proj", "feature_projection.projection"),
+ ("speech_encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"),
+ ("speech_encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"),
+ ("speech_encoder.inner.layers", "encoder.layers"),
+ ("speech_encoder.inner_layer_norm", "encoder.layer_norm"),
+ ("speech_encoder.adaptor_layers", "adapter.layers"),
+ ("inner_proj", "intermediate_dense"),
+ ("self_attn.output_proj", "self_attn.linear_out"),
+ ("output_proj", "output_dense"),
+ ("self_attn.k_proj", "self_attn.linear_k"),
+ ("self_attn.v_proj", "self_attn.linear_v"),
+ ("self_attn.q_proj", "self_attn.linear_q"),
+ ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"),
+ ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"),
+ ("self_attn.sdpa.r_proj", "self_attn.linear_pos"),
+ ("conv.pointwise_conv1", "conv_module.pointwise_conv1"),
+ ("conv.pointwise_conv2", "conv_module.pointwise_conv2"),
+ ("conv.depthwise_conv", "conv_module.depthwise_conv"),
+ ("conv.batch_norm", "conv_module.batch_norm"),
+ ("conv_layer_norm", "conv_module.layer_norm"),
+ ("speech_encoder.proj1", "intermediate_ffn.intermediate_dense"),
+ ("speech_encoder.proj2", "intermediate_ffn.output_dense"),
+ ("speech_encoder.layer_norm", "inner_layer_norm"),
+]
+
+t2u_convert_list = [
+ ("t2u_model.final_proj", "lm_head"),
+ ("t2u_model.", "model."),
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
+ ("encoder_decoder_attn", "cross_attention"),
+ ("linear_k", "k_proj"),
+ ("linear_v", "v_proj"),
+ ("linear_q", "q_proj"),
+ ("ffn.inner_proj", "ffn.fc1"),
+ ("ffn.output_proj", "ffn.fc2"),
+ ("output_proj", "out_proj"),
+ ("decoder_frontend.embed", "decoder.embed_tokens"),
+]
+
+text_convert_list = [
+ ("text_encoder.", ""),
+ ("text_decoder.", ""),
+ ("text_encoder_frontend.embed", "embed_tokens"),
+ ("text_decoder_frontend.embed", "embed_tokens"),
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
+ ("encoder_decoder_attn", "cross_attention"),
+ ("linear_k", "k_proj"),
+ ("linear_v", "v_proj"),
+ ("linear_q", "q_proj"),
+ ("ffn.inner_proj", "ffn.fc1"),
+ ("ffn.output_proj", "ffn.fc2"),
+ ("output_proj", "out_proj"),
+ ("final_proj", "lm_head"),
+]
+
+CUR_PATH = os.path.dirname(os.path.abspath(__file__))
+default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
+CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "huggingface", "hub")
+
+
+def _load_hf_config(model_type="medium"):
+ if model_type == "medium":
+ kwargs = {
+ "vocab_size": 256206,
+ "t2u_vocab_size": 10082,
+ "hidden_size": 1024,
+ "max_position_embeddings": 4096,
+ "encoder_layers": 12,
+ "decoder_layers": 12,
+ "encoder_ffn_dim": 4096,
+ "decoder_ffn_dim": 4096,
+ "t2u_encoder_layers": 4,
+ "t2u_decoder_layers": 4,
+ "speech_encoder_layers": 12,
+ }
+ return SeamlessM4TConfig(**kwargs)
+ else:
+ return SeamlessM4TConfig()
+
+
+def _convert_model(
+ original_model,
+ hf_model,
+ convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict="speech",
+ exclude_state_dict=None,
+):
+ state_dict = original_model.state_dict()
+
+ # filter func
+ if isinstance(filter_state_dict, str):
+
+ def filter_func(x):
+ return filter_state_dict in x[0]
+
+ else:
+
+ def filter_func(item):
+ if exclude_state_dict is not None and exclude_state_dict in item[0]:
+ return False
+ for filter_el in filter_state_dict:
+ if filter_el in item[0]:
+ return True
+
+ return False
+
+ state_dict = dict(filter(filter_func, state_dict.items()))
+
+ for k, v in list(state_dict.items()):
+ new_k = k[len(unwanted_prefix) :]
+ for old_layer_name, new_layer_name in convert_list:
+ if old_layer_name in new_k:
+ new_k = new_k.replace(old_layer_name, new_layer_name)
+
+ # must do it by hand
+ if ".layer_norm" in new_k and new_k.split(".layer_norm")[0][-1].isnumeric():
+ new_k = new_k.replace("layer_norm", "final_layer_norm")
+
+ state_dict[new_k] = state_dict.pop(k)
+
+ extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
+ missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
+ missing_keys = {k for k in missing_keys if "final_logits_bias" not in k}
+ if len(extra_keys) != 0:
+ raise ValueError(f"extra keys found: {extra_keys}")
+ if len(missing_keys) != 0:
+ raise ValueError(f"missing keys: {missing_keys}")
+ hf_model.load_state_dict(state_dict, strict=False)
+ n_params = param_count(hf_model)
+
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params")
+
+ hf_model.eval()
+ hf_model.to(device)
+ del state_dict
+
+ return hf_model
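+
+# Illustrative example of the key renaming performed above (the parameter name is hypothetical):
+#
+#     "model.text_decoder.layers.0.self_attn.linear_k.weight"
+#       -> strip `unwanted_prefix` ("model.")              -> "text_decoder.layers.0.self_attn.linear_k.weight"
+#       -> apply ("text_decoder.", "") and ("linear_k", "k_proj")
+#          from `text_convert_list`                        -> "layers.0.self_attn.k_proj.weight"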
+
+
+def load_model(save_dir, model_type, repo_id):
+ """
+ Meta SeamlessM4T is made of 8 main components:
+ - speech_encoder (#1) and speech_encoder_frontend (#2)
+ - t2u_model (#3)
+ - text_encoder (#4) and text_encoder_frontend (#5)
+ - text_decoder (#6) [its text_decoder_frontend (#5) is shared with text_encoder_frontend]
+ - final_proj (#7)
+ - vocoder (#8)
+ """
+ device = _grab_best_device()
+ if model_type == "medium":
+ name = "seamlessM4T_medium"
+ else:
+ name = "seamlessM4T_large"
+
+ original_model = Translator(name, "vocoder_36langs", device, torch.float32)
+
+ ######### TOKENIZER
+
+ langs = MEDIUM_SUPPORTED_LANGUAGES if model_type == "medium" else LARGE_SUPPORTED_LANGUAGES
+ langs = [f"__{lang}__" for lang in langs]
+ vocab_file = os.path.join(os.path.expanduser("~"), "tokenizer", model_type, "tokenizer.model")
+
+ save_dir = os.path.join(save_dir, name)
+ Path(save_dir).mkdir(exist_ok=True)
+
+ tokenizer = SeamlessM4TTokenizer(vocab_file, additional_special_tokens=langs)
+
+ sanity_check_lang_id = tokenizer.convert_tokens_to_ids("__fra__")
+
+ tokenizer.save_pretrained(save_dir)
+ tokenizer = SeamlessM4TTokenizer.from_pretrained(save_dir)
+
+ if sanity_check_lang_id != tokenizer.convert_tokens_to_ids("__fra__"):
+ raise ValueError(
+ f"Error in tokenizer saving/loading - __fra__ lang id is not coherent: {sanity_check_lang_id} vs {tokenizer.convert_tokens_to_ids('__fra__')}"
+ )
+
+ ####### get language to ids dict
+ text_decoder_lang_code_to_id = {lang.replace("__", ""): tokenizer.convert_tokens_to_ids(lang) for lang in langs}
+ # offset: vocoder unit vocab size + 5 (for EOS/PAD/BOS/UNK/MSK) + len(supported_languages)
+ t2u_lang_code_to_id = {
+ code.replace("__", ""): i + 10005 + len(UNIT_SUPPORTED_LANGUAGES)
+ for i, code in enumerate(UNIT_SUPPORTED_LANGUAGES)
+ }
+ vocoder_lang_code_to_id = {code.replace("__", ""): i for i, code in enumerate(VOCODER_SUPPORTED_LANGUAGES)}
+
+ ######### FE
+
+ fe = SeamlessM4TFeatureExtractor(language_code=langs)
+
+ fe.save_pretrained(save_dir)
+ fe = SeamlessM4TFeatureExtractor.from_pretrained(save_dir)
+
+ processor = SeamlessM4TProcessor(feature_extractor=fe, tokenizer=tokenizer)
+ processor.save_pretrained(save_dir)
+ processor.push_to_hub(repo_id=repo_id, create_pr=True)
+
+ processor = SeamlessM4TProcessor.from_pretrained(save_dir)
+
+ ######## Model
+
+ # init model
+ hf_config = _load_hf_config(model_type)
+ hf_model = SeamlessM4TModel(hf_config)
+
+ hf_model.generation_config.__setattr__("text_decoder_lang_to_code_id", text_decoder_lang_code_to_id)
+ hf_model.generation_config.__setattr__("t2u_lang_code_to_id", t2u_lang_code_to_id)
+ hf_model.generation_config.__setattr__("vocoder_lang_code_to_id", vocoder_lang_code_to_id)
+
+ # -1. take care of vocoder
+ # similarly to speech T5 must apply and remove weight norm
+ hf_model.vocoder.apply_weight_norm()
+ hf_model.vocoder = _convert_model(
+ original_model,
+ hf_model.vocoder,
+ vocoder_convert_list,
+ device,
+ unwanted_prefix="vocoder.code_generator.",
+ filter_state_dict="vocoder",
+ )
+ hf_model.vocoder.remove_weight_norm()
+
+ # 1. take care of speech encoder
+ wav2vec = hf_model.speech_encoder
+ hf_model.speech_encoder = _convert_model(
+ original_model, wav2vec, wav2vec_convert_list, device, unwanted_prefix="model.", filter_state_dict="speech"
+ )
+
+ # 2. take care of t2u
+
+ hf_model.t2u_model = _convert_model(
+ original_model,
+ hf_model.t2u_model,
+ t2u_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict="t2u_model",
+ )
+
+ # 3. take care of text encoder
+ hf_model.text_encoder = _convert_model(
+ original_model,
+ hf_model.text_encoder,
+ text_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.text_encoder"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # 4. take care of text decoder
+ hf_model.text_decoder = _convert_model(
+ original_model,
+ hf_model.text_decoder,
+ text_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.text_decoder"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # 5. take care of final proj
+ hf_model.lm_head = _convert_model(
+ original_model,
+ hf_model.lm_head,
+ [("final_proj.", "")],
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.final_proj"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # sanity check
+ print(find_tied_parameters(hf_model))
+
+ count_1 = param_count(hf_model)
+ count_2 = param_count(original_model)
+
+ print(f"HF MODEL:{count_1}, ORIGINAL_MODEL: {count_2}, diff:{count_1 - count_2}")
+ print(f"HF MODEL excluding embeddings:{hf_model.num_parameters(exclude_embeddings=True)}")
+
+ del original_model
+
+ hf_model.generation_config._from_model_config = False
+ hf_model.save_pretrained(save_dir)
+ hf_model.push_to_hub(repo_id=repo_id, create_pr=True)
+ hf_model = SeamlessM4TModel.from_pretrained(save_dir)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+
+ parser.add_argument(
+ "--model_type",
+ default="medium",
+ type=str,
+ help="Model type.",
+ )
+
+ parser.add_argument(
+ "--save_dir",
+ default="/home/ubuntu/weights",
+ type=str,
+ help="Path to the output PyTorch model.",
+ )
+
+ parser.add_argument(
+ "--repo_id",
+ default="facebook/hf-seamless-m4t-medium",
+ type=str,
+ help="Repo ID.",
+ )
+
+ args = parser.parse_args()
+
+ load_model(args.save_dir, args.model_type, args.repo_id)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/modeling_seamless_m4t.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0fe60a6434adec2e650d345d66a774e542eb311
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/modeling_seamless_m4t.py
@@ -0,0 +1,4384 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch SeamlessM4T model."""
+
+
+import copy
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+ Wav2Vec2BaseModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_seamless_m4t import SeamlessM4TConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/hf-seamless-m4t-medium"
+_CONFIG_FOR_DOC = "SeamlessM4TConfig"
+
+
+from ..deprecated._archive_maps import ( # noqa: F401, E402
+ SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402
+ SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, # noqa: F401, E402
+)
+
+
+@dataclass
+class SeamlessM4TGenerationOutput(ModelOutput):
+ """
+ Class defining the generated outputs from [`SeamlessM4TModel`], [`SeamlessM4TForTextToText`],
+ [`SeamlessM4TForTextToSpeech`], [`SeamlessM4TForSpeechToSpeech`] and [`SeamlessM4TForSpeechToText`].
+
+ Args:
+ waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ The final audio waveform predicted by the model.
+ waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*):
+ The length in samples of each element in the `waveform` batch.
+ sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The generated translated sequences. This is the output of the text-to-text or the speech-to-text models.
+ The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
+ early due to the `eos_token_id`.
+ unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*):
+ The generated translated unit sequences. This is the output of the text-to-units model. The second
+ dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished
+ early due to the `t2u_eos_token_id`.
+ """
+
+ waveform: Optional[torch.FloatTensor] = None
+ waveform_lengths: Optional[torch.IntTensor] = None
+ sequences: Optional[Tuple[torch.FloatTensor]] = None
+ unit_sequences: Optional[Tuple[torch.FloatTensor]] = None
+
+
+SEAMLESS_M4T_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`~SeamlessM4TConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SEAMLESS_M4T_INPUTS_DOCSTRING_FIRST_PART = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+ Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ """
+
+SEAMLESS_M4T_INPUTS_DOCSTRING_TEXT_PART = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ """
+
+SEAMLESS_M4T_INPUTS_DOCSTRING_SPEECH_PART = r"""
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+ Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ """
+
+SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART = r"""
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+M4T_MODEL_INPUTS_DOCSTRING = SEAMLESS_M4T_INPUTS_DOCSTRING_FIRST_PART + SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART
+
+M4T_TEXT_INPUTS_DOCSTRING = SEAMLESS_M4T_INPUTS_DOCSTRING_TEXT_PART + SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART
+
+M4T_SPEECH_INPUTS_DOCSTRING = SEAMLESS_M4T_INPUTS_DOCSTRING_SPEECH_PART + SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART
+
+
+############ UTILS ################
+
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+ padding_idx: int
+ past_key_values_length: int
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
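+
+
+# A minimal usage sketch with illustrative values: with `padding_idx=1`, padded positions keep the
+# padding index and real tokens get consecutive positions starting at `padding_idx + 1`:
+#
+#     create_position_ids_from_input_ids(torch.tensor([[5, 6, 7, 1, 1]]), padding_idx=1)
+#     # -> tensor([[2, 3, 4, 1, 1]])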
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
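+
+
+# A quick sketch with illustrative values: the decoder start token is prepended and the last label is
+# dropped, so labels `[3, 17, 42, 2]` with `decoder_start_token_id=0` become decoder inputs
+# `[0, 3, 17, 42]` (any `-100` in the labels would be replaced by `pad_token_id`):
+#
+#     shift_tokens_right(torch.tensor([[3, 17, 42, 2]]), pad_token_id=1, decoder_start_token_id=0)
+#     # -> tensor([[0, 3, 17, 42]])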
+
+
+def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):
+ """
+ Computes an attention mask of shape `(batch, seq_len)` where, for each element of the batch, attention
+ stops at the corresponding length given in `seq_lens`.
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):
+ The sequences to mask, where `*` is any number of sequence-specific dimensions including none.
+ seq_lens (`torch.Tensor` of shape `(batch)`):
+ Each element represents the length of the sequence at the same index in `hidden_states`.
+
+ Returns:
+ `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`
+ """
+ batch_size, mask_seq_len = hidden_states.shape[:2]
+
+ indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)
+
+ bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)
+
+ mask = hidden_states.new_ones((batch_size, mask_seq_len))
+
+ mask = mask.masked_fill(bool_mask, 0)
+
+ return mask
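+
+
+# A small sketch with illustrative shapes: for a batch of two sequences padded to length 5 with true
+# lengths 2 and 4, the returned mask keeps only the first `seq_lens[i]` positions of each row:
+#
+#     _compute_new_attention_mask(torch.zeros(2, 5, 8), torch.tensor([2, 4]))
+#     # -> tensor([[1., 1., 0., 0., 0.],
+#     #            [1., 1., 1., 1., 0.]])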
+
+
+def format_speech_generation_kwargs(kwargs):
+ """
+ Format kwargs for SeamlessM4T models that generate speech, attributing each keyword argument to either
+ the text generation model or the speech generation model.
+
+ Args:
+ kwargs (`dict`):
+ Keyword arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+ - With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
+ text model or the speech model, respectively, and take priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+ """
+ # attribute kwargs to models
+ kwargs_text = {}
+ kwargs_speech = {}
+ for key, value in kwargs.items():
+ if key.startswith("text_"):
+ key = key[len("text_") :]
+ kwargs_text[key] = value
+ elif key.startswith("speech_"):
+ key = key[len("speech_") :]
+ kwargs_speech[key] = value
+ else:
+ # If the key is already in one of the model-specific dicts, then it has been set with a
+ # submodule-specific value and we don't override it
+ if key not in kwargs_text:
+ kwargs_text[key] = value
+ if key not in kwargs_speech:
+ kwargs_speech[key] = value
+ return kwargs_text, kwargs_speech
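+
+
+# A minimal routing sketch with hypothetical generation kwargs: un-prefixed keys go to both sub-models,
+# while `text_`/`speech_`-prefixed keys are stripped of their prefix and routed to one of them:
+#
+#     kwargs_text, kwargs_speech = format_speech_generation_kwargs(
+#         {"max_new_tokens": 50, "text_num_beams": 5, "speech_do_sample": True}
+#     )
+#     # kwargs_text   == {"max_new_tokens": 50, "num_beams": 5}
+#     # kwargs_speech == {"max_new_tokens": 50, "do_sample": True}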
+
+
+############ SPEECH ENCODER related code ################
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->SeamlessM4TConformer, feat_extract_activation->speech_encoder_hidden_act
+class SeamlessM4TConformerPositionalConvEmbedding(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups,
+ )
+
+ weight_norm = nn.utils.weight_norm
+ if hasattr(nn.utils.parametrizations, "weight_norm"):
+ weight_norm = nn.utils.parametrizations.weight_norm
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
+ else:
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = SeamlessM4TConformerSamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.speech_encoder_hidden_act]
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.transpose(1, 2)
+
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRotaryPositionalEmbedding with Wav2Vec2->SeamlessM4T, num_attention_heads->speech_encoder_attention_heads
+class SeamlessM4TConformerRotaryPositionalEmbedding(nn.Module):
+ """Rotary positional embedding
+ Reference: https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ dim = config.hidden_size // config.speech_encoder_attention_heads
+ base = config.rotary_embedding_base
+
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
+ self.register_buffer("inv_freq", inv_freq)
+ self.cached_sequence_length = None
+ self.cached_rotary_positional_embedding = None
+
+ def forward(self, hidden_states):
+ sequence_length = hidden_states.shape[1]
+
+ if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
+ return self.cached_rotary_positional_embedding
+
+ self.cached_sequence_length = sequence_length
+ # Embeddings are computed in the dtype of the inv_freq constant
+ time_stamps = torch.arange(sequence_length).type_as(self.inv_freq)
+ freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
+ embeddings = torch.cat((freqs, freqs), dim=-1)
+
+ cos_embeddings = embeddings.cos()[:, None, None, :]
+ sin_embeddings = embeddings.sin()[:, None, None, :]
+ # Computed embeddings are cast to the dtype of the hidden state inputs
+ self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings]).type_as(hidden_states)
+ return self.cached_rotary_positional_embedding
+
+
+# Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRelPositionalEmbedding with Wav2Vec2->SeamlessM4T
+class SeamlessM4TConformerRelPositionalEmbedding(nn.Module):
+ """Relative positional encoding module."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.max_len = config.max_source_positions
+ self.d_model = config.hidden_size
+ self.pe = None
+ self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))
+
+ def extend_pe(self, x):
+ # Reset the positional encodings
+ if self.pe is not None:
+ # self.pe contains both positive and negative parts
+ # the length of self.pe is 2 * input_len - 1
+ if self.pe.size(1) >= x.size(1) * 2 - 1:
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+ return
+ # Suppose `i` is the position of query vector and `j` is the
+ # position of key vector. We use positive relative positions when keys
+ # are to the left (i>j) and negative relative positions otherwise (i<j).
+ pe_positive = torch.zeros(x.size(1), self.d_model)
+ pe_negative = torch.zeros(x.size(1), self.d_model)
+ position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1)
+ div_term = torch.exp(
+ torch.arange(0, self.d_model, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.d_model)
+ )
+ pe_positive[:, 0::2] = torch.sin(position * div_term)
+ pe_positive[:, 1::2] = torch.cos(position * div_term)
+ pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+ pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+ # Reverse the order of positive indices and concat both positive and
+ # negative indices. This is used to support the shifting trick
+ # as in https://arxiv.org/abs/1901.02860
+ pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+ pe_negative = pe_negative[1:].unsqueeze(0)
+ pe = torch.cat([pe_positive, pe_negative], dim=1)
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+ def forward(self, hidden_states: torch.Tensor):
+ self.extend_pe(hidden_states)
+ start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
+ end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
+ relative_position_embeddings = self.pe[:, start_idx:end_idx]
+
+ return relative_position_embeddings
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SeamlessM4TConformer
+class SeamlessM4TConformerSamePadLayer(nn.Module):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ return hidden_states
+
+
+class SeamlessM4TConformerFeatureProjection(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps)
+ self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size)
+ self.dropout = nn.Dropout(config.speech_encoder_dropout)
+
+ def forward(self, hidden_states):
+ # non-projected hidden states are needed for quantization
+ norm_hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.projection(norm_hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class SeamlessM4TConformerFeedForward(nn.Module):
+ def __init__(self, config, act_fn=None, dropout=None):
+ super().__init__()
+ dropout = dropout if dropout is not None else config.speech_encoder_dropout
+ act_fn = act_fn if act_fn is not None else config.speech_encoder_hidden_act
+
+ self.intermediate_dropout = nn.Dropout(dropout)
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.speech_encoder_intermediate_size)
+ self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn
+
+ self.output_dense = nn.Linear(config.speech_encoder_intermediate_size, config.hidden_size)
+ self.output_dropout = nn.Dropout(dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+class SeamlessM4TConformerConvolutionModule(nn.Module):
+ """Convolution block used in the conformer block"""
+
+ def __init__(self, config):
+ super().__init__()
+ if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
+ raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding")
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+ self.pointwise_conv1 = nn.Conv1d(
+ config.hidden_size,
+ 2 * config.hidden_size,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False,
+ )
+ self.glu = nn.GLU(dim=1)
+ self.depthwise_conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ config.conv_depthwise_kernel_size,
+ stride=1,
+ padding="same",
+ groups=config.hidden_size,
+ bias=False,
+ )
+ self.batch_norm = nn.BatchNorm1d(config.hidden_size)
+ self.activation = ACT2FN[config.speech_encoder_hidden_act]
+ self.pointwise_conv2 = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False,
+ )
+ self.dropout = nn.Dropout(config.speech_encoder_dropout)
+
+ def forward(self, hidden_states, attention_mask=None):
+ hidden_states = self.layer_norm(hidden_states)
+
+ # Ensure that we do not leak padded positions in depthwise convolution.
+ # Put 0 where necessary
+ if attention_mask is not None:
+ hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
+
+ # exchange the temporal dimension and the feature dimension
+ hidden_states = hidden_states.transpose(1, 2)
+
+ # GLU mechanism
+ # => (batch, 2*channel, dim)
+ hidden_states = self.pointwise_conv1(hidden_states)
+ # => (batch, channel, dim)
+ hidden_states = self.glu(hidden_states)
+
+ # 1D Depthwise Conv
+ hidden_states = self.depthwise_conv(hidden_states)
+ hidden_states = self.batch_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = self.pointwise_conv2(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+class SeamlessM4TConformerSelfAttention(nn.Module):
+ """Construct a SeamlessM4TConformerSelfAttention object.
+ Can be enhanced with rotary or relative position embeddings.
+ """
+
+ def __init__(self, config, use_position_embeddings=True):
+ super().__init__()
+
+ self.head_size = config.hidden_size // config.speech_encoder_attention_heads
+ self.num_heads = config.speech_encoder_attention_heads
+ self.position_embeddings_type = config.position_embeddings_type if use_position_embeddings else None
+
+ self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)
+
+ self.dropout = nn.Dropout(p=config.speech_encoder_dropout)
+
+ if self.position_embeddings_type == "relative":
+ # linear transformation for positional encoding
+ self.linear_pos = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
+ # these two learnable bias are used in matrix c and matrix d
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
+ self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
+ self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
+
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ relative_position_embeddings: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # self-attention mechanism
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ # make sure query/key states can be != value states
+ query_key_states = hidden_states
+ value_states = hidden_states
+
+ if self.position_embeddings_type == "rotary":
+ if relative_position_embeddings is None:
+ raise ValueError(
+ "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'"
+ )
+ query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)
+
+ # project query_key_states and value_states
+ query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
+ key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
+ value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
+
+ # => (batch, head, time1, d_k)
+ query = query.transpose(1, 2)
+ key = key.transpose(1, 2)
+ value = value.transpose(1, 2)
+
+ if self.position_embeddings_type == "relative":
+ if relative_position_embeddings is None:
+ raise ValueError(
+ "`relative_position_embeddings` has to be defined when `self.position_embeddings_type =="
+ " 'relative'"
+ )
+ # apply relative_position_embeddings to qk scores
+ # as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860
+ scores = self._apply_relative_embeddings(
+ query=query, key=key, relative_position_embeddings=relative_position_embeddings
+ )
+ else:
+ scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
+
+ # apply attention_mask if necessary
+ if attention_mask is not None:
+ scores = scores + attention_mask
+
+ # => (batch, head, time1, time2)
+ probs = torch.softmax(scores, dim=-1)
+ probs = self.dropout(probs)
+
+ # => (batch, head, time1, d_k)
+ hidden_states = torch.matmul(probs, value)
+
+ # => (batch, time1, hidden_size)
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
+ hidden_states = self.linear_out(hidden_states)
+
+ return hidden_states, probs
+
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_rotary_embedding
+ def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)
+
+ cos = relative_position_embeddings[0, :sequence_length, ...]
+ sin = relative_position_embeddings[1, :sequence_length, ...]
+
+ # rotate hidden_states with rotary embeddings
+ hidden_states = hidden_states.transpose(0, 1)
+ rotated_states_begin = hidden_states[..., : self.head_size // 2]
+ rotated_states_end = hidden_states[..., self.head_size // 2 :]
+ rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
+ hidden_states = (hidden_states * cos) + (rotated_states * sin)
+ hidden_states = hidden_states.transpose(0, 1)
+
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
+
+ return hidden_states
+
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_relative_embeddings
+ def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
+ # 1. project positional embeddings
+ # => (batch, head, 2*time1-1, d_k)
+ proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
+ proj_relative_position_embeddings = proj_relative_position_embeddings.view(
+ relative_position_embeddings.size(0), -1, self.num_heads, self.head_size
+ )
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)
+
+ # 2. Add bias to query
+ # => (batch, head, time1, d_k)
+ query = query.transpose(1, 2)
+ q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
+ q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)
+
+ # 3. attention score: first compute matrix a and matrix c
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
+ # => (batch, head, time1, time2)
+ scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))
+
+ # 4. then compute matrix b and matrix d
+ # => (batch, head, time1, 2*time1-1)
+ scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)
+
+ # 5. shift matrix b and matrix d
+ zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
+ scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
+ scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
+ scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
+ scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
+ scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1]
+
+ # 6. sum matrices
+ # => (batch, head, time1, time2)
+ scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)
+
+ return scores
+
+
+class SeamlessM4TConformerEncoderLayer(nn.Module):
+ """Conformer block based on https://arxiv.org/abs/2005.08100."""
+
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerEncoderLayer.__init__ with Wav2Vec2->SeamlessM4T, attention_dropout->speech_encoder_dropout, torch.nn->nn
+ def __init__(self, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ dropout = config.speech_encoder_dropout
+
+ # Feed-forward 1
+ self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn1 = SeamlessM4TConformerFeedForward(config)
+
+ # Self-Attention
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
+ self.self_attn_dropout = nn.Dropout(dropout)
+ self.self_attn = SeamlessM4TConformerSelfAttention(config)
+
+ # Conformer Convolution
+ self.conv_module = SeamlessM4TConformerConvolutionModule(config)
+
+ # Feed-forward 2
+ self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn2 = SeamlessM4TConformerFeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(embed_dim)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask: Optional[torch.Tensor] = None,
+ relative_position_embeddings: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ conv_attention_mask: Optional[torch.Tensor] = None,
+ ):
+ hidden_states = hidden_states
+
+ # 1. Feed-Forward 1 layer
+ residual = hidden_states
+ hidden_states = self.ffn1_layer_norm(hidden_states)
+ hidden_states = self.ffn1(hidden_states)
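+ # half-step residual: the Conformer block uses macaron-style feed-forward modules, each
+ # contributing with a 0.5 factor (https://arxiv.org/abs/2005.08100)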
+ hidden_states = hidden_states * 0.5 + residual
+ residual = hidden_states
+
+ # 2. Self-Attention layer
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ relative_position_embeddings=relative_position_embeddings,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_dropout(hidden_states)
+ hidden_states = hidden_states + residual
+
+ # 3. Convolutional Layer
+ residual = hidden_states
+ hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask)
+ hidden_states = residual + hidden_states
+
+ # 4. Feed-Forward 2 Layer
+ residual = hidden_states
+ hidden_states = self.ffn2_layer_norm(hidden_states)
+ hidden_states = self.ffn2(hidden_states)
+ hidden_states = hidden_states * 0.5 + residual
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ return hidden_states, attn_weights
+
+
+class SeamlessM4TConformerEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ if config.position_embeddings_type == "relative":
+ self.embed_positions = SeamlessM4TConformerRelPositionalEmbedding(config)
+ elif config.position_embeddings_type == "rotary":
+ self.embed_positions = SeamlessM4TConformerRotaryPositionalEmbedding(config)
+ else:
+ self.embed_positions = None
+
+ self.dropout = nn.Dropout(config.speech_encoder_dropout)
+ self.layers = nn.ModuleList(
+ [SeamlessM4TConformerEncoderLayer(config) for _ in range(config.speech_encoder_layers)]
+ )
+
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ conv_attention_mask = attention_mask
+ if attention_mask is not None:
+ # make sure padded tokens output 0
+ hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
+ # extend attention_mask
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
+ attention_mask = attention_mask.expand(
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
+ )
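+ # e.g. a padding-mask row [1, 1, 0] becomes the additive biases [0, 0, dtype_min],
+ # broadcast to `(batch, 1, seq_len, seq_len)` and later added to the attention scores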
+
+ hidden_states = self.dropout(hidden_states)
+
+ if self.embed_positions is not None:
+ relative_position_embeddings = self.embed_positions(hidden_states)
+ else:
+ relative_position_embeddings = None
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for i, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+ skip_the_layer = (
+ True if self.training and (dropout_probability < self.config.speech_encoder_layerdrop) else False
+ )
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer.__call__,
+ hidden_states,
+ attention_mask,
+ relative_position_embeddings,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ relative_position_embeddings=relative_position_embeddings,
+ output_attentions=output_attentions,
+ conv_attention_mask=conv_attention_mask,
+ )
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class SeamlessM4TConformerAdapterLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ dropout = config.adaptor_dropout
+
+ self.kernel_size = config.adaptor_kernel_size
+ self.stride = config.adaptor_stride
+
+ # 1. residual convolution
+ self.residual_layer_norm = nn.LayerNorm(embed_dim)
+ self.residual_conv = nn.Conv1d(
+ embed_dim,
+ 2 * embed_dim,
+ self.kernel_size,
+ stride=self.stride,
+ padding=self.stride // 2,
+ )
+ self.activation = nn.GLU(dim=1)
+
+ # Self-Attention
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
+ self.self_attn_conv = nn.Conv1d(
+ embed_dim,
+ 2 * embed_dim,
+ self.kernel_size,
+ stride=self.stride,
+ padding=self.stride // 2,
+ )
+ self.self_attn = SeamlessM4TConformerSelfAttention(config, use_position_embeddings=False)
+ self.self_attn_dropout = nn.Dropout(dropout)
+
+ # Feed-forward
+ self.ffn_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn = SeamlessM4TConformerFeedForward(config, act_fn="relu", dropout=dropout)
+
+ def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
+ pad = self.kernel_size // 2
+ seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
+
+ seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1
+
+ return seq_lens.floor()
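+
+ # A rough numeric sketch (assuming, e.g., `adaptor_kernel_size=8` and `adaptor_stride=8`, so pad=4):
+ # an unpadded sequence of length 100 is pooled to floor((100 + 2*4 - 8) / 8 + 1) = 13 positions,
+ # matching the output length of the strided convolutions applied in `forward`.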
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ):
+ residual = self.residual_layer_norm(hidden_states)
+
+ # Apply pooling to the residual to match the sequence length of the
+ # multi-head attention output.
+ # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
+ residual = residual.transpose(1, 2)
+ residual = self.residual_conv(residual)
+ residual = self.activation(residual)
+ # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
+ residual = residual.transpose(1, 2)
+
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ # Apply pooling before feeding to the multihead-attention layer.
+ # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
+ hidden_states = hidden_states.transpose(1, 2)
+ hidden_states = self.self_attn_conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
+ hidden_states = hidden_states.transpose(1, 2)
+
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ hidden_states.device
+ )
+ attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths)
+ attention_mask = _prepare_4d_attention_mask(
+ attention_mask,
+ hidden_states.dtype,
+ )
+
+ # The rest of the computation is identical to a vanilla Transformer
+ # encoder layer.
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_dropout(hidden_states)
+ hidden_states = hidden_states + residual
+
+ residual = hidden_states
+
+ hidden_states = self.ffn_layer_norm(hidden_states)
+ hidden_states = self.ffn(hidden_states) + residual
+
+ return hidden_states
+
+
+class SeamlessM4TConformerAdapter(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.layers = nn.ModuleList(SeamlessM4TConformerAdapterLayer(config) for _ in range(config.num_adapter_layers))
+
+ def forward(self, hidden_states, attention_mask):
+ # down project hidden_states if necessary
+
+ for layer in self.layers:
+ hidden_states = layer(hidden_states, attention_mask)
+
+ return hidden_states
+
+
+############ TEXT / UNITS related code ################
+
+
+# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
+class SeamlessM4TSinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ super().__init__()
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
+
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
+ if hasattr(self, "weights"):
+ # in forward put the weights on the correct dtype and device of the param
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
+
+ self.register_buffer("weights", emb_weights, persistent=False)
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ """
+ Build sinusoidal embeddings.
+
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
+ "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(
+ self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
+ ):
+ if input_ids is not None:
+ bsz, seq_len = input_ids.size()
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
+ input_ids.device
+ )
+ else:
+ bsz, seq_len = inputs_embeds.size()[:-1]
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
+
+ # expand embeddings if needed
+ max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
+ if max_pos > self.weights.size(0):
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
+
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
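+
+
+# A small usage sketch with hypothetical sizes: one sinusoidal vector is returned per position, and
+# padded tokens receive the (all-zero) row of the padding index:
+#
+#     embed_positions = SeamlessM4TSinusoidalPositionalEmbedding(1024, 16, padding_idx=1)
+#     embed_positions(torch.tensor([[5, 6, 1]])).shape
+#     # -> torch.Size([1, 3, 16])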
+
+
+class SeamlessM4TAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ # Copied from transformers.models.bart.modeling_bart.BartAttention.__init__ with Bart->SeamlessM4T
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[SeamlessM4TConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if encoder_hidden_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = encoder_hidden_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == encoder_hidden_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `encoder_hidden_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == encoder_hidden_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+# Copied from transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeDenseActDense with NllbMoe->SeamlessM4T,DenseActDense->FeedForwardNetwork, d_model->hidden_size
+class SeamlessM4TFeedForwardNetwork(nn.Module):
+ def __init__(self, config: SeamlessM4TConfig, ffn_dim: int):
+ super().__init__()
+ self.fc1 = nn.Linear(config.hidden_size, ffn_dim)
+ self.fc2 = nn.Linear(ffn_dim, config.hidden_size)
+ self.dropout = nn.Dropout(config.activation_dropout)
+ self.act = ACT2FN[config.activation_function]
+
+ def forward(self, hidden_states):
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
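+ # if fc2's weights are stored in a different (non-quantized) dtype than the activations,
+ # e.g. with mixed-precision or offloaded weights, cast the activations to the weight dtype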
+ if (
+ isinstance(self.fc2.weight, torch.Tensor)
+ and hidden_states.dtype != self.fc2.weight.dtype
+ and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
+ ):
+ hidden_states = hidden_states.to(self.fc2.weight.dtype)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+class SeamlessM4TEncoderLayer(nn.Module):
+ def __init__(self, config: SeamlessM4TConfig, encoder_ffn_dim=None, encoder_attention_heads=None):
+ super().__init__()
+ encoder_ffn_dim = config.encoder_ffn_dim if encoder_ffn_dim is None else encoder_ffn_dim
+ encoder_attention_heads = (
+ config.encoder_attention_heads if encoder_attention_heads is None else encoder_attention_heads
+ )
+
+ self.embed_dim = config.hidden_size
+ self.self_attn = SeamlessM4TAttention(
+ embed_dim=self.embed_dim,
+ num_heads=encoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.attn_dropout = nn.Dropout(config.dropout)
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.ffn = SeamlessM4TFeedForwardNetwork(config, ffn_dim=encoder_ffn_dim)
+
+ self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.ffn_dropout = nn.Dropout(config.activation_dropout)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+
+ hidden_states = self.ffn_layer_norm(hidden_states)
+
+ hidden_states = self.ffn(hidden_states)
+ hidden_states = self.ffn_dropout(hidden_states)
+
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class SeamlessM4TDecoderLayer(nn.Module):
+ def __init__(self, config: SeamlessM4TConfig, decoder_ffn_dim=None, decoder_attention_heads=None):
+ super().__init__()
+ decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim
+ decoder_attention_heads = (
+ config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads
+ )
+
+ self.embed_dim = config.hidden_size
+ self.self_attn = SeamlessM4TAttention(
+ embed_dim=self.embed_dim,
+ num_heads=decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.attn_dropout = nn.Dropout(config.dropout)
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.cross_attention = SeamlessM4TAttention(
+ self.embed_dim, decoder_attention_heads, config.attention_dropout, is_decoder=True
+ )
+ self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.ffn = SeamlessM4TFeedForwardNetwork(config, ffn_dim=decoder_ffn_dim)
+
+ self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.ffn_dropout = nn.Dropout(config.activation_dropout)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`):
+ encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
+ very large negative values.
+ past_key_value (`Tuple(torch.FloatTensor)`):
+ cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.cross_attention_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(
+ hidden_states=hidden_states,
+ encoder_hidden_states=encoder_hidden_states,
+ past_key_value=cross_attn_past_key_value,
+ attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value += cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+
+ hidden_states = self.ffn_layer_norm(hidden_states)
+
+ hidden_states = self.ffn(hidden_states)
+ hidden_states = self.ffn_dropout(hidden_states)
+
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states, present_key_value)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ return outputs
+
+
+############ SUB-MODELS related code ################
+
+
+class SeamlessM4TPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SeamlessM4TConfig
+ base_model_prefix = "seamless_m4t"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["SeamlessM4TEncoderLayer", "SeamlessM4TDecoderLayer", "SeamlessM4TConformerEncoderLayer"]
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, SeamlessM4TConformerSelfAttention):
+ if hasattr(module, "pos_bias_u"):
+ nn.init.xavier_uniform_(module.pos_bias_u)
+ if hasattr(module, "pos_bias_v"):
+ nn.init.xavier_uniform_(module.pos_bias_v)
+ elif isinstance(module, SeamlessM4TConformerPositionalConvEmbedding):
+ nn.init.normal_(
+ module.conv.weight,
+ mean=0,
+ std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
+ )
+ nn.init.constant_(module.conv.bias, 0)
+ elif isinstance(module, SeamlessM4TConformerFeatureProjection):
+ k = math.sqrt(1 / module.projection.in_features)
+ nn.init.uniform_(module.projection.weight, a=-k, b=k)
+ nn.init.uniform_(module.projection.bias, a=-k, b=k)
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Conv1d):
+ nn.init.kaiming_normal_(module.weight)
+ if module.bias is not None:
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
+ nn.init.uniform_(module.bias, a=-k, b=k)
+
+ def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
+ kernel_size, stride = self.config.adaptor_kernel_size, self.config.adaptor_stride
+ pad = kernel_size // 2
+ seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
+
+ seq_lens = ((seq_lens + 2 * pad - kernel_size) / stride) + 1
+
+ return seq_lens.floor()
+
+ def compute_last_hidden_states_per_sample(
+ self,
+ hidden_states: Tuple[Tuple[torch.Tensor]],
+ beam_indices: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ """
+ Computes the last hidden states.
+
+ Parameters:
+ hidden_states (`Tuple[Tuple[torch.Tensor]]`):
+ The generated hidden states. Tuple (one element for each generated token) of tuples (one element for
+ each layer of the decoder) of torch.FloatTensor of shape (batch_size*num_beams*num_return_sequences,
+ generated_length, hidden_size).
+ beam_indices (`torch.LongTensor`, *optional*):
+ Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at
+ generate-time.
+
+ Return:
+ `torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`
+ containing the last hidden states.
+ """
+ # 1. First, let's compute last_hidden_states from hidden_states.
+ # For each generation step, takes the hidden state from the last layer.
+ # shape: (batch_size*num_beams*num_return_sequences, generated_length, hidden_dim)
+ last_hidden_states = torch.concat([h[-1] for h in hidden_states], dim=1)
+
+ # 2. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
+ # to a beam search approach where the first (and only) beam is always selected
+ # in that case, return directly last_hidden_states
+ if beam_indices is None:
+ return last_hidden_states
+
+ # 3. cut beam_indices to longest beam length
+ beam_indices_mask = beam_indices < 0
+ max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max()
+ beam_indices = beam_indices.clone()[:, :max_beam_length]
+ beam_indices_mask = beam_indices_mask[:, :max_beam_length]
+
+ # 4. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards anyways
+ beam_indices[beam_indices_mask] = 0
+
+ # 5. expand beam_indices to last_hidden_states dim
+ beam_indices = beam_indices.unsqueeze(-1)
+ beam_indices = beam_indices.expand(-1, -1, last_hidden_states.shape[-1])
+
+ # 6. select the right candidate for each beam
+ # in other words, new_last_hidden_states[i,j,k] = last_hidden_states[beam_indices[i,j,k], j, k] for all i, j, k
+ last_hidden_states = torch.gather(last_hidden_states, 0, beam_indices)
+
+ return last_hidden_states
+
+
+@add_start_docstrings(
+ """Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers.
+ Each layer is a [`SeamlessM4TConformerEncoderLayer`].""",
+ SEAMLESS_M4T_START_DOCSTRING,
+)
+class SeamlessM4TSpeechEncoder(SeamlessM4TPreTrainedModel):
+ main_input_name = "input_features"
+
+ def __init__(self, config: SeamlessM4TConfig):
+ super().__init__(config)
+
+ self.feature_projection = SeamlessM4TConformerFeatureProjection(config)
+ self.encoder = SeamlessM4TConformerEncoder(config)
+ self.intermediate_ffn = SeamlessM4TConformerFeedForward(config, act_fn="relu", dropout=0.0)
+ self.adapter = SeamlessM4TConformerAdapter(config) if config.add_adapter else None
+ self.inner_layer_norm = nn.LayerNorm(config.hidden_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_features: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_features is None:
+ raise ValueError(
+ """Both `input_features` and `inputs_embeds` are `None` in `SeamlessM4TSpeechEncoder.forward`.
+ Make sure one of them is not `None`."""
+ )
+
+ hidden_states = self.feature_projection(input_features)
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ expanded_hidden_states = self.intermediate_ffn(hidden_states)
+ hidden_states = hidden_states + 0.5 * expanded_hidden_states
+
+ if self.adapter is not None:
+ hidden_states = self.adapter(hidden_states, attention_mask=attention_mask)
+
+ hidden_states = self.inner_layer_norm(hidden_states)
+
+ if not return_dict:
+ return (hidden_states,) + encoder_outputs[1:]
+
+ return Wav2Vec2BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+# inspired from MBart and NllbMoe
+@add_start_docstrings(
+ "Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`SeamlessM4TEncoderLayer`].",
+ SEAMLESS_M4T_START_DOCSTRING,
+ """
+ embed_tokens (`nn.Embedding`, *optional*):
+ Input embedding
+ is_t2u_encoder (`bool`, *optional*, defaults to `False`):
+ indicates if it belongs to the text-to-units model, in which case it won't have input embeddings
+ """,
+)
+class SeamlessM4TEncoder(SeamlessM4TPreTrainedModel):
+ def __init__(
+ self,
+ config: SeamlessM4TConfig,
+ embed_tokens: Optional[nn.Embedding] = None,
+ is_t2u_encoder: bool = False,
+ ):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ embed_dim = config.hidden_size
+
+ self.is_t2u_encoder = is_t2u_encoder
+ self.max_source_positions = config.max_position_embeddings
+
+ if not self.is_t2u_encoder:
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = SeamlessM4TSinusoidalPositionalEmbedding(
+ self.max_source_positions,
+ embed_dim,
+ self.padding_idx,
+ )
+
+ layers = []
+ for _ in range(config.encoder_layers):
+ layers.append(
+ SeamlessM4TEncoderLayer(
+ config,
+ encoder_attention_heads=config.encoder_attention_heads,
+ encoder_ffn_dim=config.encoder_ffn_dim,
+ )
+ )
+
+ self.layers = nn.ModuleList(layers)
+
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and self.is_t2u_encoder:
+ raise ValueError(
+ "You cannot pass input_ids to the encoder of the text_to_units model. Pass inputs_embeds instead."
+ )
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.shape
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ if not self.is_t2u_encoder:
+ embed_pos = self.embed_positions(input)
+
+ hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device)
+ else:
+ hidden_states = inputs_embeds
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.forward,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4TDecoderLayer`].",
+ SEAMLESS_M4T_START_DOCSTRING,
+ """
+ embed_tokens (`nn.Embedding`, *optional*):
+ Input embedding
+ """,
+)
+class SeamlessM4TDecoder(SeamlessM4TPreTrainedModel):
+ def __init__(
+ self,
+ config: SeamlessM4TConfig,
+ embed_tokens: Optional[nn.Embedding] = None,
+ ):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ # if embed_tokens defined, use its shape instead
+ self.embed_tokens = nn.Embedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx)
+ self.embed_tokens.weight = embed_tokens.weight
+ else:
+ self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx)
+
+ self.embed_positions = SeamlessM4TSinusoidalPositionalEmbedding(
+ self.max_target_positions,
+ config.hidden_size,
+ padding_idx=self.padding_idx,
+ )
+
+ layers = []
+ for _ in range(config.decoder_layers):
+ layers.append(
+ SeamlessM4TDecoderLayer(
+ config,
+ decoder_attention_heads=config.decoder_attention_heads,
+ decoder_ffn_dim=config.decoder_ffn_dim,
+ )
+ )
+ self.layers = nn.ModuleList(layers)
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
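+        # (length of the already-cached keys; used below to offset the sinusoidal position ids during incremental decoding)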
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
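+        # build a 4D causal mask (merged with any padding mask) so each position only attends to itself and earlier positions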
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input, past_key_values_length=past_key_values_length)
+
+ hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[2],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[3],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+    "Bare text-to-unit Transformer encoder-decoder. The encoder is a [`SeamlessM4TEncoder`] without embeddings and the decoder is a [`SeamlessM4TDecoder`].",
+ SEAMLESS_M4T_START_DOCSTRING,
+ """
+ embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder.
+ """,
+)
+class SeamlessM4TTextToUnitModel(SeamlessM4TPreTrainedModel):
+ def __init__(
+ self,
+ config: SeamlessM4TConfig,
+ embed_tokens_decoder: Optional[nn.Embedding] = None,
+ ):
+ super().__init__(config)
+
+ self.encoder = SeamlessM4TEncoder(config, is_t2u_encoder=True)
+ self.decoder = SeamlessM4TDecoder(config, embed_tokens_decoder)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+    "Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4TTextToUnitModel`].",
+ SEAMLESS_M4T_START_DOCSTRING,
+ """
+ embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder.
+ """,
+)
+class SeamlessM4TTextToUnitForConditionalGeneration(SeamlessM4TPreTrainedModel):
+ _keys_to_ignore_on_load_missing = [
+ "vocoder",
+ "speech_encoder",
+ "text_encoder",
+ "text_decoder",
+ ]
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(
+ self,
+ config: SeamlessM4TConfig,
+ embed_tokens_decoder: Optional[nn.Embedding] = None,
+ ):
+        # update config: t2u_-prefixed attributes override the base ones (used principally for bos_token_id etc.)
+ config = copy.deepcopy(config)
+ for param, val in config.to_dict().items():
+ if param.startswith("t2u_"):
+ config.__setattr__(param[4:], val)
+ super().__init__(config)
+
+ self.model = SeamlessM4TTextToUnitModel(config, embed_tokens_decoder)
+
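+        # projects decoder hidden states onto the unit vocabulary; after the t2u_ override above,
+        # `config.vocab_size` is equal to `config.t2u_vocab_size`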
+ self.lm_head = nn.Linear(config.hidden_size, config.t2u_vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id)
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+ def _tie_weights(self) -> None:
+ if getattr(self.config, "tie_word_embeddings", True):
+ output_embeddings = self.get_output_embeddings()
+ if output_embeddings is not None:
+ self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
+
+
+############ VOCODER related code ################
+
+
+HIFIGAN_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`SeamlessM4TConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+# Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock
+class HifiGanResidualBlock(nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
+ super().__init__()
+ self.leaky_relu_slope = leaky_relu_slope
+
+ self.convs1 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=dilation[i],
+ padding=self.get_padding(kernel_size, dilation[i]),
+ )
+ for i in range(len(dilation))
+ ]
+ )
+ self.convs2 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=1,
+ padding=self.get_padding(kernel_size, 1),
+ )
+ for _ in range(len(dilation))
+ ]
+ )
+
+ def get_padding(self, kernel_size, dilation=1):
+ return (kernel_size * dilation - dilation) // 2
+
+ def apply_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.weight_norm(layer)
+
+ def remove_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.remove_weight_norm(layer)
+
+ def forward(self, hidden_states):
+ for conv1, conv2 in zip(self.convs1, self.convs2):
+ residual = hidden_states
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv1(hidden_states)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv2(hidden_states)
+ hidden_states = hidden_states + residual
+ return hidden_states
+
+
+class SeamlessM4TVariancePredictor(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ embed_dim = config.unit_embed_dim
+ kernel_size = config.variance_predictor_kernel_size
+ var_pred_dropout = config.var_pred_dropout
+
+ self.conv1 = nn.Conv1d(
+ embed_dim,
+ embed_dim,
+ kernel_size=kernel_size,
+ padding=(kernel_size - 1) // 2,
+ )
+        self.activation_function = nn.ReLU()
+ self.ln1 = nn.LayerNorm(embed_dim)
+ self.dropout_module = nn.Dropout(p=var_pred_dropout)
+ self.conv2 = nn.Conv1d(
+ embed_dim,
+ embed_dim,
+ kernel_size=kernel_size,
+ padding=1,
+ )
+ self.ln2 = nn.LayerNorm(embed_dim)
+ self.proj = nn.Linear(embed_dim, 1)
+
+ def forward(self, hidden_states: Tensor) -> Tensor:
+ # Input: B x T x C; Output: B x T
+ hidden_states = self.conv1(hidden_states.transpose(1, 2))
+        hidden_states = self.activation_function(hidden_states).transpose(1, 2)
+ hidden_states = self.dropout_module(self.ln1(hidden_states))
+ hidden_states = self.conv2(hidden_states.transpose(1, 2))
+        hidden_states = self.activation_function(hidden_states).transpose(1, 2)
+ hidden_states = self.dropout_module(self.ln2(hidden_states))
+ return self.proj(hidden_states).squeeze(dim=2)
+
+
+class SeamlessM4THifiGan(nn.Module):
+ def __init__(self, config: SeamlessM4TConfig):
+ super().__init__()
+ model_in_dim = config.unit_embed_dim + config.lang_embed_dim + config.spkr_embed_dim
+ self.leaky_relu_slope = config.leaky_relu_slope
+ self.num_kernels = len(config.resblock_kernel_sizes)
+ self.num_upsamples = len(config.upsample_rates)
+ self.conv_pre = nn.Conv1d(
+ model_in_dim,
+ config.upsample_initial_channel,
+ kernel_size=7,
+ stride=1,
+ padding=3,
+ )
+
+ self.upsampler = nn.ModuleList()
+ for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
+ self.upsampler.append(
+ nn.ConvTranspose1d(
+ config.upsample_initial_channel // (2**i),
+ config.upsample_initial_channel // (2 ** (i + 1)),
+ kernel_size=kernel_size,
+ stride=upsample_rate,
+ padding=(kernel_size - upsample_rate) // 2,
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.upsampler)):
+ channels = config.upsample_initial_channel // (2 ** (i + 1))
+ for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
+ self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
+
+ self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
+
+ def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:
+ r"""
+        Converts input embeddings into a speech waveform. Passing a batch of input embeddings returns a batch of
+        speech waveforms.
+
+        Args:
+            input_embeds (`torch.FloatTensor` of shape `(batch_size, model_in_dim, sequence_length)`):
+                Tensor containing the concatenated unit, language and speaker embeddings, with channels first. Note
+                that `model_in_dim` is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and
+                `config.spkr_embed_dim`.
+
+        Returns:
+            `torch.FloatTensor`: Tensor containing the speech waveform, of shape `(batch_size, num_frames)`.
+ """
+
+ hidden_states = self.conv_pre(input_embeds)
+ for i in range(self.num_upsamples):
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = self.upsampler[i](hidden_states)
+
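+            # each upsampling stage is followed by `num_kernels` residual blocks whose outputs are averaged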
+ res_state = self.resblocks[i * self.num_kernels](hidden_states)
+ for j in range(1, self.num_kernels):
+ res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
+ hidden_states = res_state / self.num_kernels
+
+ hidden_states = nn.functional.leaky_relu(hidden_states)
+ hidden_states = self.conv_post(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+
+        # remove the channel dim (conv_post outputs a single channel)
+ waveform = hidden_states.squeeze(1)
+
+ return waveform
+
+
+@add_start_docstrings(
+ """Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).""",
+ HIFIGAN_START_DOCSTRING,
+)
+class SeamlessM4TCodeHifiGan(PreTrainedModel):
+ config_class = SeamlessM4TConfig
+ main_input_name = "input_embeds"
+ _no_split_modules = []
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.pad_token_id = config.t2u_pad_token_id
+ self.dur_predictor = SeamlessM4TVariancePredictor(config)
+
+ self.unit_embedding = nn.Embedding(config.unit_hifi_gan_vocab_size, config.unit_embed_dim)
+ self.speaker_embedding = nn.Embedding(config.vocoder_num_spkrs, config.spkr_embed_dim)
+ self.language_embedding = nn.Embedding(config.vocoder_num_langs, config.lang_embed_dim)
+
+ self.hifi_gan = SeamlessM4THifiGan(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _get_dur_output_lengths(self, input_ids, dur_out):
+ """
+ Computes the output length after the duration layer.
+ """
+ unit_lengths = (input_ids != self.pad_token_id).sum(1)
+
+        # take care of edge cases where there is no padding or too much padding
+ unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1)
+
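+        # the cumulative duration at the last non-pad unit gives the total number of output frames per sample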
+ cumulative_dur_out = torch.cumsum(dur_out, dim=1)
+ unit_lengths = cumulative_dur_out.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze()
+
+ return unit_lengths
+
+ def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]):
+ """
+ Computes the output length of the hifigan convolutional layers
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return (
+ torch.div(input_length + 2 * pad - dilation * (kernel_size - 1) - 1, stride, rounding_mode="floor") + 1
+ )
+
+ def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
+ return (input_length - 1) * stride - 2 * pad + dilation * (kernel_size - 1) + 1
+
+ # conv_pre
+ input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
+
+ # upsampler
+ for i, (upsample_rate, kernel_size) in enumerate(
+ zip(self.config.upsample_rates, self.config.upsample_kernel_sizes)
+ ):
+ input_lengths = _transpose_conv_out_length(
+ input_lengths, kernel_size, upsample_rate, (kernel_size - upsample_rate) // 2
+ )
+
+ # resblock
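+        # each residual block applies its dilated convolutions (convs1) followed by as many convolutions with dilation=1 (convs2)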
+ for i in range(len(self.config.upsample_rates)):
+ for kernel_size, dilation in zip(self.config.resblock_kernel_sizes, self.config.resblock_dilation_sizes):
+ for dil in dilation:
+ input_lengths = _conv_out_length(
+ input_lengths, kernel_size, 1, (kernel_size - 1) * dil // 2, dilation=dil
+ )
+
+ for dil in dilation:
+ input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) // 2, dilation=1)
+
+ # conv_post
+ input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
+
+ return input_lengths
+
+ def forward(
+ self, input_ids: torch.LongTensor, spkr_id: torch.Tensor, lang_id: torch.Tensor
+ ) -> Tuple[torch.Tensor]:
+ """
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTextToUnitForConditionalGeneration`]. [What are input
+ IDs?](../glossary#input-ids)
+            spkr_id (`torch.Tensor`):
+                Tensor containing the id of the speaker used for speech synthesis. Must be lower than
+                `config.vocoder_num_spkrs`.
+            lang_id (`torch.Tensor`):
+                Tensor containing the id of the target language used for speech synthesis. Must be lower than
+                `config.vocoder_num_langs`.
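+
+        Returns:
+            `Tuple[torch.FloatTensor, torch.Tensor]`: The generated speech waveform of shape `(batch_size,
+            num_frames)` and a tensor with the number of valid frames for each sample in the batch.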
+ """
+ hidden_states = self.unit_embedding(input_ids).transpose(1, 2)
+ spkr = self.speaker_embedding(spkr_id).transpose(1, 2)
+ lang = self.language_embedding(lang_id).transpose(1, 2)
+
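+        # predict a log-duration per unit, then convert it to an integer frame count (at least one frame per unit)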
+ log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2))
+ dur_out = torch.clamp(torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1)
+ # B x C x T
+ if hidden_states.size(0) == 1:
+ hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
+ else:
+ # if batched sample, need to interleave per sample, and pad -> loss of parallelism
+ if hidden_states.shape[0] > 1 and self.training:
+ logger.warning(
+                    "`self.training=True` and batched inputs are used: parallelism is lost during the hifigan "
+                    "forward pass because the samples are interleaved."
+ )
+ hidden_states = [
+ torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1)
+ for (hidden_state, duration) in zip(hidden_states, dur_out)
+ ]
+
+ hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2)
+
+ spkr = spkr.repeat(1, 1, hidden_states.shape[-1])
+ lang = lang.repeat(1, 1, hidden_states.shape[-1])
+ hidden_states = torch.cat([lang, hidden_states, spkr], dim=1)
+
+ hidden_states = self.hifi_gan(hidden_states)
+
+ unit_lengths = self._get_dur_output_lengths(input_ids, dur_out)
+ lengths = self._get_output_hifigan_lengths(unit_lengths)
+
+ return hidden_states, lengths
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.hifi_gan.conv_pre)
+ for layer in self.hifi_gan.upsampler:
+ nn.utils.weight_norm(layer)
+ for layer in self.hifi_gan.resblocks:
+ layer.apply_weight_norm()
+ nn.utils.weight_norm(self.hifi_gan.conv_post)
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.hifi_gan.conv_pre)
+ for layer in self.hifi_gan.upsampler:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.hifi_gan.resblocks:
+ layer.remove_weight_norm()
+ nn.utils.remove_weight_norm(self.hifi_gan.conv_post)
+
+
+############ WHOLE MODEL related code ################
+
+
+@add_start_docstrings(
+ "The text-to-text SeamlessM4T Model transformer which can be used for T2TT.",
+ SEAMLESS_M4T_START_DOCSTRING,
+)
+class SeamlessM4TForTextToText(SeamlessM4TPreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["speech_encoder", "t2u_model", "vocoder"]
+ main_input_name = "input_ids"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_encoder.embed_tokens.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config: SeamlessM4TConfig):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+
+ self.text_encoder = SeamlessM4TEncoder(config, self.shared)
+ self.text_decoder = SeamlessM4TDecoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.text_encoder
+
+ def get_decoder(self):
+ return self.text_decoder
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.text_encoder.embed_tokens = value
+ self.text_decoder.embed_tokens = value
+ self.shared = value
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def generate(
+ self,
+ input_ids=None,
+ tgt_lang=None,
+ generation_config=None,
+ logits_processor=None,
+ stopping_criteria=None,
+ prefix_allowed_tokens_fn=None,
+ synced_gpus=False,
+ **kwargs,
+ ):
+ """
+ Generates sequences of token ids.
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+ Parameters:
+ input_ids (`torch.Tensor` of varying shape depending on the modality, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+                `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logit processor is passed that is already created with the arguments or a
+ generation config an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+ generation config. If a stopping criteria is passed that is already created with the arguments or a
+ generation config an error is thrown. This feature is intended for advanced users.
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
+                If provided, this function constrains the beam search to allowed tokens only at each step. If not
+                provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
+                `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
+                on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
+ for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
+ Retrieval](https://arxiv.org/abs/2010.00904).
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+                Whether to continue running the while loop until max_length is reached (needed for ZeRO stage 3).
+            kwargs (`Dict[str, Any]`, *optional*):
+                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
+ [`~utils.ModelOutput`] types are:
+ - [`~generation.GenerateEncoderDecoderOutput`],
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
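+
+        Example (a minimal usage sketch; the checkpoint name and processor calls below are illustrative assumptions,
+        not guarantees made by this module):
+
+        ```python
+        >>> from transformers import AutoProcessor, SeamlessM4TForTextToText
+
+        >>> processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
+        >>> model = SeamlessM4TForTextToText.from_pretrained("facebook/hf-seamless-m4t-medium")
+
+        >>> inputs = processor(text="Hello, my dog is cute.", src_lang="eng", return_tensors="pt")
+        >>> output_tokens = model.generate(**inputs, tgt_lang="fra")
+        >>> translated_text = processor.decode(output_tokens[0], skip_special_tokens=True)
+        ```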
+ """
+ # prepare text_decoder_input_ids
+ text_decoder_input_ids = kwargs.pop("decoder_input_ids", None)
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ if tgt_lang is not None:
+ batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))
+
+ if hasattr(self.generation_config, "text_decoder_lang_to_code_id"):
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
+ raise ValueError(
+                        f"`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in "
+                        f"{', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}."
+ )
+ # tgt_lang gets priority over decoder input ids
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+ else:
+ raise ValueError(
+                    "This model's generation config doesn't have a `text_decoder_lang_to_code_id` key that maps "
+                    "the target language to the right token id. Make sure to load the right generation config."
+ )
+ else:
+ # only a warning, otherwise errors appear in the tests
+ logger.warning(
+                "You must either specify a `tgt_lang` or pass correct `text_decoder_input_ids`; otherwise the "
+                "generated sequence will probably not make sense."
+ )
+
+ return super().generate(
+ input_ids,
+ generation_config,
+ logits_processor,
+ stopping_criteria,
+ prefix_allowed_tokens_fn,
+ synced_gpus,
+ decoder_input_ids=text_decoder_input_ids,
+ **kwargs,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ "The speech-to-text SeamlessM4T Model transformer which can be used for S2TT.",
+ SEAMLESS_M4T_START_DOCSTRING,
+)
+class SeamlessM4TForSpeechToText(SeamlessM4TPreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["text_decoder", "t2u_model", "vocoder"]
+ main_input_name = "input_features"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config: SeamlessM4TConfig):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+ self.speech_encoder = SeamlessM4TSpeechEncoder(config)
+ self.text_decoder = SeamlessM4TDecoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.speech_encoder
+
+ def get_decoder(self):
+ return self.text_decoder
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.text_decoder.embed_tokens = value
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_features: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.speech_encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
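+        # the speech encoder's convolutional adapter sub-samples the time axis, so the padding mask is recomputed to
+        # match the length of the encoder outputs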
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_outputs[0].device
+ )
+ encoder_attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def generate(
+ self,
+ input_features=None,
+ tgt_lang=None,
+ generation_config=None,
+ logits_processor=None,
+ stopping_criteria=None,
+ prefix_allowed_tokens_fn=None,
+ synced_gpus=False,
+ **kwargs,
+ ):
+ """
+ Generates sequences of token ids.
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+ Parameters:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+                Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+                `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logit processor is passed that is already created with the arguments or a
+ generation config an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+ generation config. If a stopping criteria is passed that is already created with the arguments or a
+ generation config an error is thrown. This feature is intended for advanced users.
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
+                If provided, this function constrains the beam search to allowed tokens only at each step. If not
+                provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
+                `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
+                on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
+ for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
+ Retrieval](https://arxiv.org/abs/2010.00904).
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+                Whether to continue running the while loop until max_length is reached (needed for ZeRO stage 3).
+            kwargs (`Dict[str, Any]`, *optional*):
+                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
+ [`~utils.ModelOutput`] types are:
+ - [`~generation.GenerateEncoderDecoderOutput`],
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
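+
+        Example (a minimal usage sketch; the checkpoint name, the 16 kHz waveform `audio`, and the processor calls
+        below are illustrative assumptions, not guarantees made by this module):
+
+        ```python
+        >>> from transformers import AutoProcessor, SeamlessM4TForSpeechToText
+
+        >>> processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
+        >>> model = SeamlessM4TForSpeechToText.from_pretrained("facebook/hf-seamless-m4t-medium")
+
+        >>> # `audio` is assumed to be a 1D float array sampled at 16 kHz
+        >>> inputs = processor(audios=audio, sampling_rate=16000, return_tensors="pt")
+        >>> output_tokens = model.generate(**inputs, tgt_lang="eng")
+        >>> transcription = processor.decode(output_tokens[0], skip_special_tokens=True)
+        ```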
+ """
+ text_decoder_input_ids = kwargs.pop("decoder_input_ids", None)
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ if tgt_lang is not None:
+            inputs = kwargs.get("inputs_embeds") if input_features is None else input_features
+ inputs = (
+ inputs
+ if inputs is not None
+ else kwargs.get("encoder_outputs", {"last_hidden_state": None})["last_hidden_state"]
+ )
+ batch_size = len(inputs)
+
+ if hasattr(self.generation_config, "text_decoder_lang_to_code_id"):
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
+ raise ValueError(
+                        f"`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in "
+                        f"{', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}."
+ )
+ # tgt_lang gets priority over decoder input ids
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+ else:
+ raise ValueError(
+                    "This model's generation config doesn't have a `text_decoder_lang_to_code_id` key that maps "
+                    "the target language to the right token id. Make sure to load the right generation config."
+ )
+ else:
+ # only a warning, otherwise errors appear in the tests
+ logger.warning(
+                "You must either specify a `tgt_lang` or pass correct `text_decoder_input_ids`; otherwise the "
+                "generated sequence will probably not make sense."
+ )
+ return super().generate(
+ input_features,
+ generation_config,
+ logits_processor,
+ stopping_criteria,
+ prefix_allowed_tokens_fn,
+ synced_gpus,
+ decoder_input_ids=text_decoder_input_ids,
+ **kwargs,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ "The text-to-speech SeamlessM4T Model transformer which can be used for T2ST.",
+ SEAMLESS_M4T_START_DOCSTRING,
+)
+class SeamlessM4TForTextToSpeech(SeamlessM4TPreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["speech_encoder"]
+ main_input_name = "input_ids"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_encoder.embed_tokens.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config: SeamlessM4TConfig):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+
+ self.text_encoder = SeamlessM4TEncoder(config, self.shared)
+ self.text_decoder = SeamlessM4TDecoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config)
+ self.vocoder = SeamlessM4TCodeHifiGan(config)
+
+ def get_encoder(self):
+ return self.text_encoder
+
+ def get_decoder(self):
+ return self.text_decoder
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.text_encoder.embed_tokens = value
+ self.text_decoder.embed_tokens = value
+ self.shared = value
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This is the same forward method as `SeamlessM4TForTextToText`."
+ "It doesn't use the text-to-unit model `SeamlessM4TTextToUnitForConditionalGeneration`."
+ "If you want to generate speech, use the `.generate` method."
+ )
+ encoder_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ return_intermediate_token_ids: Optional[bool] = None,
+ tgt_lang: Optional[str] = None,
+ spkr_id: Optional[int] = 0,
+ **kwargs,
+ ) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]:
+ """
+ Generates translated audio waveforms.
+
+
+
+ This method successively calls the `.generate` function of two different sub-models. You can specify keyword
+ arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
+ that will be passed to one of them.
+
+ For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform
+ beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ return_intermediate_token_ids (`bool`, *optional*):
+ If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
+ to get translated text alongside the audio.
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ spkr_id (`int`, *optional*, defaults to 0):
+ The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
+ kwargs (*optional*):
+ Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
+ arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+ - With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
+ text model and speech model respectively. They take priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+
+
+ Returns:
+ `Union[SeamlessM4TGenerationOutput, Tuple[Tensor]]`:
+ - If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
+ - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
+ sequence_length)` and `waveform_lengths`, which gives the length of each sample.
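+
+ Example (a minimal sketch, not an official recipe; it assumes the `"facebook/hf-seamless-m4t-medium"`
+ checkpoint used elsewhere in this file and a matching [`SeamlessM4TProcessor`]):
+
+ ```python
+ >>> from transformers import AutoProcessor, SeamlessM4TForTextToSpeech
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
+ >>> model = SeamlessM4TForTextToSpeech.from_pretrained("facebook/hf-seamless-m4t-medium")
+
+ >>> # encode an English sentence; `src_lang` controls the language token prepended by the tokenizer
+ >>> inputs = processor(text="Hello, my dog is cute.", src_lang="eng", return_tensors="pt")
+
+ >>> # `tgt_lang` is mandatory; `text_num_beams` only reaches the text sub-model, `speech_do_sample` only the text-to-unit sub-model
+ >>> waveform, waveform_lengths = model.generate(**inputs, tgt_lang="fra", text_num_beams=4, speech_do_sample=True)
+ ```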
+ """
+ batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))
+
+ if tgt_lang is None:
+ raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
+ else:
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]:
+ lang_code_to_id = getattr(self.generation_config, key, None)
+ if lang_code_to_id is None:
+ raise ValueError(
+ f"""This model generation config doesn't have a `{key}` key which maps the target language
+ to the right token id. Make sure to load the right generation config."""
+ )
+ elif tgt_lang not in lang_code_to_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model.
+ Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports
+ more languages for text translation than for speech synthesis."""
+ )
+
+ kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
+ kwargs_text["output_hidden_states"] = True
+ kwargs_text["return_dict_in_generate"] = True
+ kwargs_text["output_scores"] = True
+
+ text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
+
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+
+ kwargs_text["decoder_input_ids"] = text_decoder_input_ids
+
+ # first generation
+ text_generation_output = super().generate(input_ids, **kwargs_text)
+ sequences = text_generation_output.sequences
+
+ # prepare second generation
+ num_return_sequences = len(sequences) // batch_size
+ attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
+
+ encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
+
+ # take care of num_return_sequences
+ # take most probable hidden states per batch of return_sequences
+ # (batch_size*num_return_sequences, ...) -> (batch_size,...)
+ if num_return_sequences > 1:
+ idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1)
+ idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1)
+ idx_most_probable_sequences_per_batch = (
+ idx_most_probable_sequences_per_batch + torch.arange(batch_size).to(self.device) * num_return_sequences
+ )
+ sequences = sequences[idx_most_probable_sequences_per_batch]
+
+ # get decoder last hidden state - must do a pass through the text decoder
+ t2u_input_embeds = self.text_decoder(
+ input_ids=sequences,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ ).last_hidden_state
+
+ pad_token_id = self.generation_config.pad_token_id
+
+ # Compute new attention mask
+ seq_lens = (sequences != pad_token_id).int().sum(1)
+ t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
+ kwargs_speech["attention_mask"] = t2u_model_attention_mask
+
+ # Compute t2u decoder_input_ids
+ t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids")
+ t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang)
+ t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size).to(
+ self.device
+ )
+ kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids
+ # second generation
+ unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech)
+ output_unit_ids = unit_ids.detach().clone()
+
+ # get rid of t2u_decoder_input_ids
+ unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :]
+ # replace eos per pad
+ unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id
+ # offset of control symbols
+ unit_ids = torch.where(
+ unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
+ )
+
+ vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
+ vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device)
+
+ spkr_id = torch.tensor([[spkr_id]] * len(unit_ids)).to(self.device)
+
+ waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id)
+
+ if return_intermediate_token_ids:
+ return SeamlessM4TGenerationOutput(
+ waveform=waveform,
+ waveform_lengths=waveform_lengths,
+ sequences=sequences,
+ unit_sequences=output_unit_ids,
+ )
+
+ return waveform, waveform_lengths
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ "The speech-to-speech SeamlessM4T Model transformer which can be used for S2ST.",
+ SEAMLESS_M4T_START_DOCSTRING,
+)
+class SeamlessM4TForSpeechToSpeech(SeamlessM4TPreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["text_encoder"]
+ main_input_name = "input_features"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+ self.speech_encoder = SeamlessM4TSpeechEncoder(config)
+ self.text_decoder = SeamlessM4TDecoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config)
+ self.vocoder = SeamlessM4TCodeHifiGan(config)
+
+ def get_encoder(self):
+ return self.speech_encoder
+
+ def get_decoder(self):
+ return self.text_decoder
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.text_decoder.embed_tokens = value
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_features: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This is the same forward method as `SeamlessM4TForSpeechToText`. It doesn't use `self.t2u_model`."
+ "If you want to generate speech, use the `generate` method."
+ )
+
+ encoder_outputs = self.speech_encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_outputs[0].device
+ )
+ encoder_attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_features: Optional[torch.Tensor] = None,
+ return_intermediate_token_ids: Optional[bool] = None,
+ tgt_lang: Optional[str] = None,
+ spkr_id: Optional[int] = 0,
+ **kwargs,
+ ) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]:
+ """
+ Generates translated audio waveforms.
+
+
+
+ This method successively calls the `.generate` function of two different sub-models. You can specify keyword
+ arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
+ that will be passed to one of them.
+
+ For example, calling `.generate(input_features, num_beams=4, speech_do_sample=True)` will successively perform
+ beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+
+
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+ Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ return_intermediate_token_ids (`bool`, *optional*):
+ If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
+ to get translated text alongside the audio.
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ spkr_id (`int`, *optional*, defaults to 0):
+ The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
+
+ kwargs (*optional*):
+ Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
+ arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+ - With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
+ text model and speech model respectively. They take priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+
+
+ Returns:
+ `Union[SeamlessM4TGenerationOutput, Tuple[Tensor]]`:
+ - If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
+ - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
+ sequence_length)` and `waveform_lengths`, which gives the length of each sample.
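+
+ Example (a minimal sketch under the assumption that the `"facebook/hf-seamless-m4t-medium"` checkpoint and
+ its [`SeamlessM4TProcessor`] are available; the silent dummy input only illustrates the expected call):
+
+ ```python
+ >>> from transformers import AutoProcessor, SeamlessM4TForSpeechToSpeech
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
+ >>> model = SeamlessM4TForSpeechToSpeech.from_pretrained("facebook/hf-seamless-m4t-medium")
+
+ >>> # two seconds of 16 kHz silence stand in for a real utterance
+ >>> audio = [0.0] * 32000
+ >>> inputs = processor(audios=audio, sampling_rate=16000, return_tensors="pt")
+
+ >>> # translate the (dummy) speech into French speech, using the first vocoder speaker
+ >>> waveform, waveform_lengths = model.generate(**inputs, tgt_lang="fra", spkr_id=0)
+ ```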
+ """
+ batch_size = len(input_features) if input_features is not None else len(kwargs.get("inputs_embeds"))
+
+ if tgt_lang is None:
+ raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
+ else:
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]:
+ lang_code_to_id = getattr(self.generation_config, key, None)
+ if lang_code_to_id is None:
+ raise ValueError(
+ f"""This model generation config doesn't have a `{key}` key which maps the target language
+ to the right token id. Make sure to load the right generation config."""
+ )
+ elif tgt_lang not in lang_code_to_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model.
+ Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports
+ more languages for text translation than for speech synthesis."""
+ )
+
+ kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
+ kwargs_text["output_hidden_states"] = True
+ kwargs_text["return_dict_in_generate"] = True
+ kwargs_text["output_scores"] = True
+
+ text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+
+ kwargs_text["decoder_input_ids"] = text_decoder_input_ids
+
+ # first generation
+ text_generation_output = super().generate(input_features, **kwargs_text)
+ sequences = text_generation_output.sequences
+
+ # prepare second generation
+ num_return_sequences = len(sequences) // batch_size
+ attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
+
+ # get last_hidden_state from encoder
+ encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask)[0]
+
+ # input modality = speech so new attention mask for the decoder
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_hidden_states.device
+ )
+ attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths
+ )
+
+ # take care of num_return_sequences
+ # take most probable hidden states per batch of return_sequences
+ # (batch_size*num_return_sequences, ...) -> (batch_size,...)
+ if num_return_sequences > 1:
+ idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1)
+ idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1)
+ idx_most_probable_sequences_per_batch = (
+ idx_most_probable_sequences_per_batch + torch.arange(batch_size).to(self.device) * num_return_sequences
+ )
+ sequences = sequences[idx_most_probable_sequences_per_batch]
+
+ # get decoder last hidden state - must do a pass through the text decoder
+ t2u_input_embeds = self.text_decoder(
+ input_ids=sequences,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ ).last_hidden_state
+
+ pad_token_id = self.generation_config.pad_token_id
+
+ # Compute new attention mask
+ seq_lens = (sequences != pad_token_id).int().sum(1)
+ t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
+ kwargs_speech["attention_mask"] = t2u_model_attention_mask
+
+ # Compute t2u decoder_input_ids
+ t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids")
+ t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang)
+ t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size).to(
+ self.device
+ )
+ kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids
+
+ # second generation
+ unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech)
+ output_unit_ids = unit_ids.detach().clone()
+
+ # get rid of t2u_decoder_input_ids
+ unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :]
+ # replace eos per pad
+ unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id
+ # offset of control symbols
+ unit_ids = torch.where(
+ unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
+ )
+
+ vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
+ vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device)
+
+ spkr_id = torch.tensor([[spkr_id]] * len(unit_ids)).to(self.device)
+
+ waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id)
+
+ if return_intermediate_token_ids:
+ return SeamlessM4TGenerationOutput(
+ waveform=waveform,
+ waveform_lengths=waveform_lengths,
+ sequences=sequences,
+ unit_sequences=output_unit_ids,
+ )
+
+ return waveform, waveform_lengths
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+
+@add_start_docstrings(
+ "The original SeamlessM4T Model transformer which can be used for every tasks available (S2ST, S2TT, T2TT, T2ST).",
+ SEAMLESS_M4T_START_DOCSTRING,
+ """
+ current_modality (`str`, *optional*, defaults to `"text"`):
+ Default modality. Used to initialize the model.
+ """,
+)
+class SeamlessM4TModel(SeamlessM4TPreTrainedModel):
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_encoder.embed_tokens.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config, current_modality="text"):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+
+ self.text_encoder = SeamlessM4TEncoder(config, self.shared)
+ self.speech_encoder = SeamlessM4TSpeechEncoder(config)
+ self.text_decoder = SeamlessM4TDecoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.current_modality = current_modality
+ if current_modality == "speech":
+ self.main_input_name = "input_features"
+
+ # these models already call post_init in their initialization
+ self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config)
+ self.vocoder = SeamlessM4TCodeHifiGan(config)
+
+ def set_modality(self, modality="text"):
+ if modality == "text":
+ self.main_input_name = "input_ids"
+ self.current_modality = "text"
+ elif modality == "speech":
+ self.main_input_name = "input_features"
+ self.current_modality = "speech"
+ else:
+ raise ValueError(f"`modality={modality}` is not a valid modality. It must be `text` or `speech`.")
+
+ def get_encoder(self):
+ if self.current_modality == "text":
+ return self.text_encoder
+ else:
+ return self.speech_encoder
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.text_encoder.embed_tokens = value
+ self.text_decoder.embed_tokens = value
+ self.shared = value
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_MODEL_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ input_features: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ if input_ids is None and input_features is None and inputs_embeds is None and encoder_outputs is None:
+ raise ValueError(
+ "`input_ids`,`input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not."
+ )
+ elif input_features is not None:
+ if input_ids is not None:
+ logger.warning(
+ "`input_ids` is not `None` but `input_features` has been given."
+ "`input_features` will be used in priority through the `speech_encoder`. "
+ "Make sure that `input_features` and `input_ids` are mutually exclusive."
+ )
+
+ if inputs_embeds is not None:
+ logger.warning(
+ "`inputs_embeds` is not `None` but `input_features` has been given."
+ "`input_features` will be used in priority through `speech_encoder`. "
+ "`inputs_embeds` will be ignored."
+ )
+
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText`"
+ "depending on the input modality. If you want to generate speech, use the `generate` method."
+ )
+
+ self.set_modality("speech")
+
+ encoder_outputs = self.speech_encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ elif input_ids is not None or inputs_embeds is not None:
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText`"
+ "depending on the input modality. If you want to generate speech, use the `generate` method."
+ )
+ self.set_modality("text")
+ encoder_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+ # input modality = speech so new attention mask
+ if self.current_modality == "speech" and attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_outputs[0].device
+ )
+ encoder_attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ input_features: Optional[torch.Tensor] = None,
+ return_intermediate_token_ids: Optional[bool] = None,
+ tgt_lang: Optional[str] = None,
+ spkr_id: Optional[int] = 0,
+ generate_speech: Optional[bool] = True,
+ **kwargs,
+ ) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]:
+ """
+ Generates translated token ids and/or translated audio waveforms.
+
+
+
+ This method successively calls the `.generate` function of two different sub-models. You can specify keyword
+ arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
+ that will be passed to one of them.
+
+ For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively
+ perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+
+
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*):
+ Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ return_intermediate_token_ids (`bool`, *optional*):
+ If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
+ to get translated text alongside the audio. Note that if `generate_speech=False`, this parameter will be
+ ignored.
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ spkr_id (`int`, *optional*, defaults to 0):
+ The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
+ generate_speech (`bool`, *optional*, defaults to `True`):
+ If `False`, will only return the text tokens and won't generate speech.
+
+ kwargs (*optional*):
+ Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
+ arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+ - With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
+ text model and speech model respectively. They take priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+
+ Returns:
+ `Union[SeamlessM4TGenerationOutput, Tuple[Tensor], ModelOutput]`:
+ - If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
+ - If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
+ shape `(batch_size, sequence_length)` and `waveform_lengths`, which gives the length of each sample.
+ - If `generate_speech=False`, it will return a `ModelOutput`.
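+
+ Example (a minimal sketch; the checkpoint id is reused from the tokenizer example in this file, and the
+ exact waveform content is not meaningful):
+
+ ```python
+ >>> from transformers import AutoProcessor, SeamlessM4TModel
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
+ >>> model = SeamlessM4TModel.from_pretrained("facebook/hf-seamless-m4t-medium")
+
+ >>> text_inputs = processor(text="Hello, my dog is cute.", src_lang="eng", return_tensors="pt")
+
+ >>> # T2ST: by default a (waveform, waveform_lengths) tuple is returned
+ >>> waveform, waveform_lengths = model.generate(**text_inputs, tgt_lang="fra")
+
+ >>> # T2TT: skip the speech sub-models and return the text generation output only
+ >>> text_output = model.generate(**text_inputs, tgt_lang="fra", generate_speech=False)
+ >>> translated_ids = text_output.sequences
+ ```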
+ """
+ if input_ids is None and input_features is None and kwargs.get("inputs_embeds", None) is None:
+ raise ValueError(
+ "`input_ids`,`input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not."
+ )
+
+ if generate_speech and tgt_lang is None:
+ raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
+
+ if tgt_lang is not None:
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]:
+ lang_code_to_id = getattr(self.generation_config, key, None)
+ if lang_code_to_id is None:
+ raise ValueError(
+ f"""This model generation config doesn't have a `{key}` key which maps the target language
+ to the right token id. Make sure to load the right generation config."""
+ )
+ elif tgt_lang not in lang_code_to_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model.
+ Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports
+ more languages for text translation than for speech synthesis."""
+ )
+
+ batch_size = (
+ len(input_features)
+ if input_features is not None
+ else (len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")))
+ )
+
+ kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
+ kwargs_text["output_hidden_states"] = True
+ kwargs_text["return_dict_in_generate"] = True
+ kwargs_text["output_scores"] = True
+
+ text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ if tgt_lang is not None:
+ # tgt_lang gets priority over decoder input ids
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+
+ kwargs_text["decoder_input_ids"] = text_decoder_input_ids
+
+ # first generation
+ if input_features is not None:
+ self.set_modality("speech")
+ if input_ids is not None:
+ logger.warning(
+ "`input_features` and `input_ids` are both non empty. `input_features` will be used in priority "
+ "through the speech encoder. Make sure `input_features=None` if you want to use the text encoder."
+ )
+ text_generation_output = super().generate(input_features=input_features, **kwargs_text)
+ else:
+ self.set_modality("text")
+ text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text)
+ sequences = text_generation_output.sequences
+
+ if not generate_speech:
+ return text_generation_output
+
+ # prepare second generation
+ num_return_sequences = len(sequences) // batch_size
+ attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
+
+ # get encoder last hidden states
+ if self.current_modality == "speech":
+ # get last_hidden_state from encoder - must do a pass through the speech encoder
+ encoder_hidden_states = self.speech_encoder(
+ input_features=input_features, attention_mask=attention_mask
+ ).last_hidden_state
+
+ # input modality = speech so new attention mask for the decoder
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_hidden_states.device
+ )
+ attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths
+ )
+ else:
+ encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
+
+ # take care of num_return_sequences
+ # take most probable hidden states per batch of return_sequences
+ # (batch_size*num_return_sequences, ...) -> (batch_size,...)
+ if num_return_sequences > 1:
+ idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1)
+ idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1)
+ idx_most_probable_sequences_per_batch = (
+ idx_most_probable_sequences_per_batch + torch.arange(batch_size).to(self.device) * num_return_sequences
+ )
+ sequences = sequences[idx_most_probable_sequences_per_batch]
+
+ # get decoder last hidden state - must do a pass through the text decoder
+ t2u_input_embeds = self.text_decoder(
+ input_ids=sequences,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ ).last_hidden_state
+
+ pad_token_id = self.generation_config.pad_token_id
+
+ # Compute new attention mask
+ seq_lens = (sequences != pad_token_id).int().sum(1)
+ t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
+ kwargs_speech["attention_mask"] = t2u_model_attention_mask
+
+ # Compute t2u decoder_input_ids
+ t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids")
+ t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang)
+ t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size).to(
+ self.device
+ )
+ kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids
+
+ # second generation
+ unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech)
+ output_unit_ids = unit_ids.detach().clone()
+
+ # get rid of t2u_decoder_input_ids
+ unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :]
+ # replace eos per pad
+ unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id
+ # offset of control symbols
+ unit_ids = torch.where(
+ unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
+ )
+
+ vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
+ vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device)
+
+ spkr_id = torch.tensor([[spkr_id]] * len(unit_ids)).to(self.device)
+
+ waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id)
+
+ if return_intermediate_token_ids:
+ return SeamlessM4TGenerationOutput(
+ waveform=waveform,
+ waveform_lengths=waveform_lengths,
+ sequences=sequences,
+ unit_sequences=output_unit_ids,
+ )
+
+ return waveform, waveform_lengths
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/tokenization_seamless_m4t.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/tokenization_seamless_m4t.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb6beb760a0e14c582aa1d83dc2d44c69e956c3d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t/tokenization_seamless_m4t.py
@@ -0,0 +1,562 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for SeamlessM4T."""
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import sentencepiece as spm
+
+from ...convert_slow_tokenizer import import_protobuf
+from ...tokenization_utils import (
+ BatchEncoding,
+ PreTokenizedInput,
+ PreTrainedTokenizer,
+ TextInput,
+)
+from ...tokenization_utils_base import AddedToken
+from ...utils import PaddingStrategy, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+SPIECE_UNDERLINE = "▁"
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
+
+
+class SeamlessM4TTokenizer(PreTrainedTokenizer):
+ """
+ Construct a SeamlessM4T tokenizer.
+
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ The tokenization method is `<language code> <tokens> <eos>` for source language documents, and
+ `<eos> <language code> <tokens> <eos>` for target language documents.
+
+ Examples:
+
+ ```python
+ >>> from transformers import SeamlessM4TTokenizer
+
+ >>> tokenizer = SeamlessM4TTokenizer.from_pretrained(
+ ... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
+ ... )
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
+ ```
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ tokenizer_file (`str`, *optional*):
+ The path to a tokenizer file to use instead of the vocab file.
+ src_lang (`str`, *optional*, defaults to `"eng"`):
+ The language to use as source language for translation.
+ tgt_lang (`str`, *optional*, defaults to `"fra"`):
+ The language to use as target language for translation.
+ sp_model_kwargs (`Dict[str, Any]`, *optional*):
+ Additional keyword arguments to pass to the `SentencePieceProcessor` initialization.
+ additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
+ A tuple or a list of additional special tokens. Can be used to specify the list of languages that will be
+ supported by the tokenizer.
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just
+ like any other word.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ prefix_tokens: List[int] = []
+ suffix_tokens: List[int] = []
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ tokenizer_file=None,
+ src_lang="eng",
+ tgt_lang="fra",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ additional_special_tokens=None,
+ add_prefix_space=True,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ # Add this unused argument to keep some important Copied from statements
+ self.legacy = False
+ self.vocab_file = vocab_file
+
+ self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
+
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
+ # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
+ # spm | '<unk>' | '<s>' | '</s>' | 'an' | 'en' | '_d' | 'er' | 'in' | '_s' | '_a'
+ # fairseq | '<pad>' | '<unk>' | '<s>' | '</s>' | 'an' | 'en' | '▁d' | 'er' | 'in' | '▁s'
+
+ # Mimic fairseq token-to-id alignment for the first 4 token
+ self._added_tokens_decoder = {
+ 0: AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token,
+ 1: AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token,
+ 2: AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token,
+ 3: AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token,
+ }
+
+ # The first "real" token "an" has position 4 in the original fairseq vocab and position 3 in the spm vocab
+ self.fairseq_offset = 1
+
+ self.sp_model_size = len(self.sp_model)
+
+ self._src_lang = f"__{src_lang}__" if "__" not in src_lang else src_lang
+ self._tgt_lang = f"__{tgt_lang}__" if "__" not in tgt_lang else tgt_lang
+ self.add_prefix_space = add_prefix_space
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ tokenizer_file=tokenizer_file,
+ src_lang=src_lang,
+ tgt_lang=tgt_lang,
+ additional_special_tokens=additional_special_tokens,
+ sp_model_kwargs=self.sp_model_kwargs,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ self.set_src_lang_special_tokens(self._src_lang)
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
+
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.__getstate__
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.__setstate__
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model)
+
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+ text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ text_pair_target: Optional[
+ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
+ ] = None,
+ padding: Union[bool, str, PaddingStrategy] = True,
+ pad_to_multiple_of: Optional[int] = 2,
+ src_lang: Optional[str] = None,
+ tgt_lang: Optional[str] = None,
+ **kwargs,
+ ):
+ """
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+              - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
+ lengths).
+            pad_to_multiple_of (`int`, *optional*, defaults to 2):
+                If set, will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ src_lang (`str`, *optional*):
+ A string representing the source language. If not specified, the last `src_lang` specified (either
+ during initialization or when calling this tokenizer) will be used.
+ tgt_lang (`str`, *optional*):
+ A string representing the target language. If not specified, the last `tgt_lang` specified (either
+ during initialization or when calling this tokenizer) will be used.
+ kwargs (*optional*):
+ Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizer.__call__`].
+ """
+ if src_lang is not None:
+ self.src_lang = src_lang
+ if tgt_lang is not None:
+ self.tgt_lang = tgt_lang
+
+ output = super().__call__(
+ text=text,
+ text_pair=text_pair,
+ text_target=text_target,
+ text_pair_target=text_pair_target,
+ padding=padding,
+ pad_to_multiple_of=pad_to_multiple_of,
+ **kwargs,
+ )
+
+ return BatchEncoding(output, tensor_type=kwargs.get("return_tensors"))
+
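+    # Minimal usage sketch for `__call__` above (hedged; assumes `tok` is an instance of this tokenizer
+    # class loaded via `from_pretrained`, checkpoint name intentionally omitted):
+    #
+    #     tok.src_lang = "eng"                              # stored internally as "__eng__"
+    #     enc = tok("Hello world", return_tensors="pt")
+    #     # padding defaults to True and pad_to_multiple_of defaults to 2 for this tokenizer
+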
+ @property
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.src_lang
+ def src_lang(self) -> str:
+ return self._src_lang
+
+ @src_lang.setter
+ def src_lang(self, new_src_lang: str) -> None:
+ if "__" not in new_src_lang:
+ self._src_lang = f"__{new_src_lang}__"
+ else:
+ self._src_lang = new_src_lang
+ self.set_src_lang_special_tokens(self._src_lang)
+
+ @property
+ def tgt_lang(self) -> str:
+ return self._tgt_lang
+
+ @tgt_lang.setter
+ def tgt_lang(self, new_tgt_lang: str) -> None:
+ if "__" not in new_tgt_lang:
+ self._tgt_lang = f"__{new_tgt_lang}__"
+ else:
+ self._tgt_lang = new_tgt_lang
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
+
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ prefix_ones = [1] * len(self.prefix_tokens)
+ suffix_ones = [1] * len(self.suffix_tokens)
+ if token_ids_1 is None:
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
+
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+        and adding special tokens. The sequences have the following format, where `X` represents the sequence:
+
+        - `input_ids` (for encoder): `[src_lang_code] X [eos]`
+        - `decoder_input_ids` (for decoder): `[eos, tgt_lang_code] X [eos]`
+
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
+ separator.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
+ # We don't expect to process pairs, but leave the pair logic for API consistency
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
+
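+    # Illustrative output of `build_inputs_with_special_tokens` above (ids shown symbolically): in
+    # source mode the prefix is [src_lang_code] and the suffix is [eos], so
+    #     build_inputs_with_special_tokens([t1, t2]) -> [src_lang_code, t1, t2, eos]
+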
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. This tokenizer does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def _build_translation_inputs(
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
+ ):
+ """Used by translation pipeline, to prepare inputs for the generate function"""
+ if src_lang is None or tgt_lang is None:
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model.")
+ self.src_lang = src_lang
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
+ if "__" not in tgt_lang:
+ tgt_lang = f"__{tgt_lang}__"
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
+ inputs["forced_bos_token_id"] = tgt_lang_id
+ return inputs
+
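+    # Sketch of how the translation pipeline uses `_build_translation_inputs` above (model loading and
+    # `generate()` are outside this file; names are illustrative):
+    #
+    #     inputs = tok._build_translation_inputs("Hello", return_tensors="pt",
+    #                                            src_lang="eng", tgt_lang="fra")
+    #     # inputs["forced_bos_token_id"] now holds the id of "__fra__"
+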
+ def get_vocab(self):
+ vocab = {
+ self.convert_ids_to_tokens(i): i for i in range(self.fairseq_offset, self.vocab_size + self.fairseq_offset)
+ }
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ @property
+ def unk_token_length(self):
+ return len(self.sp_model.encode(str(self.unk_token)))
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
+ def get_spm_processor(self, from_slow=False):
+ tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ if self.legacy or from_slow: # no dependency on protobuf
+ tokenizer.Load(self.vocab_file)
+ return tokenizer
+
+ with open(self.vocab_file, "rb") as f:
+ sp_model = f.read()
+ model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
+ model = model_pb2.ModelProto.FromString(sp_model)
+ normalizer_spec = model_pb2.NormalizerSpec()
+ normalizer_spec.add_dummy_prefix = False
+ model.normalizer_spec.MergeFrom(normalizer_spec)
+ sp_model = model.SerializeToString()
+ tokenizer.LoadFromSerializedProto(sp_model)
+ return tokenizer
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
+ def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
+ """
+ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
+ first token is special.
+ """
+ if self.legacy or len(text) == 0:
+ return super().tokenize(text, **kwargs)
+
+ text = text.replace(SPIECE_UNDERLINE, " ")
+ if self.add_prefix_space:
+ text = SPIECE_UNDERLINE + text
+
+ tokens = super().tokenize(text, **kwargs)
+
+ if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
+ tokens = tokens[1:]
+ return tokens
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
+ def _tokenize(self, text, **kwargs):
+ """
+ Returns a tokenized string.
+
+ We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
+ SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
+ `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
+        `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
+        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
+ """
+ tokens = self.sp_model.encode(text, out_type=str)
+ if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
+ return tokens
+
+        # 1. Encode string + prefix ex: "<unk> Hey"
+ tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
+ # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
+ return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
+
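+    # Worked illustration of the `unk_token` trick in `_tokenize` above: the text is encoded as
+    # f"{unk_token}{text}", and the first `unk_token_length` pieces (the encoding of the unk token
+    # itself) are sliced off, leaving the pieces of `text` with its leading SPIECE_UNDERLINE intact.
+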
+ def _convert_token_to_id(self, token):
+        """Converts a token (str) into an id using the vocab."""
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+        """Converts an index (integer) into a token (str) using the vocab."""
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (strings for sub-words) into a single string."""
+ # since we manually add the prefix space, we have to remove it when decoding
+ if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
+ tokens[0] = tokens[0][1:]
+
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.prepare_seq2seq_batch with eng_Latn->eng, fra_Latn->fra
+ def prepare_seq2seq_batch(
+ self,
+ src_texts: List[str],
+ src_lang: str = "eng",
+ tgt_texts: Optional[List[str]] = None,
+ tgt_lang: str = "fra",
+ **kwargs,
+ ) -> BatchEncoding:
+ self.src_lang = src_lang
+ self.tgt_lang = tgt_lang
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
+
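+    # Hedged example for `prepare_seq2seq_batch` above (checkpoint loading not shown):
+    #     batch = tok.prepare_seq2seq_batch(["Hello"], src_lang="eng",
+    #                                       tgt_texts=["Bonjour"], tgt_lang="fra")
+    #     # `batch` is a BatchEncoding with source-side `input_ids` and target-side `labels`
+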
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer._switch_to_input_mode
+ def _switch_to_input_mode(self):
+ return self.set_src_lang_special_tokens(self.src_lang)
+
+ # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer._switch_to_target_mode
+ def _switch_to_target_mode(self):
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
+
+ def set_src_lang_special_tokens(self, src_lang) -> None:
+ """Reset the special tokens to the source lang setting.
+        Prefix=[src_lang_code] and suffix=[eos].
+ """
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
+ self.init_kwargs["src_lang"] = src_lang
+
+ if self.cur_lang_code == self.unk_token_id:
+ logger.warning_once(
+                f"`src_lang={src_lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
+ )
+
+ self.prefix_tokens = [self.cur_lang_code]
+ self.suffix_tokens = [self.eos_token_id]
+
+ # https://github.com/facebookresearch/fairseq2/blob/c53f18e6be6b8b46b722f2249b8397b7eccd7ad3/src/fairseq2/models/nllb/tokenizer.py#L112-L116
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
+ """Reset the special tokens to the target lang setting.
+ Prefix=[eos, tgt_lang_code] and suffix=[eos].
+ """
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
+ self.init_kwargs["tgt_lang"] = lang
+
+ if self.cur_lang_code == self.unk_token_id:
+ logger.warning_once(
+                f"`tgt_lang={lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
+ )
+
+ self.prefix_tokens = [self.eos_token_id, self.cur_lang_code]
+ self.suffix_tokens = [self.eos_token_id]
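+
+# Resulting special-token layouts from the two setters above (symbolic ids, not literal values):
+#   source / input mode  : [src_lang_code] ...text tokens... [eos]
+#   target / decoder mode: [eos, tgt_lang_code] ...text tokens... [eos]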
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2df95dbc49200e76b2e18f0744a2e33e05cd9cd6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__init__.py
@@ -0,0 +1,74 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_xlm_roberta_xl": [
+ "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "XLMRobertaXLConfig",
+ "XLMRobertaXLOnnxConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_xlm_roberta_xl"] = [
+ "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "XLMRobertaXLForCausalLM",
+ "XLMRobertaXLForMaskedLM",
+ "XLMRobertaXLForMultipleChoice",
+ "XLMRobertaXLForQuestionAnswering",
+ "XLMRobertaXLForSequenceClassification",
+ "XLMRobertaXLForTokenClassification",
+ "XLMRobertaXLModel",
+ "XLMRobertaXLPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_xlm_roberta_xl import (
+ XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ XLMRobertaXLConfig,
+ XLMRobertaXLOnnxConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_xlm_roberta_xl import (
+ XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
+ XLMRobertaXLForCausalLM,
+ XLMRobertaXLForMaskedLM,
+ XLMRobertaXLForMultipleChoice,
+ XLMRobertaXLForQuestionAnswering,
+ XLMRobertaXLForSequenceClassification,
+ XLMRobertaXLForTokenClassification,
+ XLMRobertaXLModel,
+ XLMRobertaXLPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80e79d74e25805c533aeb8b3a82566c2523ab0c2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/configuration_xlm_roberta_xl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/configuration_xlm_roberta_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ea46edb608052aed483b882dcd2f38056d5866b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/configuration_xlm_roberta_xl.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..508daaf748d114fcc1afddc18002443de52e61f3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/modeling_xlm_roberta_xl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/modeling_xlm_roberta_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8acb2eb839f2ae1eeafa2571f10de74996217bad
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/modeling_xlm_roberta_xl.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..23deeea7435e7f2937ecd09fdee3c2c663999cd3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py
@@ -0,0 +1,153 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XLM_ROBERTa_XL configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class XLMRobertaXLConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`XLMRobertaXLModel`] or a [`TFXLMRobertaXLModel`].
+ It is used to instantiate a XLM_ROBERTA_XL model according to the specified arguments, defining the model
+ architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ XLM_ROBERTA_XL [facebook/xlm-roberta-xl](https://huggingface.co/facebook/xlm-roberta-xl) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 250880):
+ Vocabulary size of the XLM_ROBERTA_XL model. Defines the number of different tokens that can be represented
+            by the `input_ids` passed when calling [`XLMRobertaXLModel`].
+ hidden_size (`int`, *optional*, defaults to 2560):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 36):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 10240):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 514):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 1):
+ The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaXLModel`] or
+ [`TFXLMRobertaXLModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ classifier_dropout (`float`, *optional*):
+ The dropout ratio for the classification head.
+
+ Examples:
+
+ ```python
+ >>> from transformers import XLMRobertaXLConfig, XLMRobertaXLModel
+
+    >>> # Initializing a XLM_ROBERTA_XL facebook/xlm-roberta-xl style configuration
+ >>> configuration = XLMRobertaXLConfig()
+
+    >>> # Initializing a model (with random weights) from the facebook/xlm-roberta-xl style configuration
+ >>> model = XLMRobertaXLModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "xlm-roberta-xl"
+
+ def __init__(
+ self,
+ vocab_size=250880,
+ hidden_size=2560,
+ num_hidden_layers=36,
+ num_attention_heads=32,
+ intermediate_size=10240,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=514,
+ type_vocab_size=1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-05,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ position_embedding_type="absolute",
+ use_cache=True,
+ classifier_dropout=None,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+ self.classifier_dropout = classifier_dropout
+
+
+# Copied from transformers.models.roberta.configuration_roberta.RobertaOnnxConfig with Roberta->XLMRobertaXL
+class XLMRobertaXLOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ]
+ )
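+
+# For non-multiple-choice tasks, the `inputs` property above resolves to:
+#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
+#                ("attention_mask", {0: "batch", 1: "sequence"})])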
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f0fec32c387852535b90a2db111b2a487b1f61d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,183 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert XLM-RoBERTa-XL checkpoint."""
+
+import argparse
+import pathlib
+
+import fairseq
+import torch
+from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
+from fairseq.modules import TransformerSentenceEncoderLayer
+from packaging import version
+
+from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
+from transformers.models.bert.modeling_bert import (
+ BertIntermediate,
+ BertLayer,
+ BertOutput,
+ BertSelfAttention,
+ BertSelfOutput,
+)
+from transformers.models.roberta.modeling_roberta import RobertaAttention
+from transformers.utils import logging
+
+
+if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
+ raise Exception("requires fairseq >= 1.0.0a")
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+SAMPLE_TEXT = "Hello world! cécé herlolip"
+
+
+def convert_xlm_roberta_xl_checkpoint_to_pytorch(
+ roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
+):
+ """
+ Copy/paste/tweak roberta's weights to our BERT structure.
+ """
+ roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
+ roberta.eval() # disable dropout
+ roberta_sent_encoder = roberta.model.encoder.sentence_encoder
+ config = XLMRobertaConfig(
+ vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
+ hidden_size=roberta.cfg.model.encoder_embed_dim,
+ num_hidden_layers=roberta.cfg.model.encoder_layers,
+ num_attention_heads=roberta.cfg.model.encoder_attention_heads,
+ intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
+ max_position_embeddings=514,
+ type_vocab_size=1,
+ layer_norm_eps=1e-5, # PyTorch default used in fairseq
+ )
+ if classification_head:
+ config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
+
+ print("Our RoBERTa config:", config)
+
+ model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
+ model.eval()
+
+ # Now let's copy all the weights.
+ # Embeddings
+ model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
+ model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
+ model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
+ model.roberta.embeddings.token_type_embeddings.weight
+ ) # just zero them out b/c RoBERTa doesn't use them.
+
+ model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
+ model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
+
+ for i in range(config.num_hidden_layers):
+ # Encoder: start of layer
+ layer: BertLayer = model.roberta.encoder.layer[i]
+ roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
+
+ attention: RobertaAttention = layer.attention
+ attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
+ attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
+
+ # self attention
+ self_attn: BertSelfAttention = layer.attention.self
+ assert (
+ roberta_layer.self_attn.k_proj.weight.data.shape
+ == roberta_layer.self_attn.q_proj.weight.data.shape
+ == roberta_layer.self_attn.v_proj.weight.data.shape
+ == torch.Size((config.hidden_size, config.hidden_size))
+ )
+
+ self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
+ self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
+ self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
+ self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
+ self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
+ self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
+
+ # self-attention output
+ self_output: BertSelfOutput = layer.attention.output
+ assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
+ self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
+ self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
+
+ # this one is final layer norm
+ layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
+ layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
+
+ # intermediate
+ intermediate: BertIntermediate = layer.intermediate
+ assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
+ intermediate.dense.weight = roberta_layer.fc1.weight
+ intermediate.dense.bias = roberta_layer.fc1.bias
+
+ # output
+ bert_output: BertOutput = layer.output
+ assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
+ bert_output.dense.weight = roberta_layer.fc2.weight
+ bert_output.dense.bias = roberta_layer.fc2.bias
+ # end of layer
+
+ if classification_head:
+ model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
+ model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
+ model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
+ model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
+ else:
+ # LM Head
+ model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
+ model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
+ model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
+ model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
+ model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
+ model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
+
+ # Let's check that we get the same results.
+ input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
+
+ our_output = model(input_ids)[0]
+ if classification_head:
+ their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
+ else:
+ their_output = roberta.model(input_ids)[0]
+ print(our_output.shape, their_output.shape)
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
+ success = torch.allclose(our_output, their_output, atol=1e-3)
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
+ if not success:
+ raise Exception("Something went wRoNg")
+
+ pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--classification_head", action="store_true", help="Whether to convert a final classification head."
+ )
+ args = parser.parse_args()
+ convert_xlm_roberta_xl_checkpoint_to_pytorch(
+ args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
+ )
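+
+# Example invocation (paths are placeholders, not part of the original script):
+#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
+#       --roberta_checkpoint_path /path/to/fairseq_xlmr_xl_checkpoint_dir \
+#       --pytorch_dump_folder_path /path/to/output_dir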
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c17652dfa0cb49697b6a971f31d724649ad8ca4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
@@ -0,0 +1,1515 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch XLM RoBERTa xl,xxl model."""
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN, gelu
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlm_roberta_xl import XLMRobertaXLConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/xlm-roberta-xl"
+_CONFIG_FOR_DOC = "XLMRobertaXLConfig"
+
+
+from ..deprecated._archive_maps import XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class XLMRobertaXLEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ # End copy
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+
+ def forward(
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+        # If token_type_ids is not passed, use the registered buffer from the constructor, where it is all zeros. This
+        # usually occurs when token_type_ids is auto-generated; the registered buffer helps users trace the model
+        # without passing token_type_ids and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_inputs_embeds
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
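+# Example of the scheme above: with padding_idx == 1 and a sequence length of 3, the generated
+# position_ids are [[2, 3, 4]], i.e. non-padding positions always start at padding_idx + 1.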
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->XLMRobertaXL
+class XLMRobertaXLSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
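+        # Illustrative `distance` tensor for the relative-position lookup above (no cache,
+        # query_length == key_length == 3):
+        #     [[ 0, -1, -2],
+        #      [ 1,  0, -1],
+        #      [ 2,  1,  0]]
+        # Adding `max_position_embeddings - 1` shifts these into the valid embedding index range.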
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the XLMRobertaXLModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+class XLMRobertaXLSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+
+class XLMRobertaXLAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.self = XLMRobertaXLSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = XLMRobertaXLSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ ):
+ intermediate = self.self_attn_layer_norm(hidden_states)
+ self_outputs = self.self(
+ intermediate,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class XLMRobertaXLIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class XLMRobertaXLOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+
+class XLMRobertaXLLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = XLMRobertaXLAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = XLMRobertaXLAttention(config, position_embedding_type="absolute")
+ self.intermediate = XLMRobertaXLIntermediate(config)
+ self.output = XLMRobertaXLOutput(config)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ ):
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.LayerNorm(attention_output)
+ intermediate_output = self.intermediate(intermediate_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
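+# Note: unlike BERT/RoBERTa, the layer above applies LayerNorm *before* the attention and
+# feed-forward blocks (pre-LayerNorm), with a final LayerNorm applied at the encoder output.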
+
+class XLMRobertaXLEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([XLMRobertaXLLayer(config) for _ in range(config.num_hidden_layers)])
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ hidden_states = self.LayerNorm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class XLMRobertaXLPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class XLMRobertaXLPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLMRobertaXLConfig
+ base_model_prefix = "roberta"
+
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+XLM_ROBERTA_XL_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
+ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
+ general usage and behavior.
+
+ Parameters:
+ config ([`XLMRobertaXLConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLM_ROBERTA_XL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
+ IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare XLM-RoBERTa-XL Model transformer outputting raw hidden-states without any specific head on top.",
+ XLM_ROBERTA_XL_START_DOCSTRING,
+)
+class XLMRobertaXLModel(XLMRobertaXLPreTrainedModel):
+ """
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
+ Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be initialized with the `is_decoder`
+ argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with
+ both the `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected
+ as an input to the forward pass.
+
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
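+
+ A minimal sketch of configuring the model as a decoder (the `facebook/xlm-roberta-xl` checkpoint name is assumed
+ here; any compatible checkpoint works):
+
+ ```python
+ >>> from transformers import AutoConfig, XLMRobertaXLModel
+
+ >>> config = AutoConfig.from_pretrained("facebook/xlm-roberta-xl")
+ >>> config.is_decoder = True
+ >>> config.add_cross_attention = True
+ >>> model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xl", config=config)
+ ```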
+ """
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->XLMRobertaXL
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = XLMRobertaXLEmbeddings(config)
+ self.encoder = XLMRobertaXLEncoder(config)
+
+ self.pooler = XLMRobertaXLPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
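+ # Caching key/value states is only meaningful when the model runs auto-regressively as a decoder.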
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length: how many key/value positions are already cached (dim 2 of the first cached key tensor)
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
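+ # get_extended_attention_mask converts the 1/0 mask into an additive mask (0.0 to attend, a large negative
+ # value for masked positions) and, for decoders, also folds in the causal mask.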
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
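+ # invert_attention_mask performs the same 1/0 -> additive conversion for the encoder's (cross-attention) mask.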
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """XLM-RoBERTa-XL Model with a `language modeling` head on top for CLM fine-tuning.""",
+ XLM_ROBERTA_XL_START_DOCSTRING,
+)
+class XLMRobertaXLForCausalLM(XLMRobertaXLPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if not config.is_decoder:
+ logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
+
+ self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
+ self.lm_head = XLMRobertaXLLMHead(config)
+
+ self.init_weights()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMRobertaXLForCausalLM, AutoConfig
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")
+ >>> config = AutoConfig.from_pretrained("facebook/xlm-roberta-xl")
+ >>> config.is_decoder = True
+ >>> model = XLMRobertaXLForCausalLM.from_pretrained("facebook/xlm-roberta-xl", config=config)
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> prediction_logits = outputs.logits
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_shape)
+
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
+
+ def _reorder_cache(self, past_key_values, beam_idx):
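+ # During beam search, the batch dimension of the cache has to follow the surviving beams; index_select
+ # reorders every cached key/value tensor along dim 0 according to beam_idx.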
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """XLM-RoBERTa-XL Model with a `language modeling` head on top.""", XLM_ROBERTA_XL_START_DOCSTRING
+)
+class XLMRobertaXLForMaskedLM(XLMRobertaXLPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
+ self.lm_head = XLMRobertaXLLMHead(config)
+
+ self.init_weights()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="",
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+ Used to hide legacy arguments that have been deprecated.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class XLMRobertaXLLMHead(nn.Module):
+ """XLM-RoBERTa-XL Head for masked language modeling."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.decoder.bias = self.bias
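+ # The bias is kept as a separate parameter and shared with the decoder so that resizing the output
+ # embeddings keeps the two in sync; _tie_weights below re-links them if they ever get disconnected.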
+
+ def forward(self, features, **kwargs):
+ x = self.dense(features)
+ x = gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ x = self.decoder(x)
+
+ return x
+
+ def _tie_weights(self):
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ self.bias = self.decoder.bias
+
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa-XL Model transformer with a sequence classification/regression head on top (a linear layer on top
+ of the pooled output) e.g. for GLUE tasks.
+ """,
+ XLM_ROBERTA_XL_START_DOCSTRING,
+)
+class XLMRobertaXLForSequenceClassification(XLMRobertaXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
+ self.classifier = XLMRobertaXLClassificationHead(config)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
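+ # Infer the loss type once from the config: a single label means regression; integer labels mean
+ # single-label classification; otherwise (e.g. float multi-hot labels) multi-label classification.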
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa-XL Model with a multiple choice classification head on top (a linear layer on top of the pooled
+ output and a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ XLM_ROBERTA_XL_START_DOCSTRING,
+)
+class XLMRobertaXLForMultipleChoice(XLMRobertaXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.roberta = XLMRobertaXLModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(
+ XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
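+ # Flatten the choices dimension into the batch dimension so every (context, choice) pair is encoded
+ # independently: (batch_size, num_choices, seq_len) -> (batch_size * num_choices, seq_len).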
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.roberta(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ token_type_ids=flat_token_type_ids,
+ attention_mask=flat_attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
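+ # logits has shape (batch_size * num_choices, 1); un-flatten it so the loss is a softmax over the choices
+ # for each example.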
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa-XL Model with a token classification head on top (a linear layer on top of the hidden-states
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
+ """,
+ XLM_ROBERTA_XL_START_DOCSTRING,
+)
+class XLMRobertaXLForTokenClassification(XLMRobertaXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ # Only keep active parts of the loss
+ if attention_mask is not None:
+ active_loss = attention_mask.view(-1) == 1
+ active_logits = logits.view(-1, self.num_labels)
+ active_labels = torch.where(
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
+ )
+ loss = loss_fct(active_logits, active_labels)
+ else:
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class XLMRobertaXLClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(self, features, **kwargs):
+ x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
+ x = self.dropout(x)
+ x = self.dense(x)
+ x = torch.tanh(x)
+ x = self.dropout(x)
+ x = self.out_proj(x)
+ return x
+
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa-XL Model with a span classification head on top for extractive question-answering tasks like SQuAD
+ (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLM_ROBERTA_XL_START_DOCSTRING,
+)
+class XLMRobertaXLForQuestionAnswering(XLMRobertaXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, the split may add an extra dimension to the positions; squeeze it away
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+ padding_idx: int
+ past_key_values_length: int
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
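+ # e.g. with padding_idx=1 and no past: input_ids [[0, 5, 6, 1, 1]] -> mask [[1, 1, 1, 0, 0]]
+ # -> incremental_indices [[1, 2, 3, 0, 0]] -> position_ids [[2, 3, 4, 1, 1]]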
+ return incremental_indices.long() + padding_idx