diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..435faa55a6729e1c6328e1b1cbe225c1a17c24cd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/configuration_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/configuration_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed1b88f4c415e5b85c47f3b5f1491eb3b1f052ee
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/configuration_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..495e0ddc4b91e0a10c57c84458b74b8f7e133e5e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/debug_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/debug_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55b7d0f9eafb3bf5e1ccc4ea214d9f8e2fcd48ad
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/debug_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b809b7549eb1d9b798f262a740bf607ca68ec384
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_tf_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_tf_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba80f6e1dd2cc284fa67925bff4fcfb628549931
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_tf_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/hyperparameter_search.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/hyperparameter_search.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..418425940dc42fa5cefb5ba3720f572587d2cc0a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/hyperparameter_search.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65877ff48461781225f32f77a2f972db6a7c6ca4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/keras_callbacks.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/keras_callbacks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f420ec6e16f67f12328abd4c19093ef964ff1d0d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/keras_callbacks.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_attn_mask_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_attn_mask_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..212a10dc0f7196e699823b3b453ddc9a249ab3e5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_attn_mask_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_outputs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_outputs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a02c4fb6e6b5b00897f9d55827e543c2925f928c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_outputs.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e094ab5301a21d7adfaec99097e2d043e9b44cf1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fd144c28941dc7073935fa01774c753111386ec
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_outputs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_outputs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f17dfb5ab9c58832d964cd5b2a47e8144a3a99bc
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_outputs.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_pytorch_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_pytorch_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0073b07481eecd5b4bdb6956180131e44ed40d72
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_pytorch_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e04e900eb8f6e31b81ce25c46b6766db8f162030
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c6462f289de0f8c19c8276322623caaf4832adf7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f418e314f39d776cc76cc07c6dcbbc7d45c4a7a4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/processing_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/processing_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8aa35046bc6a023c6e0b76a43d1fb81e52727193
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/processing_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..312b573183523f0268afde25ec1ff4deea90a59e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/testing_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/testing_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1b48a6e0730060a80be17edcb3410468e87815e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/testing_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab7e624b3b2df5f789261e2a57da447fea976b85
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_seq2seq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_seq2seq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2464200ea8fa555a63a3c16eb080e560b08c5dc2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_seq2seq.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bbf9ce34b510175e2ac5a98ee064cdc2c864796
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args_tf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args_tf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14ca412b73c011dac77b516f838261956b077767
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args_tf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c4c8309597b04aacf999c4e63973fe935628276
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f2a6ad9600d97cc0d6ba38635e4702ae76e74a5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__init__.py
@@ -0,0 +1,306 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_utils": ["GenerationConfig", "GenerationMode"],
+ "streamers": ["TextIteratorStreamer", "TextStreamer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["beam_constraints"] = [
+ "Constraint",
+ "ConstraintListState",
+ "DisjunctiveConstraint",
+ "PhrasalConstraint",
+ ]
+ _import_structure["beam_search"] = [
+ "BeamHypotheses",
+ "BeamScorer",
+ "BeamSearchScorer",
+ "ConstrainedBeamSearchScorer",
+ ]
+ _import_structure["candidate_generator"] = [
+ "AssistedCandidateGenerator",
+ "CandidateGenerator",
+ "PromptLookupCandidateGenerator",
+ ]
+ _import_structure["logits_process"] = [
+ "AlternatingCodebooksLogitsProcessor",
+ "ClassifierFreeGuidanceLogitsProcessor",
+ "EncoderNoRepeatNGramLogitsProcessor",
+ "EncoderRepetitionPenaltyLogitsProcessor",
+ "EpsilonLogitsWarper",
+ "EtaLogitsWarper",
+ "ExponentialDecayLengthPenalty",
+ "ForcedBOSTokenLogitsProcessor",
+ "ForcedEOSTokenLogitsProcessor",
+ "ForceTokensLogitsProcessor",
+ "HammingDiversityLogitsProcessor",
+ "InfNanRemoveLogitsProcessor",
+ "LogitNormalization",
+ "LogitsProcessor",
+ "LogitsProcessorList",
+ "LogitsWarper",
+ "MinLengthLogitsProcessor",
+ "MinNewTokensLengthLogitsProcessor",
+ "NoBadWordsLogitsProcessor",
+ "NoRepeatNGramLogitsProcessor",
+ "PrefixConstrainedLogitsProcessor",
+ "RepetitionPenaltyLogitsProcessor",
+ "SequenceBiasLogitsProcessor",
+ "SuppressTokensLogitsProcessor",
+ "SuppressTokensAtBeginLogitsProcessor",
+ "TemperatureLogitsWarper",
+ "TopKLogitsWarper",
+ "TopPLogitsWarper",
+ "TypicalLogitsWarper",
+ "UnbatchedClassifierFreeGuidanceLogitsProcessor",
+ "WhisperTimeStampLogitsProcessor",
+ ]
+ _import_structure["stopping_criteria"] = [
+ "MaxNewTokensCriteria",
+ "MaxLengthCriteria",
+ "MaxTimeCriteria",
+ "StoppingCriteria",
+ "StoppingCriteriaList",
+ "validate_stopping_criteria",
+ ]
+ _import_structure["utils"] = [
+ "GenerationMixin",
+ "GreedySearchEncoderDecoderOutput",
+ "GreedySearchDecoderOnlyOutput",
+ "SampleEncoderDecoderOutput",
+ "SampleDecoderOnlyOutput",
+ "BeamSearchEncoderDecoderOutput",
+ "BeamSearchDecoderOnlyOutput",
+ "BeamSampleEncoderDecoderOutput",
+ "BeamSampleDecoderOnlyOutput",
+ "ContrastiveSearchEncoderDecoderOutput",
+ "ContrastiveSearchDecoderOnlyOutput",
+ "GenerateBeamDecoderOnlyOutput",
+ "GenerateBeamEncoderDecoderOutput",
+ "GenerateDecoderOnlyOutput",
+ "GenerateEncoderDecoderOutput",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tf_logits_process"] = [
+ "TFForcedBOSTokenLogitsProcessor",
+ "TFForcedEOSTokenLogitsProcessor",
+ "TFForceTokensLogitsProcessor",
+ "TFLogitsProcessor",
+ "TFLogitsProcessorList",
+ "TFLogitsWarper",
+ "TFMinLengthLogitsProcessor",
+ "TFNoBadWordsLogitsProcessor",
+ "TFNoRepeatNGramLogitsProcessor",
+ "TFRepetitionPenaltyLogitsProcessor",
+ "TFSuppressTokensAtBeginLogitsProcessor",
+ "TFSuppressTokensLogitsProcessor",
+ "TFTemperatureLogitsWarper",
+ "TFTopKLogitsWarper",
+ "TFTopPLogitsWarper",
+ ]
+ _import_structure["tf_utils"] = [
+ "TFGenerationMixin",
+ "TFGreedySearchDecoderOnlyOutput",
+ "TFGreedySearchEncoderDecoderOutput",
+ "TFSampleEncoderDecoderOutput",
+ "TFSampleDecoderOnlyOutput",
+ "TFBeamSearchEncoderDecoderOutput",
+ "TFBeamSearchDecoderOnlyOutput",
+ "TFBeamSampleEncoderDecoderOutput",
+ "TFBeamSampleDecoderOnlyOutput",
+ "TFContrastiveSearchEncoderDecoderOutput",
+ "TFContrastiveSearchDecoderOnlyOutput",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["flax_logits_process"] = [
+ "FlaxForcedBOSTokenLogitsProcessor",
+ "FlaxForcedEOSTokenLogitsProcessor",
+ "FlaxForceTokensLogitsProcessor",
+ "FlaxLogitsProcessor",
+ "FlaxLogitsProcessorList",
+ "FlaxLogitsWarper",
+ "FlaxMinLengthLogitsProcessor",
+ "FlaxSuppressTokensAtBeginLogitsProcessor",
+ "FlaxSuppressTokensLogitsProcessor",
+ "FlaxTemperatureLogitsWarper",
+ "FlaxTopKLogitsWarper",
+ "FlaxTopPLogitsWarper",
+ "FlaxWhisperTimeStampLogitsProcessor",
+ ]
+ _import_structure["flax_utils"] = [
+ "FlaxGenerationMixin",
+ "FlaxGreedySearchOutput",
+ "FlaxSampleOutput",
+ "FlaxBeamSearchOutput",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_utils import GenerationConfig, GenerationMode
+ from .streamers import TextIteratorStreamer, TextStreamer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint
+ from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
+ from .candidate_generator import AssistedCandidateGenerator, CandidateGenerator, PromptLookupCandidateGenerator
+ from .logits_process import (
+ AlternatingCodebooksLogitsProcessor,
+ ClassifierFreeGuidanceLogitsProcessor,
+ EncoderNoRepeatNGramLogitsProcessor,
+ EncoderRepetitionPenaltyLogitsProcessor,
+ EpsilonLogitsWarper,
+ EtaLogitsWarper,
+ ExponentialDecayLengthPenalty,
+ ForcedBOSTokenLogitsProcessor,
+ ForcedEOSTokenLogitsProcessor,
+ ForceTokensLogitsProcessor,
+ HammingDiversityLogitsProcessor,
+ InfNanRemoveLogitsProcessor,
+ LogitNormalization,
+ LogitsProcessor,
+ LogitsProcessorList,
+ LogitsWarper,
+ MinLengthLogitsProcessor,
+ MinNewTokensLengthLogitsProcessor,
+ NoBadWordsLogitsProcessor,
+ NoRepeatNGramLogitsProcessor,
+ PrefixConstrainedLogitsProcessor,
+ RepetitionPenaltyLogitsProcessor,
+ SequenceBiasLogitsProcessor,
+ SuppressTokensAtBeginLogitsProcessor,
+ SuppressTokensLogitsProcessor,
+ TemperatureLogitsWarper,
+ TopKLogitsWarper,
+ TopPLogitsWarper,
+ TypicalLogitsWarper,
+ UnbatchedClassifierFreeGuidanceLogitsProcessor,
+ WhisperTimeStampLogitsProcessor,
+ )
+ from .stopping_criteria import (
+ MaxLengthCriteria,
+ MaxNewTokensCriteria,
+ MaxTimeCriteria,
+ StoppingCriteria,
+ StoppingCriteriaList,
+ validate_stopping_criteria,
+ )
+ from .utils import (
+ BeamSampleDecoderOnlyOutput,
+ BeamSampleEncoderDecoderOutput,
+ BeamSearchDecoderOnlyOutput,
+ BeamSearchEncoderDecoderOutput,
+ ContrastiveSearchDecoderOnlyOutput,
+ ContrastiveSearchEncoderDecoderOutput,
+ GenerateBeamDecoderOnlyOutput,
+ GenerateBeamEncoderDecoderOutput,
+ GenerateDecoderOnlyOutput,
+ GenerateEncoderDecoderOutput,
+ GenerationMixin,
+ GreedySearchDecoderOnlyOutput,
+ GreedySearchEncoderDecoderOutput,
+ SampleDecoderOnlyOutput,
+ SampleEncoderDecoderOutput,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tf_logits_process import (
+ TFForcedBOSTokenLogitsProcessor,
+ TFForcedEOSTokenLogitsProcessor,
+ TFForceTokensLogitsProcessor,
+ TFLogitsProcessor,
+ TFLogitsProcessorList,
+ TFLogitsWarper,
+ TFMinLengthLogitsProcessor,
+ TFNoBadWordsLogitsProcessor,
+ TFNoRepeatNGramLogitsProcessor,
+ TFRepetitionPenaltyLogitsProcessor,
+ TFSuppressTokensAtBeginLogitsProcessor,
+ TFSuppressTokensLogitsProcessor,
+ TFTemperatureLogitsWarper,
+ TFTopKLogitsWarper,
+ TFTopPLogitsWarper,
+ )
+ from .tf_utils import (
+ TFBeamSampleDecoderOnlyOutput,
+ TFBeamSampleEncoderDecoderOutput,
+ TFBeamSearchDecoderOnlyOutput,
+ TFBeamSearchEncoderDecoderOutput,
+ TFContrastiveSearchDecoderOnlyOutput,
+ TFContrastiveSearchEncoderDecoderOutput,
+ TFGenerationMixin,
+ TFGreedySearchDecoderOnlyOutput,
+ TFGreedySearchEncoderDecoderOutput,
+ TFSampleDecoderOnlyOutput,
+ TFSampleEncoderDecoderOutput,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .flax_logits_process import (
+ FlaxForcedBOSTokenLogitsProcessor,
+ FlaxForcedEOSTokenLogitsProcessor,
+ FlaxForceTokensLogitsProcessor,
+ FlaxLogitsProcessor,
+ FlaxLogitsProcessorList,
+ FlaxLogitsWarper,
+ FlaxMinLengthLogitsProcessor,
+ FlaxSuppressTokensAtBeginLogitsProcessor,
+ FlaxSuppressTokensLogitsProcessor,
+ FlaxTemperatureLogitsWarper,
+ FlaxTopKLogitsWarper,
+ FlaxTopPLogitsWarper,
+ FlaxWhisperTimeStampLogitsProcessor,
+ )
+ from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
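For reference, the `generation/__init__.py` added above only registers public names in `_import_structure`; `_LazyModule` then defers importing each submodule until one of its names is first accessed. Below is a minimal usage sketch of that public surface, assuming the vendored `transformers` (and `torch`, for the torch-only names) is importable.

```py
# Hypothetical usage sketch: the lazy-module indirection above means these
# imports only load the heavy submodules (logits_process, stopping_criteria,
# ...) when the names are first resolved.
from transformers.generation import (
    GenerationConfig,
    LogitsProcessorList,
    MaxLengthCriteria,
    MinLengthLogitsProcessor,
    StoppingCriteriaList,
)

config = GenerationConfig(max_new_tokens=20, do_sample=False)
processors = LogitsProcessorList([MinLengthLogitsProcessor(5, eos_token_id=2)])
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=32)])
print(config.max_new_tokens, len(processors), len(criteria))
```

TensorFlow- and Flax-only names (the `TF*` and `Flax*` entries) resolve the same way, but only when `is_tf_available()` / `is_flax_available()` report the corresponding framework as installed.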
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c42cb668ff524e677a392439a9831a79853d82f8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..313fbfb104d060be834fd0d69a4de6f1e3206df2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2bd2bb419a024f2bb4d1d3f4c5f244c108061b3b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71d682a66874003651242ec073416bdc2398a2a5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5788ba0a285082b401a70d258ebfad4e3bb9a3a6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5db3a6f27a87c8dab38e323452335f6100ca0134
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d922e786aaf7465853d9f112e7e437391d58b2dd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72323af023cec087e391abd96460a258f4e599db
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..250564a922cd350f428ae5982e16b0466e367ce4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e56b3645c07eabdbd12343dd2951044587234eb2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..466e9df80f8885b87344623d4074444520e52c84
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16b701eb71402c38c452689b47b71ea3865abe76
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b772440843ad3a649fcf588356324498bc9cdd5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_constraints.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_constraints.py
new file mode 100644
index 0000000000000000000000000000000000000000..b53c4512427a8793449da9f68c39a12527721d40
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_constraints.py
@@ -0,0 +1,521 @@
+from abc import ABC, abstractmethod
+from typing import List, Optional
+
+
+class Constraint(ABC):
+ r"""Abstract base class for all constraints that can be applied during generation.
+ It must define how the constraint can be satisfied.
+
+ All classes that inherit Constraint must follow the requirement that
+
+ ```py
+ completed = False
+ while not completed:
+ _, completed = constraint.update(constraint.advance())
+ ```
+
+ will always terminate (halt).
+ """
+
+ def __init__(self):
+ # test for the above condition
+ self.test()
+
+ def test(self):
+ """
+ Tests whether this constraint has been properly defined.
+ """
+ counter = 0
+ completed = False
+ while not completed:
+ if counter == 1:
+ self.reset()
+ advance = self.advance()
+ if not self.does_advance(advance):
+ raise Exception(
+ "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
+ )
+
+ stepped, completed, reset = self.update(advance)
+ counter += 1
+
+ if counter > 10000:
+ raise Exception("update() does not fulfill the constraint.")
+
+ if self.remaining() != 0:
+ raise Exception("Custom Constraint is not defined correctly.")
+
+ @abstractmethod
+ def advance(self):
+ """
+ When called, returns the token that would take this constraint one step closer to being fulfilled.
+
+ Return:
+            token_ids (`int` or `List[int]`): The token id(s) that would advance this constraint, or `None` if it is completed.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+ @abstractmethod
+ def does_advance(self, token_id: int):
+ """
+ Reads in a token and returns whether it creates progress.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+ @abstractmethod
+ def update(self, token_id: int):
+ """
+ Reads in a token and returns booleans that indicate the progress made by it. This function will update the
+        state of this object, unlike `does_advance(self, token_id: int)`.
+
+ This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
+ been generated. This becomes important if token_id != desired token (refer to else statement in
+ PhrasalConstraint)
+
+ Args:
+ token_id(`int`):
+ The id of a newly generated token in the beam search.
+ Return:
+ stepped(`bool`):
+                Whether this constraint has become one step closer to being fulfilled.
+ completed(`bool`):
+ Whether this constraint has been completely fulfilled by this token being generated.
+ reset (`bool`):
+ Whether this constraint has reset its progress by this token being generated.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+ @abstractmethod
+ def reset(self):
+ """
+ Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
+        a constraint is interrupted by an unwanted token.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+ @abstractmethod
+ def remaining(self):
+ """
+ Returns the number of remaining steps of `advance()` in order to complete this constraint.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+ @abstractmethod
+ def copy(self, stateful=False):
+ """
+ Creates a new instance of this constraint.
+
+ Args:
+            stateful(`bool`): Whether to not only copy the constraint for a new instance, but also its state.
+
+ Return:
+ constraint(`Constraint`): The same constraint as the one being called from.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+
+class PhrasalConstraint(Constraint):
+ r"""
+ [`Constraint`] enforcing that an ordered sequence of tokens is included in the output.
+
+ Args:
+ token_ids (`List[int]`):
+            The ids of the tokens that must be generated by the output.
+ """
+
+ def __init__(self, token_ids: List[int]):
+ super(Constraint, self).__init__()
+
+ if not isinstance(token_ids, list) or len(token_ids) == 0:
+ raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
+ if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
+            raise ValueError(f"`token_ids` has to be a list of positive integers, but is {token_ids}.")
+
+ self.token_ids = token_ids
+
+ self.seqlen = len(self.token_ids)
+ self.fulfilled_idx = -1 # the index of the currently fulfilled step
+ self.completed = False
+
+ def advance(self):
+ if self.completed:
+ return None
+ return self.token_ids[self.fulfilled_idx + 1]
+
+ def does_advance(self, token_id: int):
+ if not isinstance(token_id, int):
+ raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+
+ if self.completed:
+ return False
+
+ return token_id == self.token_ids[self.fulfilled_idx + 1]
+
+ def update(self, token_id: int):
+ if not isinstance(token_id, int):
+ raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+
+ stepped = False
+ completed = False
+ reset = False
+
+ if self.does_advance(token_id):
+ self.fulfilled_idx += 1
+ stepped = True
+ if self.fulfilled_idx == (self.seqlen - 1):
+ completed = True
+ self.completed = completed
+ else:
+ # failed to make progress.
+ reset = True
+ self.reset()
+ return stepped, completed, reset
+
+ def reset(self):
+ self.completed = False
+ self.fulfilled_idx = 0
+
+ def remaining(self):
+ return self.seqlen - (self.fulfilled_idx + 1)
+
+ def copy(self, stateful=False):
+ new_constraint = PhrasalConstraint(self.token_ids)
+
+ if stateful:
+            new_constraint.seqlen = self.seqlen
+ new_constraint.fulfilled_idx = self.fulfilled_idx
+ new_constraint.completed = self.completed
+
+ return new_constraint
+
+
+class DisjunctiveTrie:
+ def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
+ r"""
+ A helper class that builds a trie with the words represented in `nested_token_ids`.
+ """
+ self.max_height = max([len(one) for one in nested_token_ids])
+
+ root = {}
+ for token_ids in nested_token_ids:
+ level = root
+ for tidx, token_id in enumerate(token_ids):
+ if token_id not in level:
+ level[token_id] = {}
+
+ level = level[token_id]
+
+ if no_subsets and self.has_subsets(root, nested_token_ids):
+ raise ValueError(
+ "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
+ f" {nested_token_ids}."
+ )
+
+ self.trie = root
+
+ def next_tokens(self, current_seq):
+ """
+ The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
+ """
+ start = self.trie
+
+ for current_token in current_seq:
+ start = start[current_token]
+
+ next_tokens = list(start.keys())
+
+ return next_tokens
+
+ def reached_leaf(self, current_seq):
+ next_tokens = self.next_tokens(current_seq)
+
+ return len(next_tokens) == 0
+
+ def count_leaves(self, root):
+ next_nodes = list(root.values())
+ if len(next_nodes) == 0:
+ return 1
+ else:
+ return sum([self.count_leaves(nn) for nn in next_nodes])
+
+ def has_subsets(self, trie, nested_token_ids):
+ """
+ Returns whether # of leaves == # of words. Otherwise some word is a subset of another.
+ """
+ leaf_count = self.count_leaves(trie)
+ return len(nested_token_ids) != leaf_count
+
+
+class DisjunctiveConstraint(Constraint):
+ r"""
+ A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.
+
+ Args:
+ nested_token_ids (`List[List[int]]`):
+ A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from
+ the list of words.
+ """
+
+ def __init__(self, nested_token_ids: List[List[int]]):
+ super(Constraint, self).__init__()
+
+ if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
+ raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
+ if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
+ raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
+ if any(
+ any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
+ for token_ids in nested_token_ids
+ ):
+ raise ValueError(
+ f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
+ )
+
+ self.trie = DisjunctiveTrie(nested_token_ids)
+ self.token_ids = nested_token_ids
+
+ self.seqlen = self.trie.max_height
+ self.current_seq = []
+ self.completed = False
+
+ def advance(self):
+ token_list = self.trie.next_tokens(self.current_seq)
+
+ if len(token_list) == 0:
+ return None
+ else:
+ return token_list
+
+ def does_advance(self, token_id: int):
+ if not isinstance(token_id, int):
+ raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
+
+ next_tokens = self.trie.next_tokens(self.current_seq)
+
+ return token_id in next_tokens
+
+ def update(self, token_id: int):
+ if not isinstance(token_id, int):
+ raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
+
+ stepped = False
+ completed = False
+ reset = False
+
+ if self.does_advance(token_id):
+ self.current_seq.append(token_id)
+ stepped = True
+ else:
+ reset = True
+ self.reset()
+
+ completed = self.trie.reached_leaf(self.current_seq)
+ self.completed = completed
+
+ return stepped, completed, reset
+
+ def reset(self):
+ self.completed = False
+ self.current_seq = []
+
+ def remaining(self):
+ if self.completed:
+ # since this can be completed without reaching max height
+ return 0
+ else:
+ return self.seqlen - len(self.current_seq)
+
+ def copy(self, stateful=False):
+ new_constraint = DisjunctiveConstraint(self.token_ids)
+
+ if stateful:
+            new_constraint.seqlen = self.seqlen
+ new_constraint.current_seq = self.current_seq
+ new_constraint.completed = self.completed
+
+ return new_constraint
+
+
+class ConstraintListState:
+ r"""
+    A class for beam scorers to track their progress through a list of constraints.
+
+ Args:
+ constraints (`List[Constraint]`):
+ A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
+ """
+
+ def __init__(self, constraints: List[Constraint]):
+ self.constraints = constraints
+
+ # max # of steps required to fulfill a given constraint
+ self.max_seqlen = max([c.seqlen for c in constraints])
+ self.n_constraints = len(constraints)
+ self.completed = False
+
+ self.init_state()
+
+ def init_state(self):
+ self.complete_constraints = []
+ self.inprogress_constraint = None
+ self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
+
+ def get_bank(self):
+ add = 0
+ if self.inprogress_constraint:
+ # extra points for having a constraint mid-fulfilled
+ add += self.max_seqlen - self.inprogress_constraint.remaining()
+
+ return (len(self.complete_constraints) * self.max_seqlen) + add
+
+ def advance(self):
+ """The list of tokens to generate such that we can make progress.
+        By "list" we don't mean the list of tokens that will fully fulfill a constraint.
+
+        Given constraints `c_i = {t_ij | j == # of tokens}`, if we're not in the middle of progressing through a
+ specific constraint `c_i`, we return:
+
+ `[t_k1 for k in indices of unfulfilled constraints]`
+
+ If we are in the middle of a constraint, then we return:
+ `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.
+
+        Though we don't care which constraint is fulfilled first, if we are in the process of fulfilling a constraint,
+ that's the only one we'll return.
+ """
+ token_list = []
+ if self.inprogress_constraint is None:
+ for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
+ advance = constraint.advance()
+ if isinstance(advance, int):
+ token_list.append(advance)
+ elif isinstance(advance, list):
+ token_list.extend(advance)
+ else:
+ advance = self.inprogress_constraint.advance()
+ if isinstance(advance, int):
+ token_list.append(advance)
+ elif isinstance(advance, list):
+ token_list.extend(advance)
+
+ if len(token_list) == 0:
+ return None
+ else:
+ return token_list
+
+ def reset(self, token_ids: Optional[List[int]]):
+ """
+        token_ids: the tokens generated so far, used to reset the state of progress through the constraints.
+ """
+ self.init_state()
+
+ if token_ids is not None:
+ for token in token_ids:
+ # completes or steps **one** constraint
+ complete, stepped = self.add(token)
+
+ # the entire list of constraints are fulfilled
+ if self.completed:
+ break
+
+ def add(self, token_id: int):
+ if not isinstance(token_id, int):
+ raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
+
+ complete, stepped = False, False
+
+ if self.completed:
+ complete = True
+ stepped = False
+ return complete, stepped
+
+ if self.inprogress_constraint is not None:
+            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
+            # current job, simply update the state.
+
+ stepped, complete, reset = self.inprogress_constraint.update(token_id)
+ if reset:
+ # 1. If the next token breaks the progress, then we must restart.
+ # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
+
+ # But that doesn't mean we self.init_state(), since we only reset the state for this particular
+ # constraint, not the full list of constraints.
+
+ self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
+ self.inprogress_constraint = None
+
+ if complete:
+ # 2. If the next token completes the constraint, move it to completed list, set
+ # inprogress to None. If there are no pending constraints either, then this full list of constraints
+ # is complete.
+
+ self.complete_constraints.append(self.inprogress_constraint)
+ self.inprogress_constraint = None
+
+ if len(self.pending_constraints) == 0:
+ # we're done!
+ self.completed = True
+
+ else:
+            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of the
+            # constraints in our list?
+
+ for cidx, pending_constraint in enumerate(self.pending_constraints):
+ if pending_constraint.does_advance(token_id):
+ stepped, complete, reset = pending_constraint.update(token_id)
+
+ if not stepped:
+ raise Exception(
+ "`constraint.update(token_id)` is not yielding incremental progress, "
+ "even though `constraint.does_advance(token_id)` is true."
+ )
+
+ if complete:
+ self.complete_constraints.append(pending_constraint)
+ self.inprogress_constraint = None
+
+ if not complete and stepped:
+ self.inprogress_constraint = pending_constraint
+
+ if complete or stepped:
+ # If we made any progress at all, then it's at least not a "pending constraint".
+
+ self.pending_constraints = (
+ self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
+ )
+
+ if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
+ # If there's no longer any pending after this and no inprogress either, then we must be
+ # complete.
+
+ self.completed = True
+
+ break # prevent accidentally stepping through multiple constraints with just one token.
+
+ return complete, stepped
+
+ def copy(self, stateful=True):
+        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
+        # throughout this process, so they remain in their initialization state.
+
+ if stateful:
+ new_state.complete_constraints = [
+ constraint.copy(stateful=True) for constraint in self.complete_constraints
+ ]
+ if self.inprogress_constraint is not None:
+ new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
+ new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
+
+ return new_state
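The `Constraint` docstring above requires that repeatedly feeding `constraint.advance()` back into `constraint.update()` terminates. Below is a minimal sketch of that loop for the two concrete constraints defined in this file, assuming the vendored `transformers` (with `torch`) is importable; the token ids are arbitrary placeholders.

```py
# Sketch of the advance()/update() contract from the Constraint docstring,
# using placeholder token ids.
from transformers.generation import DisjunctiveConstraint, PhrasalConstraint

# PhrasalConstraint: an ordered sequence of token ids must appear in the output.
phrase = PhrasalConstraint([5, 6, 7])
completed = False
while not completed:
    token = phrase.advance()                       # next id that makes progress
    stepped, completed, reset = phrase.update(token)
assert phrase.remaining() == 0

# DisjunctiveConstraint: satisfied by generating any one of several sequences.
disjunctive = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
disjunctive.update(1)                              # both branches still open
stepped, completed, reset = disjunctive.update(4)  # completes the [1, 4] branch
assert completed and disjunctive.remaining() == 0
```

In normal use these objects are not driven by hand: they are handed to constrained beam search, and `ConstraintListState` performs this bookkeeping for each beam.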
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_search.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e73862e163ddfd53122420b88614d2818681e78
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_search.py
@@ -0,0 +1,1005 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from collections import UserDict
+from typing import Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+
+from ..utils import add_start_docstrings
+from .beam_constraints import Constraint, ConstraintListState
+
+
+PROCESS_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
+ Current scores of the top `2 * num_beams` non-finished beam hypotheses.
+ next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
+ `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
+ next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
+ Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ beam_indices (`torch.LongTensor`, *optional*):
+            Beam indices indicating to which beam hypothesis each token corresponds.
+ group_index (`int`, *optional*):
+ The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`].
+
+ Return:
+ `UserDict`: A dictionary composed of the fields as defined above:
+
+ - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
+ non-finished beams.
+ - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
+ to the non-finished beam_hypotheses.
+ - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
+ indicating to which beam the next tokens shall be added.
+
+"""
+
+FINALIZE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
+ The final scores of all non-finished beams.
+ final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
+ The last tokens to be added to the non-finished beam_hypotheses.
+ final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
+ The beam indices indicating to which beam the `final_beam_tokens` shall be added.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+
+ Return:
+ `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences.
+ The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
+ due to the `eos_token_id`.
+
+"""
+
+
+class BeamScorer(ABC):
+ """
+ Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and
+ [`~PreTrainedModel.beam_sample`].
+ """
+
+ @abstractmethod
+ @add_start_docstrings(PROCESS_INPUTS_DOCSTRING)
+ def process(
+ self,
+ input_ids: torch.LongTensor,
+ next_scores: torch.FloatTensor,
+ next_tokens: torch.LongTensor,
+ next_indices: torch.LongTensor,
+ **kwargs,
+ ) -> Tuple[torch.Tensor]:
+ raise NotImplementedError("This is an abstract method.")
+
+ @abstractmethod
+ @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING)
+ def finalize(
+ self,
+ input_ids: torch.LongTensor,
+ next_scores: torch.FloatTensor,
+ next_tokens: torch.LongTensor,
+ next_indices: torch.LongTensor,
+ max_length: int,
+ **kwargs,
+ ) -> torch.LongTensor:
+ raise NotImplementedError("This is an abstract method.")
+
+
+class BeamSearchScorer(BeamScorer):
+ r"""
+ [`BeamScorer`] implementing standard beam search decoding.
+
+ Adapted in part from [Facebook's XLM beam search
+ code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529).
+
+ Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS
+ implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua)
+
+ Args:
+ batch_size (`int`):
+            Batch size of `input_ids` for which standard beam search decoding is run in parallel.
+ num_beams (`int`):
+ Number of beams for beam search.
+ device (`torch.device`):
+ Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
+ allocated.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+ `length_penalty` < 0.0 encourages shorter sequences.
+ do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
+            `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
+            heuristic is applied and the generation stops when it is very unlikely to find better candidates;
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
+ beam search algorithm).
+ num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
+ The number of beam hypotheses that shall be returned upon calling
+ [`~transformers.BeamSearchScorer.finalize`].
+ num_beam_groups (`int`, *optional*, defaults to 1):
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
+ max_length (`int`, *optional*):
+ The maximum length of the sequence to be generated.
+ """
+
+ def __init__(
+ self,
+ batch_size: int,
+ num_beams: int,
+ device: torch.device,
+ length_penalty: Optional[float] = 1.0,
+ do_early_stopping: Optional[Union[bool, str]] = False,
+ num_beam_hyps_to_keep: Optional[int] = 1,
+ num_beam_groups: Optional[int] = 1,
+ max_length: Optional[int] = None,
+ ):
+ self.num_beams = num_beams
+ self.device = device
+ self.length_penalty = length_penalty
+ self.do_early_stopping = do_early_stopping
+ self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
+ self.num_beam_groups = num_beam_groups
+ self.group_size = self.num_beams // self.num_beam_groups
+
+ self._is_init = False
+ # self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch.
+ # If group_beam_search is not used, the list consists of `batch_size` beam_hyps.
+ self._beam_hyps = [
+ BeamHypotheses(
+ num_beams=self.group_size,
+ length_penalty=self.length_penalty,
+ early_stopping=self.do_early_stopping,
+ max_length=max_length,
+ )
+ for _ in range(batch_size * self.num_beam_groups)
+ ]
+ # self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group
+ # in the i-th mini-batch is complete.
+ self._done = torch.tensor(
+ [False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device
+ )
+
+ if not isinstance(num_beams, int) or num_beams <= 1:
+ raise ValueError(
+ f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
+ " one should make use of `greedy_search` instead."
+ )
+
+ if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
+ raise ValueError(
+ "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
+ f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
+ )
+
+ @property
+ def is_done(self) -> bool:
+ return self._done.all()
+
+ def process(
+ self,
+ input_ids: torch.LongTensor,
+ next_scores: torch.FloatTensor,
+ next_tokens: torch.LongTensor,
+ next_indices: torch.LongTensor,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ beam_indices: Optional[torch.LongTensor] = None,
+ group_index: Optional[int] = 0,
+ decoder_prompt_len: Optional[int] = 0,
+ ) -> Dict[str, torch.Tensor]:
+ # add up to the length which the next_scores is calculated on (including decoder prompt)
+ cur_len = input_ids.shape[-1] + 1
+ batch_size = len(self._beam_hyps) // self.num_beam_groups
+
+ if not (batch_size == (input_ids.shape[0] // self.group_size)):
+ if self.num_beam_groups > 1:
+ raise ValueError(
+ f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
+ f"size of {self.group_size} is expected by the beam scorer."
+ )
+ else:
+ raise ValueError(
+ f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
+ f"{self.group_size} is expected by the beam scorer."
+ )
+
+ device = input_ids.device
+ next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
+ next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
+ next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+
+ for batch_idx in range(batch_size):
+ batch_group_idx = batch_idx * self.num_beam_groups + group_index
+ if self._done[batch_group_idx]:
+ if self.num_beams < len(self._beam_hyps[batch_group_idx]):
+ raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
+ if eos_token_id is None or pad_token_id is None:
+                    raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token_id have to be defined")
+ # pad the batch
+ next_beam_scores[batch_idx, :] = 0
+ next_beam_tokens[batch_idx, :] = pad_token_id
+ next_beam_indices[batch_idx, :] = 0
+ continue
+
+ # next tokens for this sentence
+ beam_idx = 0
+ for beam_token_rank, (next_token, next_score, next_index) in enumerate(
+ zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
+ ):
+ batch_beam_idx = batch_idx * self.group_size + next_index
+ # add to generated hypotheses if end of sentence
+ if (eos_token_id is not None) and (next_token.item() in eos_token_id):
+ # if beam_token does not belong to top num_beams tokens, it should not be added
+ is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
+ if is_beam_token_worse_than_top_num_beams:
+ continue
+ if beam_indices is not None:
+ beam_index = beam_indices[batch_beam_idx]
+ beam_index = beam_index + (batch_beam_idx,)
+ else:
+ beam_index = None
+
+ self._beam_hyps[batch_group_idx].add(
+ input_ids[batch_beam_idx].clone(),
+ next_score.item(),
+ beam_indices=beam_index,
+ generated_len=cur_len - decoder_prompt_len,
+ )
+ else:
+ # add next predicted token since it is not eos_token
+ next_beam_scores[batch_idx, beam_idx] = next_score
+ next_beam_tokens[batch_idx, beam_idx] = next_token
+ next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
+ beam_idx += 1
+
+ # once the beam for next step is full, don't add more tokens to it.
+ if beam_idx == self.group_size:
+ break
+
+ if beam_idx < self.group_size:
+ raise ValueError(
+ f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
+ f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
+ )
+
+ # Check if we are done so that we can save a pad step if all(done)
+ self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done(
+ next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
+ )
+
+ return UserDict(
+ {
+ "next_beam_scores": next_beam_scores.view(-1),
+ "next_beam_tokens": next_beam_tokens.view(-1),
+ "next_beam_indices": next_beam_indices.view(-1),
+ }
+ )
+
+ def finalize(
+ self,
+ input_ids: torch.LongTensor,
+ final_beam_scores: torch.FloatTensor,
+ final_beam_tokens: torch.LongTensor,
+ final_beam_indices: torch.LongTensor,
+ max_length: int,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ beam_indices: Optional[torch.LongTensor] = None,
+ decoder_prompt_len: Optional[int] = 0,
+ ) -> Tuple[torch.LongTensor]:
+ batch_size = len(self._beam_hyps) // self.num_beam_groups
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+
+ # finalize all open beam hypotheses and add to generated hypotheses
+ for batch_group_idx, beam_hyp in enumerate(self._beam_hyps):
+ if self._done[batch_group_idx]:
+ continue
+
+ # all open beam hypotheses are added to the beam hypothesis
+ # beam hypothesis class automatically keeps the best beams
+ for index_per_group in range(self.group_size):
+ batch_beam_idx = batch_group_idx * self.group_size + index_per_group
+ final_score = final_beam_scores[batch_beam_idx].item()
+ final_tokens = input_ids[batch_beam_idx]
+ beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
+ beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
+
+ # select the best hypotheses
+ sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
+ best = []
+ best_indices = []
+ best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
+
+ # retrieve best hypotheses
+ for i in range(batch_size):
+ beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups]
+ candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams]
+ sorted_hyps = sorted(candidate_beams, key=lambda x: x[0])
+ for j in range(self.num_beam_hyps_to_keep):
+ best_hyp_tuple = sorted_hyps.pop()
+ best_score = best_hyp_tuple[0]
+ best_hyp = best_hyp_tuple[1]
+ best_index = best_hyp_tuple[2]
+ sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
+
+ # append hyp to lists
+ best.append(best_hyp)
+
+ # append indices to list
+ best_indices.append(best_index)
+
+ best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
+
+ # prepare for adding eos
+ sent_lengths_max = sent_lengths.max().item() + 1
+ sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
+ decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
+
+ if len(best_indices) > 0 and best_indices[0] is not None:
+ indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
+ else:
+ indices = None
+
+ # shorter batches are padded if needed
+ if sent_lengths.min().item() != sent_lengths.max().item():
+ if pad_token_id is None:
+ raise ValueError("`pad_token_id` has to be defined")
+ decoded.fill_(pad_token_id)
+
+ if indices is not None:
+ indices.fill_(-1)
+
+ # fill with hypotheses and eos_token_id if the latter fits in
+ for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
+ decoded[i, : sent_lengths[i]] = hypo
+
+ if indices is not None:
+ indices[i, : len(best_idx)] = torch.tensor(best_idx)
+
+ if sent_lengths[i] < sent_max_len:
+ # inserting only the first eos_token_id
+ decoded[i, sent_lengths[i]] = eos_token_id[0]
+
+ return UserDict(
+ {
+ "sequences": decoded,
+ "sequence_scores": best_scores,
+ "beam_indices": indices,
+ }
+ )
+
+
+class ConstrainedBeamSearchScorer(BeamScorer):
+ r"""
+ [`BeamScorer`] implementing constrained beam search decoding.
+
+
+ Args:
+ batch_size (`int`):
+ Batch Size of `input_ids` for which standard beam search decoding is run in parallel.
+ num_beams (`int`):
+ Number of beams for beam search.
+ constraints (`List[Constraint]`):
+ A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation
+ output. For more information, see the documentation of [`Constraint`].
+ device (`torch.device`):
+ Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
+ allocated.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+ `length_penalty` < 0.0 encourages shorter sequences.
+ do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
+ heuristic is applied and the generation stops when it is very unlikely to find better candidates;
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
+ beam search algorithm).
+ num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
+ The number of beam hypotheses that shall be returned upon calling
+ [`~transformers.BeamSearchScorer.finalize`].
+ num_beam_groups (`int`, *optional*, defaults to 1):
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
+ max_length (`int`, *optional*):
+ The maximum length of the sequence to be generated.
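+
+ Examples (an illustrative sketch rather than part of this module -- in practice this scorer is created and
+ driven by [`~generation.GenerationMixin.generate`], and the checkpoint name below is only a placeholder):
+
+ ```python
+ >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+
+ >>> inputs = tokenizer("translate English to German: How old are you?", return_tensors="pt")
+ >>> # force the word "Sie" to appear somewhere in the output via a positive constraint
+ >>> constraint = PhrasalConstraint(tokenizer("Sie", add_special_tokens=False).input_ids)
+ >>> outputs = model.generate(**inputs, constraints=[constraint], num_beams=5)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+ ```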
+ """
+
+ def __init__(
+ self,
+ batch_size: int,
+ num_beams: int,
+ constraints: List[Constraint],
+ device: torch.device,
+ length_penalty: Optional[float] = 1.0,
+ do_early_stopping: Optional[Union[bool, str]] = False,
+ num_beam_hyps_to_keep: Optional[int] = 1,
+ num_beam_groups: Optional[int] = 1,
+ max_length: Optional[int] = None,
+ ):
+ self.num_beams = num_beams
+ self.device = device
+ self.length_penalty = length_penalty
+ self.do_early_stopping = do_early_stopping
+ self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
+ self.num_beam_groups = num_beam_groups
+ self.group_size = self.num_beams // self.num_beam_groups
+ self.constraints = constraints
+
+ self._is_init = False
+ self._beam_hyps = [
+ BeamHypotheses(
+ num_beams=self.num_beams,
+ length_penalty=self.length_penalty,
+ early_stopping=self.do_early_stopping,
+ max_length=max_length,
+ )
+ for _ in range(batch_size)
+ ]
+ self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
+
+ if not isinstance(num_beams, int) or num_beams <= 1:
+ raise ValueError(
+ f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
+ " one should make use of `greedy_search` instead."
+ )
+
+ if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
+ raise ValueError(
+ "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
+ f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
+ )
+
+ @property
+ def is_done(self) -> bool:
+ return self._done.all()
+
+ def make_constraint_states(self, n):
+ return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)]
+
+ def check_completes_constraints(self, sequence):
+ new_state = self.make_constraint_states(1)[0]
+ new_state.reset(sequence)
+ return new_state.completed
+
+ def process(
+ self,
+ input_ids: torch.LongTensor,
+ next_scores: torch.FloatTensor,
+ next_tokens: torch.LongTensor,
+ next_indices: torch.LongTensor,
+ scores_for_all_vocab: torch.FloatTensor,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ beam_indices: Optional[torch.LongTensor] = None,
+ decoder_prompt_len: Optional[int] = 0,
+ ) -> Tuple[torch.Tensor]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
+ Current scores of the top `2 * num_beams` non-finished beam hypotheses.
+ next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
+ `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
+ next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
+ Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
+ scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, vocab_size)`):
+ The scores of all tokens in the vocabulary for each of the beam hypotheses.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ beam_indices (`torch.LongTensor`, *optional*):
+ Beam indices indicating to which beam hypothesis each token corresponds.
+ decoder_prompt_len (`int`, *optional*):
+ The length of the prompt that is included in the input to the decoder.
+ Return:
+ `UserDict`: A dictionary composed of the fields as defined above:
+
+ - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of
+ all non-finished beams.
+ - **next_beam_tokens** (`torch.LongTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be
+ added to the non-finished beam_hypotheses.
+ - **next_beam_indices** (`torch.LongTensor` of shape `(batch_size * num_beams)`) -- Beam indices
+ indicating to which beam the next tokens shall be added.
+ """
+
+ # add up to the length which the next_scores is calculated on (including decoder prompt)
+ cur_len = input_ids.shape[-1] + 1
+ batch_size = len(self._beam_hyps)
+ if not (batch_size == (input_ids.shape[0] // self.group_size)):
+ if self.num_beam_groups > 1:
+ raise ValueError(
+ f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
+ f"size of {self.group_size} is expected by the beam scorer."
+ )
+ else:
+ raise ValueError(
+ f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
+ f"{self.group_size} is expected by the beam scorer."
+ )
+
+ device = input_ids.device
+
+ next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
+ next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
+ next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
+ if self._done[batch_idx]:
+ if self.num_beams < len(beam_hyp):
+ raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
+ if eos_token_id is None or pad_token_id is None:
+ raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
+ # pad the batch
+ next_beam_scores[batch_idx, :] = 0
+ next_beam_tokens[batch_idx, :] = pad_token_id
+ next_beam_indices[batch_idx, :] = 0
+ continue
+
+ # next tokens for this sentence.
+ beam_idx = 0
+ for beam_token_rank, (next_token, next_score, next_index) in enumerate(
+ zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
+ ):
+ batch_beam_idx = batch_idx * self.group_size + next_index
+ # add to generated hypotheses if end of sentence
+ if (eos_token_id is not None) and (next_token.item() in eos_token_id):
+ # if beam_token does not belong to top num_beams tokens, it should not be added
+ is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
+ if is_beam_token_worse_than_top_num_beams:
+ continue
+
+ completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist())
+ if completes_constraint:
+ if beam_indices is not None:
+ beam_index = beam_indices[batch_beam_idx]
+ beam_index = beam_index + (batch_beam_idx,)
+ else:
+ beam_index = None
+
+ beam_hyp.add(
+ input_ids[batch_beam_idx].clone(),
+ next_score.item(),
+ beam_indices=beam_index,
+ generated_len=cur_len - decoder_prompt_len,
+ )
+ else:
+ # add next predicted token since it is not eos_token
+ next_beam_scores[batch_idx, beam_idx] = next_score
+ next_beam_tokens[batch_idx, beam_idx] = next_token
+ next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
+ beam_idx += 1
+
+ # once the beam for next step is full, don't add more tokens to it.
+ if beam_idx == self.group_size:
+ break
+
+ new_scores, new_tokens, new_indices = self.step_sentence_constraint(
+ batch_idx,
+ input_ids,
+ scores_for_all_vocab,
+ next_beam_scores[batch_idx],
+ next_beam_tokens[batch_idx],
+ next_beam_indices[batch_idx],
+ )
+
+ next_beam_scores[batch_idx] = new_scores
+ next_beam_tokens[batch_idx] = new_tokens
+ next_beam_indices[batch_idx] = new_indices
+
+ if beam_idx < self.group_size:
+ raise ValueError(
+ f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
+ f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
+ )
+
+ # Check if we are done so that we can save a pad step if all(done)
+ self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
+ next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
+ )
+
+ return UserDict(
+ {
+ "next_beam_scores": next_beam_scores.view(-1),
+ "next_beam_tokens": next_beam_tokens.view(-1),
+ "next_beam_indices": next_beam_indices.view(-1),
+ }
+ )
+
+ def step_sentence_constraint(
+ self,
+ batch_idx: int,
+ input_ids: torch.LongTensor,
+ vocab_scores: torch.FloatTensor,
+ sent_beam_scores: torch.FloatTensor,
+ sent_beam_tokens: torch.LongTensor,
+ sent_beam_indices: torch.LongTensor,
+ push_progress: bool = False,
+ ):
+ # sent_beam_tokens are the next {num_beams} tokens that are under consideration for this beam
+ # (candidate next tokens)
+
+ # 1. Adding "advance_tokens"
+ # using ConstraintListState.advance(), we propose new tokens to be added into this "candidate list" that will
+ # advance us in fulfilling the constraints.
+
+ # 2. Selecting the best candidates such that we end up with the highest-probability candidates
+ # that fulfill our constraints.
+
+ orig_len = sent_beam_indices.size(0)
+ device = sent_beam_indices.device
+
+ # initialize states
+ topk_constraint_states = self.make_constraint_states(orig_len)
+ advance_constraint_states = self.make_constraint_states(orig_len)
+
+ sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len
+ this_batch_input_ids = input_ids[sidx:eidx]
+ this_batch_token_scores = vocab_scores[sidx:eidx]
+ full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1)
+
+ # need to make new hypotheses that advance the constraints
+ track_new = {
+ "new_seqs": full_hypotheses.tolist(),
+ "new_states": [],
+ "new_indices": [],
+ "new_tokens": [],
+ "new_scores": [],
+ }
+ for seq_idx, pre_seq in enumerate(this_batch_input_ids):
+ # pre_seq = ith sequence generated before this step.
+
+ # input_ids -> (topk) generic beam search best model next tokens
+ # -> (advance) constraints forcing the next token
+ # either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of
+ # hypotheses.
+
+ topk_state = topk_constraint_states[seq_idx]
+ topk_state.reset(full_hypotheses[seq_idx].cpu().tolist())
+
+ advance_state = advance_constraint_states[seq_idx]
+ advance_state.reset(pre_seq.cpu().tolist())
+
+ if not advance_state.completed:
+ advance_tokens = torch.LongTensor(advance_state.advance()).to(device)
+ for advance_token in advance_tokens:
+ # since adding each `advance_token` leads to a different hypothesis, create new state instance.
+ new_state = advance_state.copy(stateful=True)
+ new_state.add(advance_token.cpu().tolist())
+
+ advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist()
+ if advance_seq not in track_new["new_seqs"]:
+ # prevent duplicates, which are basically bound to happen in this process.
+ track_new["new_seqs"].append(advance_seq)
+ track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches
+ track_new["new_tokens"].append(advance_token)
+ track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token))
+ track_new["new_states"].append(new_state)
+ elif push_progress:
+ # Basically, `sent_beam_indices` often selects only a few of the generated sequences in `input_ids` that
+ # actually fulfill our constraints. For example, let constraints == ["loves pies"] and
+
+ # pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and"
+
+ # Without this step, if `sent_beam_indices` is something like [1,1], then
+ # 1. `pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and
+ # 2. it won't be added to the list of (advance) hypotheses since it's completed already (it fails the
+ # `if not advance_state.completed` check and falls into this `elif push_progress` branch)
+ # 3. it ends up simply getting removed from consideration.
+
+ # #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways,
+ # especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam
+ # search times, since completed sequences keep getting removed after all this effort for constrained
+ # generation.
+
+ # Here, we basically take `pre_seq_1` and "push" it back into the considered list of hypotheses, by simply
+ # appending the most likely next token in the vocabulary to it.
+
+ new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token
+ advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1)
+
+ advance_state = advance_constraint_states[seq_idx]
+
+ advance_seq = advance_seq.cpu().tolist()
+
+ advance_state.reset(advance_seq)
+ if advance_seq not in track_new["new_seqs"]:
+ # but still don't want to have duplicates
+ track_new["new_seqs"].append(advance_seq)
+ track_new["new_indices"].append(seq_idx)
+ track_new["new_tokens"].append(new_token)
+ track_new["new_scores"].append(new_score)
+ track_new["new_states"].append(advance_state)
+
+ if len(track_new["new_indices"]) > 0:
+ new_indices = torch.tensor(track_new["new_indices"]).to(device)
+ new_tokens = torch.stack(track_new["new_tokens"]).to(device)
+ new_scores = torch.stack(track_new["new_scores"]).to(device)
+
+ all_states = topk_constraint_states + track_new["new_states"]
+ all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1)
+ all_scores = torch.cat((sent_beam_scores, new_scores), -1)
+ all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device)
+
+ zipped = all_banks * 100 + all_scores
+ indices = zipped.sort(descending=True).indices
+ sorted_banks = all_banks[indices]
+
+ # Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0}
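+ # Illustrative numbers (not from the source): banks [2, 2, 1] with scores [-1.2, -0.7, -0.3] give keys
+ # [198.8, 199.3, 99.7], so both bank-2 beams (more constraint progress) outrank the bank-1 beam, and within
+ # a bank the higher-scoring beam comes first. The stable argsort over `increments` below then interleaves
+ # the banks round-robin, so every bank contributes its best beam before any bank contributes its second.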
+
+ counter = -1
+ cur_bank = sorted_banks[0]
+ increments = []
+ for bank in sorted_banks:
+ if bank == cur_bank:
+ counter += 1
+ else:
+ counter = 0
+ cur_bank = bank
+ increments.append(counter)
+ rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))
+
+ indices = indices[rearrangers][:orig_len]
+
+ sent_beam_scores = all_scores[indices]
+ sent_beam_tokens = all_tokens[indices]
+ sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices]
+
+ return sent_beam_scores, sent_beam_tokens, sent_beam_indices
+
+ def finalize(
+ self,
+ input_ids: torch.LongTensor,
+ final_beam_scores: torch.FloatTensor,
+ final_beam_tokens: torch.LongTensor,
+ final_beam_indices: torch.LongTensor,
+ max_length: int,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ beam_indices: Optional[torch.LongTensor] = None,
+ decoder_prompt_len: Optional[int] = 0,
+ ) -> Tuple[torch.LongTensor]:
+ batch_size = len(self._beam_hyps)
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+
+ # finalize all open beam hypotheses and add to generated hypotheses
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
+ if self._done[batch_idx]:
+ continue
+
+ # all open beam hypotheses are added to the beam hypothesis
+ # beam hypothesis class automatically keeps the best beams
+
+ ids_collect = []
+ for beam_id in range(self.num_beams):
+ batch_beam_idx = batch_idx * self.num_beams + beam_id
+ final_score = final_beam_scores[batch_beam_idx].item()
+ final_tokens = input_ids[batch_beam_idx]
+
+ completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist())
+ if completes_constraint:
+ beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
+ beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
+ ids_collect.append(beam_id)
+
+ # due to overly complex constraints or other factors, sometimes we can't guarantee a successful
+ # generation. In these cases we simply return the highest scoring outputs.
+ if len(ids_collect) < self.num_beam_hyps_to_keep:
+ for beam_id in range(self.num_beams):
+ if beam_id not in ids_collect:
+ batch_beam_idx = batch_idx * self.num_beams + beam_id
+ final_score = final_beam_scores[batch_beam_idx].item()
+ final_tokens = input_ids[batch_beam_idx]
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
+ beam_hyp.add(final_tokens, final_score, generated_len=generated_len)
+ if len(ids_collect) >= self.num_beam_hyps_to_keep:
+ break
+
+ # select the best hypotheses
+ sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
+ best = []
+ best_indices = []
+ best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
+
+ # retrieve best hypotheses
+ for i, beam_hyp in enumerate(self._beam_hyps):
+ sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
+ for j in range(self.num_beam_hyps_to_keep):
+ best_hyp_tuple = sorted_hyps.pop()
+ best_score = best_hyp_tuple[0]
+ best_hyp = best_hyp_tuple[1]
+ best_index = best_hyp_tuple[2]
+ sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
+
+ # append to lists
+ best.append(best_hyp)
+
+ # append indices to list
+ best_indices.append(best_index)
+
+ best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
+
+ # prepare for adding eos
+ sent_lengths_max = sent_lengths.max().item() + 1
+
+ sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
+ decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
+
+ if len(best_indices) > 0 and best_indices[0] is not None:
+ indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
+ else:
+ indices = None
+
+ # shorter batches are padded if needed
+ if sent_lengths.min().item() != sent_lengths.max().item():
+ if pad_token_id is None:
+ raise ValueError("`pad_token_id` has to be defined")
+ decoded.fill_(pad_token_id)
+
+ if indices is not None:
+ indices.fill_(-1)
+
+ # fill with hypotheses and eos_token_id if the latter fits in
+ for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
+ decoded[i, : sent_lengths[i]] = hypo
+
+ if indices is not None:
+ indices[i, : len(best_idx)] = torch.tensor(best_idx)
+
+ if sent_lengths[i] < sent_max_len:
+ # inserting only the first eos_token_id
+ decoded[i, sent_lengths[i]] = eos_token_id[0]
+
+ return UserDict(
+ {
+ "sequences": decoded,
+ "sequence_scores": best_scores,
+ "beam_indices": indices,
+ }
+ )
+
+
+class BeamHypotheses:
+ def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None):
+ """
+ Initialize n-best list of hypotheses.
+ """
+ self.length_penalty = length_penalty
+ self.early_stopping = early_stopping
+ self.max_length = max_length
+ self.num_beams = num_beams
+ self.beams = []
+ self.worst_score = 1e9
+
+ if not isinstance(self.early_stopping, bool) and self.max_length is None:
+ raise ValueError(
+ "When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the"
+ " BeamScorer class instance at initialization time."
+ )
+
+ def __len__(self):
+ """
+ Number of hypotheses in the list.
+ """
+ return len(self.beams)
+
+ def add(
+ self,
+ hyp: torch.LongTensor,
+ sum_logprobs: float,
+ beam_indices: Optional[torch.LongTensor] = None,
+ generated_len: Optional[int] = None,
+ ):
+ """
+ Add a new hypothesis to the list.
+ """
+ if generated_len is not None:
+ score = sum_logprobs / (generated_len**self.length_penalty)
+ # This 'else' case exists for backward compatibility
+ else:
+ score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty)
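+ # Illustrative numbers (not from the source): sum_logprobs=-6.0, generated_len=4 and length_penalty=2.0
+ # give score = -6.0 / 4**2.0 = -0.375; since log-probabilities are negative, a larger denominator raises
+ # the score, which is why length_penalty > 0.0 favors longer sequences.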
+
+ if len(self) < self.num_beams or score > self.worst_score:
+ self.beams.append((score, hyp, beam_indices))
+ if len(self) > self.num_beams:
+ sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)])
+ del self.beams[sorted_next_scores[0][1]]
+ self.worst_score = sorted_next_scores[1][0]
+ else:
+ self.worst_score = min(score, self.worst_score)
+
+ def is_done(self, best_sum_logprobs: float, cur_len: int, decoder_prompt_len: Optional[int] = 0) -> bool:
+ """
+ If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
+ one in the heap, then we are done with this sentence.
+ """
+
+ if len(self) < self.num_beams:
+ return False
+
+ # `True`: stop as soon as at least `num_beams` hypotheses are finished
+ if self.early_stopping is True:
+ return True
+ # `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate
+ # when `length_penalty` is positive. See the discussion below for more details.
+ # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
+ elif self.early_stopping is False:
+ highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
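+ # Illustrative numbers (not from the source): with length_penalty=1.0, 10 generated tokens,
+ # best_sum_logprobs=-4.0 and worst_score=-0.3, highest_attainable_score is -0.4; the worst finished
+ # hypothesis (-0.3) already beats it, so generation for this sentence is considered done.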
+ ret = self.worst_score >= highest_attainable_score
+ return ret
+ # `"never"`: compute the best possible score, depending on the signal of `length_penalty`
+ else:
+ # `length_penalty` > 0.0 -> max denominator is obtained from `max_length`, not from `cur_len` -> min
+ # abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain
+ # its max this way
+ if self.length_penalty > 0.0:
+ if self.max_length <= decoder_prompt_len:
+ raise ValueError("max_length is not larger than decoder prompt length")
+ highest_attainable_score = (
+ best_sum_logprobs / (self.max_length - decoder_prompt_len) ** self.length_penalty
+ )
+ # the opposite logic applies here (max `highest_attainable_score` from `cur_len`)
+ else:
+ highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
+ ret = self.worst_score >= highest_attainable_score
+ return ret
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/candidate_generator.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/candidate_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff4eea9765f0938ff7d97b003c16457bb6a964e0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/candidate_generator.py
@@ -0,0 +1,410 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
+
+import torch
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+ from .configuration_utils import GenerationConfig
+ from .logits_process import LogitsProcessorList
+
+
+class CandidateGenerator:
+ """Abstract base class for all candidate generators that can be applied during assisted generation."""
+
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
+ """
+ Fetches the candidates to be tried for the current input.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+
+ Return:
+ `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
+ assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length,
+ vocabulary_size)` containing the logits associated to each candidate.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`."
+ )
+
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
+ """
+ Updates the candidate generation strategy based on the outcomes.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
+ using beam search, or log softmax for each vocabulary token when using beam search.
+ num_matches (`int`):
+ The number of matches between the candidate sequences and the model predictions.
+ """
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can call "
+ "`update_candidate_strategy`."
+ )
+
+
+class AssistedCandidateGenerator(CandidateGenerator):
+ """
+ `CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates
+ candidates through the use of a smaller model. Read the following blog post for more information:
+ https://huggingface.co/blog/assisted-generation
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+ assistant_model (`PreTrainedModel`):
+ The model to be used for generating candidates. This model should be smaller than the main model.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call.
+ logits_processor (`LogitsProcessorList`):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ model_kwargs (`Dict`):
+ The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant
+ model as well.
+ inputs_tensor (`torch.Tensor`, *optional*):
+ The model input tensor. In encoder-decoder models, this is the encoder input.
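+
+ Examples (an illustrative sketch rather than part of this module -- in practice this class is instantiated by
+ [`~generation.GenerationMixin.generate`] when `assistant_model` is passed; the checkpoint names are
+ placeholders for any main/assistant pair that shares a tokenizer):
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("gpt2-xl")
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2-xl")
+ >>> assistant_model = AutoModelForCausalLM.from_pretrained("gpt2")  # smaller model, same tokenizer
+
+ >>> inputs = tokenizer("The capital of France is", return_tensors="pt")
+ >>> outputs = model.generate(
+ ...     **inputs, assistant_model=assistant_model, max_new_tokens=20, pad_token_id=tokenizer.eos_token_id
+ ... )
+ >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```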
+ """
+
+ def __init__(
+ self,
+ input_ids: torch.LongTensor,
+ assistant_model: "PreTrainedModel",
+ generation_config: "GenerationConfig",
+ logits_processor: "LogitsProcessorList",
+ model_kwargs: Dict,
+ inputs_tensor: Optional[torch.Tensor] = None,
+ ):
+ # Make sure all data is on the same device as the assistant model
+ device = assistant_model.device
+ input_ids = input_ids.to(device)
+ if inputs_tensor is not None:
+ inputs_tensor = inputs_tensor.to(device)
+
+ # Prepare the assistant and the starting number of candidate tokens
+ self.assistant_model = assistant_model
+ self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens
+
+ # Prepare the kwargs for the assistant model
+ assistant_kwargs = {}
+ for key, value in model_kwargs.items(): # deepcopy crashes if we attempt to copy encoder outputs with grads
+ if key not in ("encoder_outputs", "assistant_encoder_outputs"):
+ assistant_kwargs[key] = (
+ value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value)
+ )
+
+ if "assistant_encoder_outputs" in model_kwargs:
+ assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"]
+ elif assistant_model.config.is_encoder_decoder:
+ inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs(
+ inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs
+ )
+ assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(
+ inputs_tensor, assistant_kwargs, model_input_name
+ )
+ elif "encoder_outputs" in model_kwargs:
+ assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"]
+ self.assistant_kwargs = assistant_kwargs
+
+ # Prepare assistant model's keys of inputs
+ if assistant_model.config.is_encoder_decoder:
+ # both are encoder-decoder
+ self.input_ids_key = "decoder_input_ids"
+ self.attention_key = "decoder_attention_mask"
+ elif "encoder_outputs" in assistant_kwargs:
+ # special case for encoder-decoder with decoder-only assistant (like DistilWhisper)
+ self.input_ids_key = "input_ids"
+ self.attention_key = "attention_mask"
+ self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get(
+ "decoder_attention_mask",
+ torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long),
+ )
+ else:
+ # both are decoder-only
+ self.input_ids_key = "input_ids"
+ self.attention_key = "attention_mask"
+
+ # Prepare generation-related options.
+ eos_token_id = generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ self.eos_token_id_tensor = (
+ torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
+ )
+ self.logits_processor = logits_processor
+ self.generation_config = copy.deepcopy(generation_config)
+ self.generation_config.return_dict_in_generate = True
+ self.generation_config.output_scores = True
+
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
+ """
+ Fetches the candidates to be tried for the current input.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+
+ Return:
+ `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
+ assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,
+ vocabulary_size)` containing the logits associated to each candidate.
+ """
+ input_ids = input_ids.to(self.assistant_model.device)
+
+ # Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
+ new_cur_len = input_ids.shape[-1]
+ max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1)
+ if max_new_tokens == 0:
+ return input_ids, None
+
+ # 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length
+ # (which implicitly contains the number of accepted candidates from the previous round)
+ has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None
+ if has_past_key_values:
+ new_cache_size = new_cur_len - 1
+ self.assistant_kwargs["past_key_values"] = _crop_past_key_values(
+ self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1
+ ) # the assistant does not have the token after the last match, hence the -1
+
+ self.assistant_kwargs = _prepare_attention_mask(
+ self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder
+ )
+ self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len)
+
+ # 2. Forecast next N tokens using the assistant model.
+ assistant_generation_kwargs = {
+ self.input_ids_key: input_ids,
+ "max_new_tokens": max_new_tokens,
+ "generation_config": self.generation_config,
+ "logits_processor": self.logits_processor,
+ }
+
+ assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs)
+
+ # 3. Update variables for the next round of candidate generation
+ self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values
+
+ # 4. Prepare variables for output
+ candidate_logits = torch.stack(assistant_output.scores, dim=1)
+ candidate_ids = assistant_output.sequences
+ return candidate_ids, candidate_logits
+
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
+ """
+ Updates the candidate generation strategy based on the outcomes.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
+ using beam search, or log softmax for each vocabulary token when using beam search.
+ num_matches (`int`):
+ The number of matches between the candidate sequences and the model predictions.
+ """
+ # Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic that
+ # can probably be improved -- we want to balance the benefits of getting assistant tokens correct with the
+ # cost of forecasting incorrect assistant tokens.
+ if self.assistant_model.generation_config.num_assistant_tokens_schedule in {
+ "heuristic",
+ "heuristic_transient",
+ }:
+ if num_matches == int(self.num_assistant_tokens):
+ self.num_assistant_tokens += 2.0
+ else:
+ self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0)
+
+
+class PromptLookupCandidateGenerator(CandidateGenerator):
+ """
+ `CandidateGenerator` class to be used for prompt lookup generation. This class generates candidates by looking up
+ likely continuations in the provided prompt (input_ids) itself.
+ Read the following blog post for more information: https://github.com/apoorvumang/prompt-lookup-decoding
+
+ Args:
+ max_matching_ngram_size (`int`):
+ The maximum ngram size to be considered for matching in the prompt.
+ num_output_tokens (`int`):
+ The number of tokens to be output as candidate tokens.
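+
+ Examples (an illustrative sketch rather than part of this module -- in practice this class is instantiated by
+ [`~generation.GenerationMixin.generate`] when `prompt_lookup_num_tokens` is passed; the checkpoint name is a
+ placeholder):
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+
+ >>> prompt = "The quick brown fox jumps over the lazy dog. The quick brown"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+ >>> outputs = model.generate(
+ ...     **inputs, prompt_lookup_num_tokens=3, max_new_tokens=10, pad_token_id=tokenizer.eos_token_id
+ ... )
+ ```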
+ """
+
+ def __init__(
+ self,
+ num_output_tokens: int = 10,
+ max_matching_ngram_size: Optional[int] = None,
+ ):
+ self.num_output_tokens = num_output_tokens
+ self.max_matching_ngram_size = max_matching_ngram_size if max_matching_ngram_size else 2
+
+ if self.max_matching_ngram_size <= 0 or self.num_output_tokens <= 0:
+ raise ValueError("Invalid max_matching_ngram_size or num_output_tokens")
+
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
+ """
+ Fetches the candidates to be tried for the current input.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+
+ Return:
+ `torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried.
+ """
+ input_length = input_ids.size(1)
+
+ chosen_ids = None
+ match_found = False
+ for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1):
+ # Create sliding windows of size ngram_size
+ windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
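+ # `windows` has shape (batch_size, input_length - ngram_size + 1, ngram_size): every contiguous ngram of
+ # the prompt, so the trailing ngram can be compared against all earlier positions in a single comparison.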
+
+ # Convert ngram to a tensor for comparison
+ ngram_tensor = input_ids[0, -ngram_size:]
+
+ # Find where the windows match the ngram
+ matches = (windows == ngram_tensor).all(dim=2)
+
+ # Get the indices of matches
+ match_indices = matches.nonzero(as_tuple=True)[1]
+
+ # Iterate through match indices to find a valid continuation
+ for idx in match_indices:
+ start_idx = idx + ngram_size
+ end_idx = start_idx + self.num_output_tokens
+ end_idx = min(end_idx, input_length)
+
+ if start_idx < end_idx:
+ chosen_ids = input_ids[0, start_idx:end_idx]
+ match_found = True
+ break
+ if match_found:
+ break
+
+ if chosen_ids is None or len(chosen_ids) == 0:
+ # If we didn't find a match, return the input sequence unchanged and revert back to autoregressive decoding
+ return input_ids, None
+
+ # Now we need to extend input_ids with chosen_ids
+ chosen_ids = chosen_ids.unsqueeze(0)
+ candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1)
+ # assisted_generation expects logits as well, but we don't have those here, so returning None
+ return candidate_input_ids, None
+
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
+ """
+ Updates the candidate generation strategy based on the outcomes.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
+ using beam search, or log softmax for each vocabulary token when using beam search.
+ num_matches (`int`):
+ The number of matches between the candidate sequences and the model predictions.
+ """
+ # Currently does nothing
+ return
+
+
+def _crop_past_key_values(model, past_key_values, maximum_length):
+ """Crops the past key values up to a certain maximum length."""
+ new_past = []
+ if model.config.is_encoder_decoder:
+ for idx in range(len(past_key_values)):
+ new_past.append(
+ (
+ past_key_values[idx][0][:, :, :maximum_length, :],
+ past_key_values[idx][1][:, :, :maximum_length, :],
+ past_key_values[idx][2],
+ past_key_values[idx][3],
+ )
+ )
+ past_key_values = tuple(new_past)
+ # bloom is special
+ elif "bloom" in model.__class__.__name__.lower() or (
+ model.config.architectures is not None and "bloom" in model.config.architectures[0].lower()
+ ):
+ for idx in range(len(past_key_values)):
+ new_past.append(
+ (
+ past_key_values[idx][0][:, :, :maximum_length],
+ past_key_values[idx][1][:, :maximum_length, :],
+ )
+ )
+ past_key_values = tuple(new_past)
+ # gptbigcode is too
+ elif "gptbigcode" in model.__class__.__name__.lower() or (
+ model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower()
+ ):
+ if model.config.multi_query:
+ for idx in range(len(past_key_values)):
+ past_key_values[idx] = past_key_values[idx][:, :maximum_length, :]
+ else:
+ for idx in range(len(past_key_values)):
+ past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :]
+ else:
+ for idx in range(len(past_key_values)):
+ new_past.append(
+ (
+ past_key_values[idx][0][:, :, :maximum_length, :],
+ past_key_values[idx][1][:, :, :maximum_length, :],
+ )
+ )
+ past_key_values = tuple(new_past)
+ return past_key_values
+
+
+def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]:
+ """Expands or crops the model's mask for decoding purposes, to the defined length"""
+
+ mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask"
+ if mask_key not in model_kwargs:
+ return model_kwargs
+
+ mask = model_kwargs[mask_key]
+ mask_length_diff = new_length - mask.shape[1]
+
+ if mask_length_diff < 0:
+ model_kwargs[mask_key] = mask[:, :mask_length_diff]
+ elif mask_length_diff > 0:
+ model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1)
+ return model_kwargs
+
+
+def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]:
+ """Expands or crops the model's token_type_ids for decoding purposes, to the defined length"""
+ if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None:
+ return model_kwargs
+
+ token_type_ids = model_kwargs["token_type_ids"]
+ final_token_type = token_type_ids[:, -1].unsqueeze(-1)
+ type_length_diff = new_length - token_type_ids.shape[1]
+
+ if type_length_diff < 0:
+ token_type_ids = token_type_ids[:, :type_length_diff]
+ elif type_length_diff > 0:
+ token_type_copies = final_token_type.repeat(1, type_length_diff)
+ model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1)
+ return model_kwargs
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/configuration_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/configuration_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f40960c213ea67372c9200e0bdfc77ea94892b26
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/configuration_utils.py
@@ -0,0 +1,1092 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Generation configuration class and utilities."""
+
+import copy
+import json
+import os
+import warnings
+from typing import TYPE_CHECKING, Any, Dict, Optional, Union
+
+from .. import __version__
+from ..configuration_utils import PretrainedConfig
+from ..utils import (
+ GENERATION_CONFIG_NAME,
+ ExplicitEnum,
+ PushToHubMixin,
+ cached_file,
+ download_url,
+ extract_commit_hash,
+ is_remote_url,
+ logging,
+)
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+
+logger = logging.get_logger(__name__)
+METADATA_FIELDS = ("_from_model_config", "_commit_hash", "_original_object_hash", "transformers_version")
+
+
+class GenerationMode(ExplicitEnum):
+ """
+ Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.
+ """
+
+ # Non-beam methods
+ CONTRASTIVE_SEARCH = "contrastive_search"
+ GREEDY_SEARCH = "greedy_search"
+ SAMPLE = "sample"
+ ASSISTED_GENERATION = "assisted_generation"
+ # Beam methods
+ BEAM_SEARCH = "beam_search"
+ BEAM_SAMPLE = "beam_sample"
+ CONSTRAINED_BEAM_SEARCH = "constrained_beam_search"
+ GROUP_BEAM_SEARCH = "group_beam_search"
+
+
+class GenerationConfig(PushToHubMixin):
+ # no-format
+ r"""
+ Class that holds a configuration for a generation task. A `generate` call supports the following generation methods
+ for text-decoder, text-to-text, speech-to-text, and vision-to-text models:
+
+ - *greedy decoding* by calling [`~generation.GenerationMixin._greedy_search`] if `num_beams=1` and
+ `do_sample=False`
+ - *contrastive search* by calling [`~generation.GenerationMixin._contrastive_search`] if `penalty_alpha>0.`
+ and `top_k>1`
+ - *multinomial sampling* by calling [`~generation.GenerationMixin._sample`] if `num_beams=1` and
+ `do_sample=True`
+ - *beam-search decoding* by calling [`~generation.GenerationMixin._beam_search`] if `num_beams>1` and
+ `do_sample=False`
+ - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin._beam_sample`] if
+ `num_beams>1` and `do_sample=True`
+ - *diverse beam-search decoding* by calling [`~generation.GenerationMixin._group_beam_search`], if
+ `num_beams>1` and `num_beam_groups>1`
+ - *constrained beam-search decoding* by calling [`~generation.GenerationMixin._constrained_beam_search`], if
+ `constraints!=None` or `force_words_ids!=None`
+ - *assisted decoding* by calling [`~generation.GenerationMixin._assisted_decoding`], if
+ `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()`
+
+ You do not need to call any of the above methods directly. Pass custom parameter values to `.generate()`. To learn
+ more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
+
+
+
+ A large number of these flags control the logits or the stopping criteria of the generation. Make sure you check
+ the [generate-related classes](https://huggingface.co/docs/transformers/internal/generation_utils) for a full
+ description of the possible manipulations, as well as examples of their usage.
+
+
+
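+ A minimal usage sketch (the checkpoint name is only a placeholder): build a configuration once and reuse it
+ across `generate` calls.
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+
+ >>> generation_config = GenerationConfig(
+ ...     max_new_tokens=20, do_sample=True, top_k=50, pad_token_id=tokenizer.eos_token_id
+ ... )
+ >>> inputs = tokenizer("The quick brown fox", return_tensors="pt")
+ >>> outputs = model.generate(**inputs, generation_config=generation_config)
+ ```
+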
+ Args:
+ > Parameters that control the length of the output
+
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length the generated tokens can have. Corresponds to the length of the input prompt +
+ `max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set.
+ max_new_tokens (`int`, *optional*):
+ The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
+ min_length (`int`, *optional*, defaults to 0):
+ The minimum length of the sequence to be generated. Corresponds to the length of the input prompt +
+ `min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set.
+ min_new_tokens (`int`, *optional*):
+ The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt.
+ early_stopping (`bool` or `str`, *optional*, defaults to `False`):
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
+ heuristic is applied and the generation stops when it is very unlikely to find better candidates;
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
+ beam search algorithm).
+ max_time (`float`, *optional*):
+ The maximum amount of time you allow the computation to run for in seconds. Generation will still finish
+ the current pass after the allocated time has been passed.
+
+ > Parameters that control the generation strategy used
+
+ do_sample (`bool`, *optional*, defaults to `False`):
+ Whether or not to use sampling; use greedy decoding otherwise.
+ num_beams (`int`, *optional*, defaults to 1):
+ Number of beams for beam search. 1 means no beam search.
+ num_beam_groups (`int`, *optional*, defaults to 1):
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
+ penalty_alpha (`float`, *optional*):
+ The value balances the model confidence and the degeneration penalty in contrastive search decoding.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should use the past last key/values attentions (if applicable to the model) to
+ speed up decoding.
+
+ > Parameters for manipulation of the model output logits
+
+ temperature (`float`, *optional*, defaults to 1.0):
+ The value used to modulate the next token probabilities.
+ top_k (`int`, *optional*, defaults to 50):
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
+ top_p (`float`, *optional*, defaults to 1.0):
+ If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
+ `top_p` or higher are kept for generation.
+ typical_p (`float`, *optional*, defaults to 1.0):
+ Local typicality measures how similar the conditional probability of predicting a target token next is to
+ the expected conditional probability of predicting a random token next, given the partial text already
+ generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
+ add up to `typical_p` or higher are kept for generation. See [this
+ paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
+ epsilon_cutoff (`float`, *optional*, defaults to 0.0):
+ If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
+ `epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the
+ size of the model. See [Truncation Sampling as Language Model
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
+ eta_cutoff (`float`, *optional*, defaults to 0.0):
+ Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between
+ 0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) *
+ exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token
+ probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3,
+ depending on the size of the model. See [Truncation Sampling as Language Model
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
+ diversity_penalty (`float`, *optional*, defaults to 0.0):
+ This value is subtracted from a beam's score if it generates a token that has already been generated by any
+ beam from another group at a particular time step. Note that `diversity_penalty` is only effective if
+ `group beam search` is enabled.
+ repetition_penalty (`float`, *optional*, defaults to 1.0):
+ The parameter for repetition penalty. 1.0 means no penalty. See [this
+ paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ encoder_repetition_penalty (`float`, *optional*, defaults to 1.0):
+ The parameter for encoder_repetition_penalty. An exponential penalty on sequences that are not in the
+ original input. 1.0 means no penalty.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+ `length_penalty` < 0.0 encourages shorter sequences.
+ no_repeat_ngram_size (`int`, *optional*, defaults to 0):
+ If set to int > 0, all ngrams of that size can only occur once.
+ bad_words_ids (`List[List[int]]`, *optional*):
+ List of list of token ids that are not allowed to be generated. Check
+ [`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples.
+ force_words_ids (`List[List[int]]` or `List[List[List[int]]]`, *optional*):
+ List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of
+ words that must be included, the opposite of `bad_words_ids`. If given `List[List[List[int]]]`, this
+ triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one
+ can allow different forms of each word.
+ renormalize_logits (`bool`, *optional*, defaults to `False`):
+ Whether to renormalize the logits after applying all the logits processors or warpers (including the custom
+ ones). It's highly recommended to set this flag to `True` as the search algorithms assume the score logits
+ are normalized, but some logit processors or warpers break the normalization.
+ constraints (`List[Constraint]`, *optional*):
+ Custom constraints that can be added to the generation to ensure that the output contains certain tokens,
+ as defined by `Constraint` objects, in the most sensible way possible.
+ forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):
+ The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
+ multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
+ language token.
+ forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`):
+ The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
+ list to set multiple *end-of-sequence* tokens.
+ remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):
+ Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from crashing.
+ Note that using `remove_invalid_values` can slow down generation.
+ exponential_decay_length_penalty (`tuple(int, float)`, *optional*):
+ This tuple adds an exponentially increasing length penalty after a certain number of tokens have been
+ generated. The tuple shall consist of `(start_index, decay_factor)`, where `start_index` indicates where
+ the penalty starts and `decay_factor` represents the factor of exponential decay.
+ suppress_tokens (`List[int]`, *optional*):
+ A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set their
+ log probs to `-inf` so that they are not sampled.
+ begin_suppress_tokens (`List[int]`, *optional*):
+ A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens` logit
+ processor will set their log probs to `-inf` so that they are not sampled.
+ forced_decoder_ids (`List[List[int]]`, *optional*):
+ A list of pairs of integers which indicates a mapping from generation indices to token indices that will be
+ forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token
+ of index 123.
+ sequence_bias (`Dict[Tuple[int], float]`, *optional*):
+ Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the
+ sequence being selected, while negative biases do the opposite. Check
+ [`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples.
+ guidance_scale (`float`, *optional*):
+ The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
+ Higher guidance scale encourages the model to generate samples that are more closely linked to the input
+ prompt, usually at the expense of poorer quality.
+ low_memory (`bool`, *optional*):
+ Switch to sequential beam search and sequential topk for contrastive search to reduce peak memory.
+ Used with beam search and contrastive search.
+
+
+ > Parameters that define the output variables of `generate`
+
+ num_return_sequences(`int`, *optional*, defaults to 1):
+ The number of independently computed returned sequences for each element in the batch.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*):
+ Whether or not to return the unprocessed prediction logit scores. See `logits` under returned tensors for
+ more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ > Special tokens that can be used at generation time
+
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ bos_token_id (`int`, *optional*):
+ The id of the *beginning-of-sequence* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+
+ > Generation parameters exclusive to encoder-decoder models
+
+ encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
+ If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the
+ `decoder_input_ids`.
+ decoder_start_token_id (`Union[int, List[int]]`, *optional*):
+ If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token or a
+ list of length `batch_size`. Passing a list enables different start ids for each element in the batch
+ (e.g. multilingual models with different target languages in one batch).
+
+
+ > Generation parameters exclusive to [assistant generation](https://arxiv.org/abs/2211.17192)
+
+ num_assistant_tokens (`int`, *optional*, defaults to 5):
+ Defines the number of _speculative tokens_ that shall be generated by the assistant model before being
+ checked by the target model at each iteration. Higher values for `num_assistant_tokens` make the generation
+ more _speculative_: if the assistant model is performant, larger speed-ups can be reached; if the assistant
+ model requires many corrections, lower speed-ups are reached.
+
+ num_assistant_tokens_schedule (`str`, *optional*, defaults to `"heuristic"`):
+ Defines the schedule at which max assistant tokens shall be changed during inference.
+ - `"heuristic"`: When all speculative tokens are correct, increase `num_assistant_tokens` by 2 else
+ reduce by 1. `num_assistant_tokens` value is persistent over multiple generation calls with the same assistant model.
+ - `"heuristic_transient"`: Same as `"heuristic"` but `num_assistant_tokens` is reset to its initial value after each generation call.
+ - `"constant"`: `num_assistant_tokens` stays unchanged during generation
+
+ prompt_lookup_num_tokens (`int`, *optional*, defaults to `None`):
+ The number of tokens to be output as candidate tokens.
+
+ max_matching_ngram_size (`int`, *optional*, defaults to `None`):
+ The maximum ngram size to be considered for matching in the prompt. Defaults to 2 if not provided.
+
+ > Parameters specific to the caching mechanism:
+
+ cache_implementation (`str`, *optional*, defaults to `None`):
+ Cache class that should be used when generating.
+
+
+ > Wild card
+
+ generation_kwargs:
+ Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not
+ present in `generate`'s signature will be used in the model forward pass.
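+
+ Examples:
+
+ A minimal, illustrative sketch of building a configuration from the parameters documented above (the
+ values are arbitrary examples, not recommendations):
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> generation_config = GenerationConfig(
+ ...     max_new_tokens=64,
+ ...     do_sample=True,
+ ...     top_k=50,
+ ...     top_p=0.9,
+ ...     repetition_penalty=1.2,
+ ... )
+ >>> generation_config.do_sample
+ True
+ ```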
+ """
+
+ def __init__(self, **kwargs):
+ # Parameters that control the length of the output
+ self.max_length = kwargs.pop("max_length", 20)
+ self.max_new_tokens = kwargs.pop("max_new_tokens", None)
+ self.min_length = kwargs.pop("min_length", 0)
+ self.min_new_tokens = kwargs.pop("min_new_tokens", None)
+ self.early_stopping = kwargs.pop("early_stopping", False)
+ self.max_time = kwargs.pop("max_time", None)
+
+ # Parameters that control the generation strategy used
+ self.do_sample = kwargs.pop("do_sample", False)
+ self.num_beams = kwargs.pop("num_beams", 1)
+ self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
+ self.penalty_alpha = kwargs.pop("penalty_alpha", None)
+ self.use_cache = kwargs.pop("use_cache", True)
+
+ # Parameters for manipulation of the model output logits
+ self.temperature = kwargs.pop("temperature", 1.0)
+ self.top_k = kwargs.pop("top_k", 50)
+ self.top_p = kwargs.pop("top_p", 1.0)
+ self.typical_p = kwargs.pop("typical_p", 1.0)
+ self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0)
+ self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0)
+ self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
+ self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
+ self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0)
+ self.length_penalty = kwargs.pop("length_penalty", 1.0)
+ self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
+ self.bad_words_ids = kwargs.pop("bad_words_ids", None)
+ self.force_words_ids = kwargs.pop("force_words_ids", None)
+ self.renormalize_logits = kwargs.pop("renormalize_logits", False)
+ self.constraints = kwargs.pop("constraints", None)
+ self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
+ self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
+ self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
+ self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
+ self.suppress_tokens = kwargs.pop("suppress_tokens", None)
+ self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)
+ self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None)
+ self.sequence_bias = kwargs.pop("sequence_bias", None)
+ self.guidance_scale = kwargs.pop("guidance_scale", None)
+ self.low_memory = kwargs.pop("low_memory", None)
+
+ # Parameters that define the output variables of `generate`
+ self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
+ self.output_attentions = kwargs.pop("output_attentions", False)
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
+ self.output_scores = kwargs.pop("output_scores", False)
+ self.output_logits = kwargs.pop("output_logits", None)
+ self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
+
+ # Special tokens that can be used at generation time
+ self.pad_token_id = kwargs.pop("pad_token_id", None)
+ self.bos_token_id = kwargs.pop("bos_token_id", None)
+ self.eos_token_id = kwargs.pop("eos_token_id", None)
+
+ # Generation parameters exclusive to encoder-decoder models
+ self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
+ self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
+
+ # Assistant generation
+ self.num_assistant_tokens = kwargs.pop("num_assistant_tokens", 5)
+ self.num_assistant_tokens_schedule = kwargs.pop("num_assistant_tokens_schedule", "heuristic")
+
+ # Cache implementation
+ self.cache_implementation = kwargs.pop("cache_implementation", None)
+
+ # Prompt lookup decoding
+ self.prompt_lookup_num_tokens = kwargs.pop("prompt_lookup_num_tokens", None)
+ self.max_matching_ngram_size = kwargs.pop("max_matching_ngram_size", None)
+
+ # Wild card
+ self.generation_kwargs = kwargs.pop("generation_kwargs", {})
+
+ # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
+ # interface.
+ self._from_model_config = kwargs.pop("_from_model_config", False)
+ self._commit_hash = kwargs.pop("_commit_hash", None)
+ self.transformers_version = kwargs.pop("transformers_version", __version__)
+
+ # Additional attributes without default values
+ if not self._from_model_config:
+ # we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a
+ # model's default configuration file
+ for key, value in kwargs.items():
+ try:
+ setattr(self, key, value)
+ except AttributeError as err:
+ logger.error(f"Can't set {key} with value {value} for {self}")
+ raise err
+
+ # Validate the values of the attributes
+ self.validate(is_init=True)
+
+ def __hash__(self):
+ return hash(self.to_json_string(ignore_metadata=True))
+
+ def __eq__(self, other):
+ if not isinstance(other, GenerationConfig):
+ return False
+
+ self_without_metadata = self.to_json_string(use_diff=False, ignore_metadata=True)
+ other_without_metadata = other.to_json_string(use_diff=False, ignore_metadata=True)
+ return self_without_metadata == other_without_metadata
+
+ def __repr__(self):
+ return f"{self.__class__.__name__} {self.to_json_string(ignore_metadata=True)}"
+
+ def get_generation_mode(self, assistant_model: Optional["PreTrainedModel"] = None) -> GenerationMode:
+ """
+ Returns the generation mode triggered by the [`GenerationConfig`] instance.
+
+ Args:
+ assistant_model (`PreTrainedModel`, *optional*):
+ The assistant model to be used for assisted generation. If set, the generation mode will be
+ assisted generation.
+
+ Returns:
+ `GenerationMode`: The generation mode triggered by the instance.
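+
+ Examples:
+
+ An illustrative sketch (the returned enum members carry the lowercase mode name as their value):
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> GenerationConfig(do_sample=True).get_generation_mode().value
+ 'sample'
+ >>> GenerationConfig(num_beams=4).get_generation_mode().value
+ 'beam_search'
+ ```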
+ """
+ # TODO joao: find out a way of not depending on external fields (e.g. `assistant_model`), then make this a
+ # property and part of the `__repr__`
+ if self.constraints is not None or self.force_words_ids is not None:
+ generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH
+ elif self.num_beams == 1:
+ if self.do_sample is False:
+ if (
+ self.top_k is not None
+ and self.top_k > 1
+ and self.penalty_alpha is not None
+ and self.penalty_alpha > 0
+ ):
+ generation_mode = GenerationMode.CONTRASTIVE_SEARCH
+ else:
+ generation_mode = GenerationMode.GREEDY_SEARCH
+ else:
+ generation_mode = GenerationMode.SAMPLE
+ else:
+ if self.num_beam_groups > 1:
+ generation_mode = GenerationMode.GROUP_BEAM_SEARCH
+ elif self.do_sample is True:
+ generation_mode = GenerationMode.BEAM_SAMPLE
+ else:
+ generation_mode = GenerationMode.BEAM_SEARCH
+
+ # Assisted generation may extend some generation modes
+ if assistant_model is not None or self.prompt_lookup_num_tokens is not None:
+ if generation_mode in ("greedy_search", "sample"):
+ generation_mode = GenerationMode.ASSISTED_GENERATION
+ else:
+ raise ValueError(
+ "You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate "
+ "is only supported with Greedy Search and Sample."
+ )
+ return generation_mode
+
+ def validate(self, is_init=False):
+ """
+ Validates the values of the attributes of the [`GenerationConfig`] instance. Raises exceptions in the presence
+ of parameterization that can be detected as incorrect from the configuration instance alone.
+
+ Note that some parameters not validated here are best validated at generate runtime, as they may depend on
+ other inputs and/or the model, such as parameters related to the generation length.
+
+ Args:
+ is_init (`bool`, *optional*, defaults to `False`):
+ Whether the validation is performed during the initialization of the instance.
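+
+ Example:
+
+ An illustrative sketch -- a coherent configuration passes silently, while an invalid value raises:
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> GenerationConfig(do_sample=True, temperature=0.7).validate()  # consistent flags: passes silently
+
+ >>> try:  # `max_new_tokens` must be strictly positive, so this raises (init also validates)
+ ...     GenerationConfig(max_new_tokens=0)
+ ... except ValueError as err:
+ ...     print("max_new_tokens" in str(err))
+ True
+ ```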
+ """
+
+ # Validation of individual attributes
+ if self.early_stopping not in {True, False, "never"}:
+ raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.")
+ if self.max_new_tokens is not None and self.max_new_tokens <= 0:
+ raise ValueError(f"`max_new_tokens` must be greater than 0, but is {self.max_new_tokens}.")
+
+ # Validation of attribute relations:
+ fix_location = ""
+ if is_init:
+ fix_location = (
+ " This was detected when initializing the generation config instance, which means the corresponding "
+ "file may hold incorrect parameterization and should be fixed."
+ )
+
+ # 1. detect sampling-only parameterization when not in sampling mode
+ if self.do_sample is False:
+ greedy_wrong_parameter_msg = (
+ "`do_sample` is set to `False`. However, `{flag_name}` is set to `{flag_value}` -- this flag is only "
+ "used in sample-based generation modes. You should set `do_sample=True` or unset `{flag_name}`."
+ + fix_location
+ )
+ if self.temperature is not None and self.temperature != 1.0:
+ warnings.warn(
+ greedy_wrong_parameter_msg.format(flag_name="temperature", flag_value=self.temperature),
+ UserWarning,
+ )
+ if self.top_p is not None and self.top_p != 1.0:
+ warnings.warn(
+ greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p),
+ UserWarning,
+ )
+ if self.typical_p is not None and self.typical_p != 1.0:
+ warnings.warn(
+ greedy_wrong_parameter_msg.format(flag_name="typical_p", flag_value=self.typical_p),
+ UserWarning,
+ )
+ if (
+ self.top_k is not None and self.top_k != 50 and self.penalty_alpha is None
+ ): # contrastive search uses top_k
+ warnings.warn(
+ greedy_wrong_parameter_msg.format(flag_name="top_k", flag_value=self.top_k),
+ UserWarning,
+ )
+ if self.epsilon_cutoff is not None and self.epsilon_cutoff != 0.0:
+ warnings.warn(
+ greedy_wrong_parameter_msg.format(flag_name="epsilon_cutoff", flag_value=self.epsilon_cutoff),
+ UserWarning,
+ )
+ if self.eta_cutoff is not None and self.eta_cutoff != 0.0:
+ warnings.warn(
+ greedy_wrong_parameter_msg.format(flag_name="eta_cutoff", flag_value=self.eta_cutoff),
+ UserWarning,
+ )
+
+ # 2. detect beam-only parameterization when not in beam mode
+ if self.num_beams is None:
+ warnings.warn("`num_beams` is set to None - defaulting to 1.", UserWarning)
+ self.num_beams = 1
+
+ if self.num_beams == 1:
+ single_beam_wrong_parameter_msg = (
+ "`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used "
+ "in beam-based generation modes. You should set `num_beams>1` or unset `{flag_name}`." + fix_location
+ )
+ if self.early_stopping is not False:
+ warnings.warn(
+ single_beam_wrong_parameter_msg.format(flag_name="early_stopping", flag_value=self.early_stopping),
+ UserWarning,
+ )
+ if self.num_beam_groups is not None and self.num_beam_groups != 1:
+ warnings.warn(
+ single_beam_wrong_parameter_msg.format(
+ flag_name="num_beam_groups", flag_value=self.num_beam_groups
+ ),
+ UserWarning,
+ )
+ if self.diversity_penalty is not None and self.diversity_penalty != 0.0:
+ warnings.warn(
+ single_beam_wrong_parameter_msg.format(
+ flag_name="diversity_penalty", flag_value=self.diversity_penalty
+ ),
+ UserWarning,
+ )
+ if self.length_penalty is not None and self.length_penalty != 1.0:
+ warnings.warn(
+ single_beam_wrong_parameter_msg.format(flag_name="length_penalty", flag_value=self.length_penalty),
+ UserWarning,
+ )
+ if self.constraints is not None:
+ warnings.warn(
+ single_beam_wrong_parameter_msg.format(flag_name="constraints", flag_value=self.constraints),
+ UserWarning,
+ )
+
+ # 3. detect incorrect parameterization specific to advanced beam modes
+ else:
+ # constrained beam search
+ if self.constraints is not None or self.force_words_ids is not None:
+ constrained_wrong_parameter_msg = (
+ "one of `constraints`, `force_words_ids` is not `None`, triggering constrained beam search. However, "
+ "`{flag_name}` is set to `{flag_value}`, which is incompatible with this generation mode. Set "
+ "`constraints` and `force_words_ids` to `None` or unset `{flag_name}` to continue." + fix_location
+ )
+ if self.do_sample is True:
+ raise ValueError(
+ constrained_wrong_parameter_msg.format(flag_name="do_sample", flag_value=self.do_sample)
+ )
+ if self.num_beam_groups is not None and self.num_beam_groups != 1:
+ raise ValueError(
+ constrained_wrong_parameter_msg.format(
+ flag_name="num_beam_groups", flag_value=self.num_beam_groups
+ )
+ )
+ # group beam search
+ if self.diversity_penalty != 0.0 or self.num_beam_groups != 1:
+ group_error_prefix = (
+ "`diversity_penalty` is not 0.0 or `num_beam_groups` is not 1, triggering group beam search. In "
+ "this generation mode, "
+ )
+ if self.do_sample is True:
+ raise ValueError(group_error_prefix + "`do_sample` must be set to `False`")
+ if self.num_beams % self.num_beam_groups != 0:
+ raise ValueError(group_error_prefix + "`num_beams` should be divisible by `num_beam_groups`")
+ if self.diversity_penalty == 0.0:
+ raise ValueError(
+ group_error_prefix
+ + "`diversity_penalty` should be greater than `0.0`, otherwise your groups will be identical."
+ )
+
+ # 4. check `num_return_sequences`
+ if self.num_return_sequences != 1:
+ if self.num_beams == 1:
+ if self.do_sample is False:
+ raise ValueError(
+ "Greedy methods without beam search do not support `num_return_sequences` different than 1 "
+ f"(got {self.num_return_sequences})."
+ )
+ elif self.num_return_sequences > self.num_beams:
+ raise ValueError(
+ f"`num_return_sequences` ({self.num_return_sequences}) has to be smaller or equal to `num_beams` "
+ f"({self.num_beams})."
+ )
+
+ # 5. check common issue: passing `generate` arguments inside the generation config
+ generate_arguments = (
+ "logits_processor",
+ "stopping_criteria",
+ "prefix_allowed_tokens_fn",
+ "synced_gpus",
+ "assistant_model",
+ "streamer",
+ "negative_prompt_ids",
+ "negative_prompt_attention_mask",
+ )
+ for arg in generate_arguments:
+ if hasattr(self, arg):
+ raise ValueError(
+ f"Argument `{arg}` is not a valid argument of `GenerationConfig`. It should be passed to "
+ "`generate()` (or a pipeline) directly."
+ )
+
+ def save_pretrained(
+ self,
+ save_directory: Union[str, os.PathLike],
+ config_file_name: Optional[Union[str, os.PathLike]] = None,
+ push_to_hub: bool = False,
+ **kwargs,
+ ):
+ r"""
+ Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the
+ [`~GenerationConfig.from_pretrained`] class method.
+
+ Args:
+ save_directory (`str` or `os.PathLike`):
+ Directory where the configuration JSON file will be saved (will be created if it does not exist).
+ config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
+ Name of the generation configuration JSON file to be saved in `save_directory`.
+ push_to_hub (`bool`, *optional*, defaults to `False`):
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+ namespace).
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
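+
+ Example:
+
+ An illustrative sketch (the directory name is a placeholder; the file is written as
+ `generation_config.json` unless `config_file_name` is given):
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> generation_config = GenerationConfig(do_sample=True, top_p=0.9)
+ >>> generation_config.save_pretrained("./my_generation_config")
+ ```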
+ """
+
+ # At save time, validate the instance -- if any warning/exception is thrown, we refuse to save the instance.
+ # This strictness is enforced to prevent bad configurations from being saved and re-used.
+ try:
+ with warnings.catch_warnings(record=True) as caught_warnings:
+ self.validate()
+ if len(caught_warnings) > 0:
+ raise ValueError(str([w.message for w in caught_warnings]))
+ except ValueError as exc:
+ raise ValueError(
+ "The generation config instance is invalid -- `.validate()` throws warnings and/or exceptions. "
+ "Fix these issues to save the configuration.\n\nThrown during validation:\n" + str(exc)
+ )
+
+ use_auth_token = kwargs.pop("use_auth_token", None)
+
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if kwargs.get("token", None) is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ kwargs["token"] = use_auth_token
+
+ config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
+
+ if os.path.isfile(save_directory):
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
+
+ os.makedirs(save_directory, exist_ok=True)
+
+ if push_to_hub:
+ commit_message = kwargs.pop("commit_message", None)
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
+ repo_id = self._create_repo(repo_id, **kwargs)
+ files_timestamps = self._get_files_timestamps(save_directory)
+
+ output_config_file = os.path.join(save_directory, config_file_name)
+
+ self.to_json_file(output_config_file, use_diff=True)
+ logger.info(f"Configuration saved in {output_config_file}")
+
+ if push_to_hub:
+ self._upload_modified_files(
+ save_directory,
+ repo_id,
+ files_timestamps,
+ commit_message=commit_message,
+ token=kwargs.get("token"),
+ )
+
+ @classmethod
+ def from_pretrained(
+ cls,
+ pretrained_model_name: Union[str, os.PathLike],
+ config_file_name: Optional[Union[str, os.PathLike]] = None,
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
+ force_download: bool = False,
+ local_files_only: bool = False,
+ token: Optional[Union[str, bool]] = None,
+ revision: str = "main",
+ **kwargs,
+ ) -> "GenerationConfig":
+ r"""
+ Instantiate a [`GenerationConfig`] from a generation configuration file.
+
+ Args:
+ pretrained_model_name (`str` or `os.PathLike`):
+ This can be either:
+
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+ huggingface.co.
+ - a path to a *directory* containing a configuration file saved using the
+ [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
+ config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
+ Name of the generation configuration JSON file to be loaded from `pretrained_model_name`.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
+ standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force to (re-)download the configuration files and override the cached versions if
+ they exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
+ exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+ token (`str` or `bool`, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+
+
+
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
+
+
+
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+ If `False`, then this function returns just the final configuration object.
+
+ If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
+ subfolder (`str`, *optional*, defaults to `""`):
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
+ specify the folder name here.
+ kwargs (`Dict[str, Any]`, *optional*):
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
+ by the `return_unused_kwargs` keyword parameter.
+
+ Returns:
+ [`GenerationConfig`]: The configuration object instantiated from this pretrained model.
+
+ Examples:
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> # Download configuration from huggingface.co and cache.
+ >>> generation_config = GenerationConfig.from_pretrained("openai-community/gpt2")
+
+ >>> # E.g. config was saved using *save_pretrained('./test/saved_model/')*
+ >>> generation_config.save_pretrained("./test/saved_model/")
+ >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/")
+
+ >>> # You can also specify configuration names to your generation configuration file
+ >>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json")
+ >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json")
+
+ >>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation
+ >>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored
+ >>> generation_config, unused_kwargs = GenerationConfig.from_pretrained(
+ ... "openai-community/gpt2", top_k=1, foo=False, do_sample=True, return_unused_kwargs=True
+ ... )
+ >>> generation_config.top_k
+ 1
+
+ >>> unused_kwargs
+ {'foo': False}
+ ```"""
+ config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
+
+ resume_download = kwargs.pop("resume_download", False)
+ proxies = kwargs.pop("proxies", None)
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ subfolder = kwargs.pop("subfolder", "")
+ from_pipeline = kwargs.pop("_from_pipeline", None)
+ from_auto_class = kwargs.pop("_from_auto", False)
+ commit_hash = kwargs.pop("_commit_hash", None)
+
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
+ FutureWarning,
+ )
+ if token is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ token = use_auth_token
+
+ user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
+ if from_pipeline is not None:
+ user_agent["using_pipeline"] = from_pipeline
+
+ config_path = os.path.join(pretrained_model_name, config_file_name)
+ config_path = str(config_path)
+
+ is_local = os.path.exists(config_path)
+ if os.path.isfile(os.path.join(subfolder, config_path)):
+ # Special case when config_path is a local file
+ resolved_config_file = config_path
+ is_local = True
+ elif is_remote_url(config_path):
+ configuration_file = config_path
+ resolved_config_file = download_url(config_path)
+ else:
+ configuration_file = config_file_name
+ try:
+ # Load from local folder or from cache or download from model Hub and cache
+ resolved_config_file = cached_file(
+ pretrained_model_name,
+ configuration_file,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ proxies=proxies,
+ resume_download=resume_download,
+ local_files_only=local_files_only,
+ token=token,
+ user_agent=user_agent,
+ revision=revision,
+ subfolder=subfolder,
+ _commit_hash=commit_hash,
+ )
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
+ except EnvironmentError:
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
+ # the original exception.
+ raise
+ except Exception:
+ # For any other exception, we throw a generic error.
+ raise EnvironmentError(
+ f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it"
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
+ f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory"
+ f" containing a {configuration_file} file"
+ )
+
+ try:
+ # Load config dict
+ config_dict = cls._dict_from_json_file(resolved_config_file)
+ config_dict["_commit_hash"] = commit_hash
+ except (json.JSONDecodeError, UnicodeDecodeError):
+ raise EnvironmentError(
+ f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
+ )
+
+ if is_local:
+ logger.info(f"loading configuration file {resolved_config_file}")
+ else:
+ logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
+
+ if kwargs.get("return_unused_kwargs") is True:
+ config, unused_kwargs = cls.from_dict(config_dict, **kwargs)
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
+ return config, unused_kwargs
+ else:
+ config = cls.from_dict(config_dict, **kwargs)
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
+ return config
+
+ @classmethod
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
+ with open(json_file, "r", encoding="utf-8") as reader:
+ text = reader.read()
+ return json.loads(text)
+
+ @classmethod
+ def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig":
+ """
+ Instantiates a [`GenerationConfig`] from a Python dictionary of parameters.
+
+ Args:
+ config_dict (`Dict[str, Any]`):
+ Dictionary that will be used to instantiate the configuration object.
+ kwargs (`Dict[str, Any]`):
+ Additional parameters from which to initialize the configuration object.
+
+ Returns:
+ [`GenerationConfig`]: The configuration object instantiated from those parameters.
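+
+ Example:
+
+ An illustrative sketch:
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> config = GenerationConfig.from_dict({"do_sample": True, "top_k": 10})
+ >>> config.top_k
+ 10
+ ```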
+ """
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
+ # Those arguments may be passed along for our internal telemetry.
+ # We remove them so they don't appear in `return_unused_kwargs`.
+ kwargs.pop("_from_auto", None)
+ kwargs.pop("_from_pipeline", None)
+ # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
+ if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
+ kwargs["_commit_hash"] = config_dict["_commit_hash"]
+
+ # The line below allows model-specific config to be loaded as well through kwargs, with safety checks.
+ # See https://github.com/huggingface/transformers/pull/21269
+ config = cls(**{**config_dict, **kwargs})
+ unused_kwargs = config.update(**kwargs)
+
+ logger.info(f"Generate config {config}")
+ if return_unused_kwargs:
+ return config, unused_kwargs
+ else:
+ return config
+
+ def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
+ """
+ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
+ converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the
+ string *"float32"*, which can then be stored in the JSON format.
+ """
+ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
+ d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
+ for value in d.values():
+ if isinstance(value, dict):
+ self.dict_torch_dtype_to_str(value)
+
+ def to_diff_dict(self) -> Dict[str, Any]:
+ """
+ Removes all attributes from config which correspond to the default config attributes for better readability and
+ serializes to a Python dictionary.
+
+ Returns:
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
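+
+ Example:
+
+ An illustrative sketch -- only non-default values (plus the `transformers_version` metadata) survive:
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> diff = GenerationConfig(max_new_tokens=32).to_diff_dict()
+ >>> diff["max_new_tokens"]
+ 32
+ >>> "temperature" in diff  # default values are omitted
+ False
+ ```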
+ """
+ config_dict = self.to_dict()
+
+ # get the default config dict
+ default_config_dict = GenerationConfig().to_dict()
+
+ serializable_config_dict = {}
+
+ # only serialize values that differ from the default config
+ for key, value in config_dict.items():
+ if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]:
+ serializable_config_dict[key] = value
+
+ self.dict_torch_dtype_to_str(serializable_config_dict)
+ return serializable_config_dict
+
+ def to_dict(self) -> Dict[str, Any]:
+ """
+ Serializes this instance to a Python dictionary.
+
+ Returns:
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
+ """
+ output = copy.deepcopy(self.__dict__)
+
+ # Fields to ignore at serialization time
+ if "_commit_hash" in output:
+ del output["_commit_hash"]
+ if "_original_object_hash" in output:
+ del output["_original_object_hash"]
+
+ # Transformers version when serializing this file
+ output["transformers_version"] = __version__
+
+ self.dict_torch_dtype_to_str(output)
+ return output
+
+ def to_json_string(self, use_diff: bool = True, ignore_metadata: bool = False) -> str:
+ """
+ Serializes this instance to a JSON string.
+
+ Args:
+ use_diff (`bool`, *optional*, defaults to `True`):
+ If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
+ is serialized to JSON string.
+ ignore_metadata (`bool`, *optional*, defaults to `False`):
+ Whether to ignore the metadata fields present in the instance.
+
+ Returns:
+ `str`: String containing all the attributes that make up this configuration instance in JSON format.
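+
+ Example:
+
+ An illustrative sketch (with `use_diff=False`, the full set of attributes is serialized):
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> s = GenerationConfig().to_json_string(use_diff=False, ignore_metadata=True)
+ >>> '"do_sample": false' in s
+ True
+ ```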
+ """
+ if use_diff is True:
+ config_dict = self.to_diff_dict()
+ else:
+ config_dict = self.to_dict()
+
+ if ignore_metadata:
+ for metadata_field in METADATA_FIELDS:
+ config_dict.pop(metadata_field, None)
+
+ def convert_keys_to_string(obj):
+ if isinstance(obj, dict):
+ return {str(key): convert_keys_to_string(value) for key, value in obj.items()}
+ elif isinstance(obj, list):
+ return [convert_keys_to_string(item) for item in obj]
+ else:
+ return obj
+
+ config_dict = convert_keys_to_string(config_dict)
+
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
+
+ def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
+ """
+ Save this instance to a JSON file.
+
+ Args:
+ json_file_path (`str` or `os.PathLike`):
+ Path to the JSON file in which this configuration instance's parameters will be saved.
+ use_diff (`bool`, *optional*, defaults to `True`):
+ If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
+ is serialized to JSON file.
+ """
+ with open(json_file_path, "w", encoding="utf-8") as writer:
+ writer.write(self.to_json_string(use_diff=use_diff))
+
+ @classmethod
+ def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig":
+ """
+ Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy
+ [`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].
+
+ Args:
+ model_config (`PretrainedConfig`):
+ The model config that will be used to instantiate the generation config.
+
+ Returns:
+ [`GenerationConfig`]: The configuration object instantiated from those parameters.
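+
+ Example:
+
+ An illustrative sketch (fetches the model config from the Hub, so network access is assumed):
+
+ ```python
+ >>> from transformers import AutoConfig, GenerationConfig
+
+ >>> model_config = AutoConfig.from_pretrained("openai-community/gpt2")
+ >>> generation_config = GenerationConfig.from_model_config(model_config)
+ ```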
+ """
+ config_dict = model_config.to_dict()
+ config_dict.pop("_from_model_config", None)
+ config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)
+
+ # Special case: some models have generation attributes set in the decoder. Use them if still unset in the
+ # generation config.
+ for decoder_name in ("decoder", "generator", "text_config"):
+ if decoder_name in config_dict:
+ default_generation_config = GenerationConfig()
+ decoder_config = config_dict[decoder_name]
+ for attr in config.to_dict().keys():
+ if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr):
+ setattr(config, attr, decoder_config[attr])
+
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
+ return config
+
+ def update(self, **kwargs):
+ """
+ Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
+ returning all the unused kwargs.
+
+ Args:
+ kwargs (`Dict[str, Any]`):
+ Dictionary of attributes to tentatively update this class.
+
+ Returns:
+ `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
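+
+ Example:
+
+ An illustrative sketch -- matching keys are applied in place, unknown keys are returned untouched:
+
+ ```python
+ >>> from transformers import GenerationConfig
+
+ >>> config = GenerationConfig()
+ >>> unused = config.update(max_new_tokens=64, foo="bar")
+ >>> config.max_new_tokens
+ 64
+ >>> unused
+ {'foo': 'bar'}
+ ```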
+ """
+ to_remove = []
+ for key, value in kwargs.items():
+ if hasattr(self, key):
+ setattr(self, key, value)
+ to_remove.append(key)
+
+ # Confirm that the updated instance is still valid
+ self.validate()
+
+ # Remove all the attributes that were updated, without modifying the input dict
+ unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
+ return unused_kwargs
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c30b92755a4261654a7b7c930d07c0c6859c4a5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py
@@ -0,0 +1,457 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+
+import jax
+import jax.lax as lax
+import jax.numpy as jnp
+
+from ..utils import add_start_docstrings
+from ..utils.logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
+ beam search, or log softmax for each vocabulary token when using beam search.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional logits processor specific kwargs.
+
+ Return:
+ `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
+
+"""
+
+
+class FlaxLogitsProcessor:
+ """Abstract base class for all logit processors that can be applied during generation."""
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
+ """Flax method for processing logits."""
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+
+class FlaxLogitsWarper:
+ """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
+ """Flax method for warping logits."""
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+
+class FlaxLogitsProcessorList(list):
+ """
+ This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process
+ a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each
+ [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs.
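+
+ Example:
+
+ An illustrative sketch (assumes JAX/Flax are installed; the ids and scores are dummy tensors):
+
+ ```python
+ >>> import jax.numpy as jnp
+ >>> from transformers.generation.flax_logits_process import (
+ ...     FlaxLogitsProcessorList,
+ ...     FlaxTemperatureLogitsWarper,
+ ...     FlaxTopKLogitsWarper,
+ ... )
+
+ >>> processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)])
+ >>> input_ids = jnp.zeros((1, 3), dtype="i4")
+ >>> scores = jnp.zeros((1, 10))
+ >>> processors(input_ids, scores, cur_len=3).shape
+ (1, 10)
+ ```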
+ """
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
+ for processor in self:
+ function_args = inspect.signature(processor.__call__).parameters
+ if len(function_args) > 3:
+ if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
+ raise ValueError(
+ f"Make sure that all the required parameters: {list(function_args.keys())} for "
+ f"{processor.__class__} are passed to the logits processor."
+ )
+ scores = processor(input_ids, scores, cur_len, **kwargs)
+ else:
+ scores = processor(input_ids, scores, cur_len)
+ return scores
+
+
+class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
+ r"""
+ [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution).
+
+ Args:
+ temperature (`float`):
+ The value used to modulate the logits distribution.
+ """
+
+ def __init__(self, temperature: float):
+ if not isinstance(temperature, float) or not (temperature > 0):
+ raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
+
+ self.temperature = temperature
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ scores = scores / self.temperature
+ return scores
+
+
+class FlaxTopPLogitsWarper(FlaxLogitsWarper):
+ """
+ [`FlaxLogitsWarper`] that performs top-p filtering, i.e. restricting to the smallest set of most probable
+ tokens whose cumulative probability is at least `top_p`.
+
+ Args:
+ top_p (`float`):
+ If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
+ higher are kept for generation.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
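+
+ Example:
+
+ An illustrative sketch (assumes JAX is installed; the scores are log-probabilities over a toy 4-token
+ vocabulary, and `input_ids` is unused by this warper, so `None` is passed):
+
+ ```python
+ >>> import jax.numpy as jnp
+ >>> from transformers.generation.flax_logits_process import FlaxTopPLogitsWarper
+
+ >>> warper = FlaxTopPLogitsWarper(top_p=0.8)
+ >>> scores = jnp.log(jnp.array([[0.1, 0.6, 0.05, 0.25]]))
+ >>> out = warper(None, scores, cur_len=1)
+ >>> [bool(jnp.isinf(v)) for v in out[0]]  # only the tokens covering the top 0.8 probability mass survive
+ [True, False, True, False]
+ ```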
+ """
+
+ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
+ raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
+
+ self.top_p = top_p
+ self.filter_value = filter_value
+ self.min_tokens_to_keep = min_tokens_to_keep
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
+
+ mask_scores = jnp.full_like(scores, self.filter_value)
+ cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
+ score_mask = cumulative_probs < self.top_p
+
+ # include the token that is higher than top_p as well
+ score_mask = jnp.roll(score_mask, 1)
+ score_mask |= score_mask.at[:, 0].set(True)
+
+ # min tokens to keep
+ score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
+
+ topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
+ next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
+
+ return next_scores
+
+
+class FlaxTopKLogitsWarper(FlaxLogitsWarper):
+ r"""
+ [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
+
+ Args:
+ top_k (`int`):
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
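+
+ Example:
+
+ An illustrative sketch (assumes JAX is installed; `input_ids` is unused by this warper, so `None` is passed):
+
+ ```python
+ >>> import jax.numpy as jnp
+ >>> from transformers.generation.flax_logits_process import FlaxTopKLogitsWarper
+
+ >>> warper = FlaxTopKLogitsWarper(top_k=2)
+ >>> scores = jnp.array([[1.0, 3.0, 2.0, 0.5]])
+ >>> out = warper(None, scores, cur_len=1)
+ >>> [bool(jnp.isinf(v)) for v in out[0]]  # everything outside the top 2 scores is set to -inf
+ [True, False, False, True]
+ ```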
+ """
+
+ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ if not isinstance(top_k, int) or top_k <= 0:
+ raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
+
+ self.top_k = max(top_k, min_tokens_to_keep)
+ self.filter_value = filter_value
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ batch_size, vocab_size = scores.shape
+ next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
+
+ topk = min(self.top_k, scores.shape[-1]) # Safety check
+ topk_scores, topk_indices = lax.top_k(scores, topk)
+ shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
+ topk_scores_flat = topk_scores.flatten()
+ topk_indices_flat = topk_indices.flatten() + shift
+
+ next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
+ next_scores = next_scores_flat.reshape(batch_size, vocab_size)
+ return next_scores
+
+
+class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
+ r"""
+ [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.
+
+ Args:
+ bos_token_id (`int`):
+ The id of the token to force as the first generated token.
+ """
+
+ def __init__(self, bos_token_id: int):
+ self.bos_token_id = bos_token_id
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ new_scores = jnp.full(scores.shape, -float("inf"))
+
+ apply_penalty = 1 - jnp.bool_(cur_len - 1)
+
+ scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
+
+ return scores
+
+
+class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
+ r"""
+ [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
+
+ Args:
+ max_length (`int`):
+ The maximum length of the sequence to be generated.
+ eos_token_id (`int`):
+ The id of the token to force as the last generated token when `max_length` is reached.
+ """
+
+ def __init__(self, max_length: int, eos_token_id: int):
+ self.max_length = max_length
+ self.eos_token_id = eos_token_id
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ new_scores = jnp.full(scores.shape, -float("inf"))
+
+ apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
+
+ scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
+
+ return scores
+
+
+class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
+ r"""
+ [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
+
+ Args:
+ min_length (`int`):
+ The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
+ eos_token_id (`int`):
+ The id of the *end-of-sequence* token.
+ """
+
+ def __init__(self, min_length: int, eos_token_id: int):
+ if not isinstance(min_length, int) or min_length < 0:
+ raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
+
+ if not isinstance(eos_token_id, int) or eos_token_id < 0:
+ raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
+
+ self.min_length = min_length
+ self.eos_token_id = eos_token_id
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ # create boolean flag to decide if min length penalty should be applied
+ apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
+
+ scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
+
+ return scores
+
+
+class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
+ r"""
+ [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating
+ using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not
+ sampled at the beginning of the generation.
+
+ Args:
+ begin_suppress_tokens (`List[int]`):
+ Tokens to not sample.
+ begin_index (`int`):
+ Index where the tokens are suppressed.
+ """
+
+ def __init__(self, begin_suppress_tokens, begin_index):
+ self.begin_suppress_tokens = list(begin_suppress_tokens)
+ self.begin_index = begin_index
+
+ def __call__(self, input_ids, scores, cur_len: int):
+ apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
+
+ scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
+
+ return scores
+
+
+class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
+ r"""
+ [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs
+ to be `-inf` so they are not sampled.
+
+ Args:
+ suppress_tokens (`list`):
+ Tokens to not sample.
+ """
+
+ def __init__(self, suppress_tokens: list):
+ self.suppress_tokens = list(suppress_tokens)
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
+
+ return scores
+
+
+class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
+ r"""
+ [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to
+ token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens
+ to `-inf` so that they are sampled at their corresponding index.
+
+ Args:
+ force_token_map (`list`):
+ Map giving token ids and indices where they will be forced to be sampled.
+ """
+
+ def __init__(self, force_token_map):
+ force_token_map = dict(force_token_map)
+ # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
+ # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
+ # Indexes without forced tokens will have a negative value.
+ force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
+ for index, token in force_token_map.items():
+ if token is not None:
+ force_token_array = force_token_array.at[index].set(token)
+ self.force_token_array = jnp.int32(force_token_array)
+
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
+ def _force_token(generation_idx):
+ batch_size = scores.shape[0]
+ current_token = self.force_token_array[generation_idx]
+
+ new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
+ updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
+ new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
+ return new_scores
+
+ scores = lax.cond(
+ cur_len >= self.force_token_array.shape[0],
+ # If the current length is greater than or equal to the length of force_token_array, the processor does nothing.
+ lambda: scores,
+ # Otherwise, it may force a certain token.
+ lambda: lax.cond(
+ self.force_token_array[cur_len] >= 0,
+ # Only valid (positive) tokens are forced
+ lambda: _force_token(cur_len),
+ # Otherwise, the processor does nothing.
+ lambda: scores,
+ ),
+ )
+ return scores
+
+
+class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
+ r"""
+ Whisper specific Processor. This processor can be used to force a list of tokens. The processor will set their log
+ probs to `inf` so that they are sampled at their corresponding index.
+
+ Args:
+ generate_config (`GenerateConfig`):
+ The generate config used to generate the output. The following parameters are required:
+ eos_token_id (`int`, *optional*, defaults to 50257):
+ The id of the *end-of-sequence* token.
+ no_timestamps_token_id (`int`, *optional*, defaults to 50363):
+ The id of the `"<|notimestamps|>"` token.
+ max_initial_timestamp_index (`int`, *optional*, defaults to 1):
+ Used to set the maximum value of the initial timestamp. This is used to prevent the model from
+ predicting timestamps that are too far in the future.
+ """
+
+ def __init__(self, generate_config, model_config, decoder_input_length):
+ self.eos_token_id = generate_config.eos_token_id
+ self.no_timestamps_token_id = generate_config.no_timestamps_token_id
+ self.timestamp_begin = generate_config.no_timestamps_token_id + 1
+
+ self.begin_index = decoder_input_length + 1
+
+ if generate_config.is_multilingual:
+ # room for language token and task token
+ self.begin_index += 2
+ if hasattr(generate_config, "max_initial_timestamp_index"):
+ self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
+ else:
+ self.max_initial_timestamp_index = model_config.vocab_size
+ if self.max_initial_timestamp_index is None:
+ self.max_initial_timestamp_index = model_config.vocab_size
+
+ def __call__(self, input_ids, scores, cur_len):
+ # suppress <|notimestamps|> which is handled by without_timestamps
+ scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))
+
+ def handle_pairs(input_ids_k, scores_k):
+ last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
+ last_was_timestamp = jnp.where(
+ input_ids_k[cur_len - 1] >= self.timestamp_begin,
+ True and last_was_timestamp,
+ False,
+ )
+
+ penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
+ penultimate_was_timestamp = jnp.where(
+ input_ids_k[cur_len - 2] >= self.timestamp_begin,
+ True,
+ penultimate_was_timestamp,
+ )
+
+ return jnp.where(
+ last_was_timestamp,
+ jnp.where(
+ penultimate_was_timestamp > 0,
+ scores_k.at[self.timestamp_begin :].set(-float("inf")),
+ scores_k.at[: self.eos_token_id].set(-float("inf")),
+ ),
+ scores_k,
+ )
+
+ scores = jax.vmap(handle_pairs)(input_ids, scores)
+
+ apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
+ apply_max_initial_timestamp = jnp.where(
+ self.max_initial_timestamp_index is not None,
+ True and apply_max_initial_timestamp,
+ False,
+ )
+
+ last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
+
+ scores = jnp.where(
+ apply_max_initial_timestamp,
+ scores.at[:, last_allowed + 1 :].set(-float("inf")),
+ scores,
+ )
+
+ # if sum of probability over timestamps is above any other token, sample timestamp
+ logprobs = jax.nn.log_softmax(scores, axis=-1)
+
+ def handle_cumulative_probs(logprobs_k, scores_k):
+ timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
+ max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
+ return jnp.where(
+ timestamp_logprob > max_text_token_logprob,
+ scores_k.at[: self.timestamp_begin].set(-float("inf")),
+ scores_k,
+ )
+
+ scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
+
+ return scores
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bdf58691a80d70cb58158fcd62e9dece7c58784
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_utils.py
@@ -0,0 +1,1019 @@
+# coding=utf-8
+# Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
+# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import copy
+import inspect
+import warnings
+from functools import partial
+from typing import Any, Dict, Optional, Union
+
+import flax
+import jax
+import jax.numpy as jnp
+import numpy as np
+from jax import lax
+
+from ..models.auto import (
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
+)
+from ..utils import ModelOutput, logging
+from .configuration_utils import GenerationConfig
+from .flax_logits_process import (
+ FlaxForcedBOSTokenLogitsProcessor,
+ FlaxForcedEOSTokenLogitsProcessor,
+ FlaxForceTokensLogitsProcessor,
+ FlaxLogitsProcessorList,
+ FlaxMinLengthLogitsProcessor,
+ FlaxSuppressTokensAtBeginLogitsProcessor,
+ FlaxSuppressTokensLogitsProcessor,
+ FlaxTemperatureLogitsWarper,
+ FlaxTopKLogitsWarper,
+ FlaxTopPLogitsWarper,
+)
+
+
+logger = logging.get_logger(__name__)
+
+
+@flax.struct.dataclass
+class FlaxGreedySearchOutput(ModelOutput):
+ """
+ Flax Base class for outputs of decoder-only generation models using greedy search.
+
+
+ Args:
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
+ The generated sequences.
+ """
+
+ sequences: jnp.ndarray = None
+
+
+@flax.struct.dataclass
+class FlaxSampleOutput(ModelOutput):
+ """
+ Flax Base class for outputs of decoder-only generation models using sampling.
+
+
+ Args:
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
+ The generated sequences.
+ """
+
+ sequences: jnp.ndarray = None
+
+
+@flax.struct.dataclass
+class FlaxBeamSearchOutput(ModelOutput):
+ """
+ Flax Base class for outputs of decoder-only generation models using beam search.
+
+
+ Args:
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
+ The generated sequences.
+ scores (`jnp.ndarray` of shape `(batch_size,)`):
+ The scores (log probabilities) of the generated sequences.
+ """
+
+ sequences: jnp.ndarray = None
+ scores: jnp.ndarray = None
+
+
+@flax.struct.dataclass
+class GreedyState:
+ cur_len: jnp.ndarray
+ sequences: jnp.ndarray
+ running_token: jnp.ndarray
+ is_sent_finished: jnp.ndarray
+ model_kwargs: Dict[str, jnp.ndarray]
+
+
+@flax.struct.dataclass
+class SampleState:
+ cur_len: jnp.ndarray
+ sequences: jnp.ndarray
+ running_token: jnp.ndarray
+ is_sent_finished: jnp.ndarray
+ prng_key: jnp.ndarray
+ model_kwargs: Dict[str, jnp.ndarray]
+
+
+@flax.struct.dataclass
+class BeamSearchState:
+ cur_len: jnp.ndarray
+ running_sequences: jnp.ndarray
+ running_scores: jnp.ndarray
+ sequences: jnp.ndarray
+ scores: jnp.ndarray
+ is_sent_finished: jnp.ndarray
+ model_kwargs: Dict[str, jnp.ndarray]
+
+
+class FlaxGenerationMixin:
+ """
+ A class containing all functions for auto-regressive text generation, to be used as a mixin in
+ [`FlaxPreTrainedModel`].
+
+ The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for:
+ - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and
+ `do_sample=False`
+ - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and
+ `do_sample=True`
+ - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and
+ `do_sample=False`
+
+ You do not need to call any of the above methods directly. Pass custom parameter values to `generate` instead. To
+ learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
+ """
+
+ def prepare_inputs_for_generation(self, *args, **kwargs):
+ raise NotImplementedError(
+ "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
+ )
+
+ @staticmethod
+ def _run_loop_in_debug(cond_fn, body_fn, init_state):
+ """
+ Run generation in untraced mode. This should only be used for debugging purposes.
+ """
+ state = init_state
+ while cond_fn(state):
+ state = body_fn(state)
+ return state
+
+ def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs):
+ encoder_kwargs = {
+ argument: value
+ for argument, value in model_kwargs.items()
+ if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
+ }
+ model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs)
+ return model_kwargs
+
+ def _prepare_decoder_input_ids_for_generation(
+ self,
+ batch_size: int,
+ decoder_start_token_id: int = None,
+ bos_token_id: int = None,
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
+ ) -> jnp.ndarray:
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
+ # Only use this arg if not None, otherwise just remove from model_kwargs
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
+ if decoder_input_ids is not None:
+ return decoder_input_ids
+ decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
+ return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0)
+
+ def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
+ # retrieve decoder_start_token_id for encoder-decoder models
+ # fall back to bos_token_id if necessary
+ decoder_start_token_id = (
+ decoder_start_token_id
+ if decoder_start_token_id is not None
+ else self.generation_config.decoder_start_token_id
+ )
+ bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
+ if decoder_start_token_id is not None:
+ return decoder_start_token_id
+ elif (
+ hasattr(self.config, "decoder")
+ and hasattr(self.config.decoder, "decoder_start_token_id")
+ and self.config.decoder.decoder_start_token_id is not None
+ ):
+ return self.config.decoder.decoder_start_token_id
+ elif bos_token_id is not None:
+ return bos_token_id
+ elif (
+ hasattr(self.config, "decoder")
+ and hasattr(self.config.decoder, "bos_token_id")
+ and self.config.decoder.bos_token_id is not None
+ ):
+ return self.config.decoder.bos_token_id
+ raise ValueError(
+ "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
+ )
+
+ @staticmethod
+ def _expand_to_num_beams(tensor, num_beams):
+ return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
+
+ def _adapt_logits_for_beam_search(self, logits):
+ """
+ This function can be overwritten in the specific modeling_flax_<model_name>.py classes to allow for custom beam
+ search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
+ """
+ return logits
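+
+ # A hypothetical override, for illustration only (a sketch, not from the original source): a subclass could, for
+ # example, prevent the last vocabulary entry from ever being selected during beam search:
+ #
+ #     def _adapt_logits_for_beam_search(self, logits):
+ #         # `logits` has shape (batch_size, num_beams, vocab_size) at this point
+ #         return logits.at[:, :, -1].set(-float("inf"))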
+
+ def _validate_model_class(self):
+ """
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
+ right class to use.
+ """
+ if not self.can_generate():
+ generate_compatible_mappings = [
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ ]
+ generate_compatible_classes = set()
+ for model_mapping in generate_compatible_mappings:
+ supported_models = model_mapping.get(type(self.config), default=None)
+ if supported_models is not None:
+ generate_compatible_classes.add(supported_models.__name__)
+ exception_message = (
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
+ "it doesn't have a language model head."
+ )
+ if generate_compatible_classes:
+ exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
+ raise TypeError(exception_message)
+
+ def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
+ unused_model_args = []
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
+ if "kwargs" in model_args or "model_kwargs" in model_args:
+ model_args |= set(inspect.signature(self.__call__).parameters)
+ for key, value in model_kwargs.items():
+ if value is not None and key not in model_args:
+ unused_model_args.append(key)
+
+ if unused_model_args:
+ raise ValueError(
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
+ " generate arguments will also show up in this list)"
+ )
+
+ def generate(
+ self,
+ input_ids: jnp.ndarray,
+ generation_config: Optional[GenerationConfig] = None,
+ prng_key: Optional[jnp.ndarray] = None,
+ trace: bool = True,
+ params: Optional[Dict[str, jnp.ndarray]] = None,
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
+ **kwargs,
+ ):
+ r"""
+ Generates sequences of token ids for models with a language modeling head.
+
+ Parameters:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ trace (`bool`, *optional*, defaults to `True`):
+ Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a
+ considerably slower runtime.
+ params (`Dict[str, jnp.ndarray]`, *optional*):
+ Optionally the model parameters can be passed. Can be useful for parallelized generation.
+ logits_processor (`FlaxLogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logits processor is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
+
+ Return:
+ [`~utils.ModelOutput`].
+
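+
+ Examples:
+
+ A minimal usage sketch, assuming the `openai-community/gpt2` checkpoint with Flax weights:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = FlaxAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+
+ >>> input_ids = tokenizer("Flax generation is", return_tensors="np").input_ids
+ >>> outputs = model.generate(input_ids, max_new_tokens=10, do_sample=False)
+ >>> generated = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)
+ ```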
+ """
+ # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
+ self._validate_model_class()
+
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
+ if generation_config is None:
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
+ # two conditions must be met
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
+ # 2) the generation config must have seen no modification since its creation (the hash is the same).
+ if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash(
+ self.generation_config
+ ):
+ new_generation_config = GenerationConfig.from_model_config(self.config)
+ if new_generation_config != self.generation_config:
+ warnings.warn(
+ "You have modified the pretrained model configuration to control generation. This is a"
+ " deprecated strategy to control generation and will be removed soon, in a future version."
+ " Please use and modify the model generation configuration (see"
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
+ )
+ self.generation_config = new_generation_config
+ generation_config = self.generation_config
+
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList()
+
+ # set init values
+ prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
+
+ if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
+ if model_kwargs.get("attention_mask") is None:
+ logger.warning(
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
+ )
+ eos_token_id = generation_config.eos_token_id
+ if isinstance(eos_token_id, list):
+ eos_token_id = eos_token_id[0]
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
+ generation_config.pad_token_id = eos_token_id
+
+ if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder:
+ raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")
+
+ # decoder-only models should use left-padding for generation (can't be checked with `trace=True`)
+ if not self.config.is_encoder_decoder and not trace:
+ if (
+ generation_config.pad_token_id is not None
+ and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0
+ ):
+ logger.warning(
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
+ )
+
+ batch_size = input_ids.shape[0]
+
+ if self.config.is_encoder_decoder:
+ # add encoder_outputs to model_kwargs
+ if model_kwargs.get("encoder_outputs") is None:
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
+ # prepare decoder_input_ids for generation
+ input_ids = self._prepare_decoder_input_ids_for_generation(
+ batch_size,
+ decoder_start_token_id=generation_config.decoder_start_token_id,
+ bos_token_id=generation_config.bos_token_id,
+ model_kwargs=model_kwargs,
+ )
+
+ # Prepare `max_length` depending on other stopping criteria.
+ input_ids_seq_length = input_ids.shape[-1]
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
+ # 20 is the default max_length of the generation config
+ warnings.warn(
+ f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
+ "to control the generation length. recommend setting `max_new_tokens` to control the maximum length of the generation.",
+ UserWarning,
+ )
+ elif generation_config.max_new_tokens is not None:
+ if not has_default_max_length and generation_config.max_length is not None:
+ logger.warning(
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
+ "Please refer to the documentation for more information. "
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
+ )
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
+
+ if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
+ raise ValueError(
+ f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger than"
+ f" the maximum length ({generation_config.max_length})"
+ )
+ if input_ids_seq_length >= generation_config.max_length:
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
+ logger.warning(
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+ " increasing`max_new_tokens`."
+ )
+
+ logits_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=input_ids_seq_length,
+ logits_processor=logits_processor,
+ )
+
+ if not generation_config.do_sample and generation_config.num_beams == 1:
+ return self._greedy_search(
+ input_ids,
+ generation_config.max_length,
+ generation_config.pad_token_id,
+ generation_config.eos_token_id,
+ logits_processor=logits_processor,
+ trace=trace,
+ params=params,
+ model_kwargs=model_kwargs,
+ )
+ elif generation_config.do_sample and generation_config.num_beams == 1:
+ logits_warper = self._get_logits_warper(generation_config=generation_config)
+ return self._sample(
+ input_ids,
+ generation_config.max_length,
+ generation_config.pad_token_id,
+ generation_config.eos_token_id,
+ prng_key,
+ logits_warper=logits_warper,
+ logits_processor=logits_processor,
+ trace=trace,
+ params=params,
+ model_kwargs=model_kwargs,
+ )
+ elif not generation_config.do_sample and generation_config.num_beams > 1:
+ # broadcast input_ids & encoder_outputs
+ input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams)
+
+ if "encoder_outputs" in model_kwargs:
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
+ model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams
+ )
+
+ for kwarg in ["attention_mask", "decoder_attention_mask"]:
+ if kwarg in model_kwargs:
+ model_kwargs[kwarg] = self._expand_to_num_beams(
+ model_kwargs[kwarg], num_beams=generation_config.num_beams
+ )
+
+ return self._beam_search(
+ input_ids,
+ generation_config.max_length,
+ generation_config.pad_token_id,
+ generation_config.eos_token_id,
+ length_penalty=generation_config.length_penalty,
+ early_stopping=generation_config.early_stopping,
+ logits_processor=logits_processor,
+ trace=trace,
+ params=params,
+ num_return_sequences=generation_config.num_return_sequences,
+ model_kwargs=model_kwargs,
+ )
+ else:
+ raise NotImplementedError("Beam sampling is currently not implemented.")
+
+ def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList:
+ """
+ This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`]
+ instances used for multinomial sampling.
+ """
+ warpers = FlaxLogitsProcessorList()
+
+ if generation_config.temperature is not None and generation_config.temperature != 1.0:
+ warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature))
+ if generation_config.top_k is not None and generation_config.top_k != 0:
+ warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
+ if generation_config.top_p is not None and generation_config.top_p < 1.0:
+ warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
+
+ return warpers
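+
+ # For illustration (a sketch with assumed example values): with `temperature=0.7`, `top_k=50` and `top_p=0.9`
+ # set on the generation config, the list built above is equivalent to:
+ #
+ #     warpers = FlaxLogitsProcessorList(
+ #         [
+ #             FlaxTemperatureLogitsWarper(0.7),
+ #             FlaxTopKLogitsWarper(top_k=50, min_tokens_to_keep=1),
+ #             FlaxTopPLogitsWarper(top_p=0.9, min_tokens_to_keep=1),
+ #         ]
+ #     )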
+
+ def _get_logits_processor(
+ self,
+ generation_config: GenerationConfig,
+ input_ids_seq_length: int,
+ logits_processor: Optional[FlaxLogitsProcessorList],
+ ) -> FlaxLogitsProcessorList:
+ """
+ This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`]
+ instances used to modify the scores of the language model head.
+ """
+ processors = FlaxLogitsProcessorList()
+
+ if (
+ generation_config.min_length is not None
+ and generation_config.eos_token_id is not None
+ and generation_config.min_length > -1
+ ):
+ processors.append(
+ FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)
+ )
+ if generation_config.forced_bos_token_id is not None:
+ processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
+ if generation_config.forced_eos_token_id is not None:
+ processors.append(
+ FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
+ )
+ if generation_config.suppress_tokens is not None:
+ processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
+ if generation_config.begin_suppress_tokens is not None:
+ begin_index = input_ids_seq_length
+ begin_index = (
+ begin_index
+ if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
+ else begin_index + 1
+ )
+ if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0:
+ # generation starts after the last token that is forced
+ begin_index += generation_config.forced_decoder_ids[-1][0]
+ processors.append(
+ FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
+ )
+ if generation_config.forced_decoder_ids is not None:
+ forced_decoder_ids = [
+ [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids
+ ]
+ processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids))
+ processors = self._merge_criteria_processor_list(processors, logits_processor)
+
+ return processors
+
+ def _merge_criteria_processor_list(
+ self,
+ default_list: FlaxLogitsProcessorList,
+ custom_list: FlaxLogitsProcessorList,
+ ) -> FlaxLogitsProcessorList:
+ if len(custom_list) == 0:
+ return default_list
+ for default in default_list:
+ for custom in custom_list:
+ if type(custom) is type(default):
+ object_type = "logits processor"
+ raise ValueError(
+ f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
+ f" `generate`, but it has already been created with the values {default}. {default} has been"
+ " created by passing the corresponding arguments to generate or by the model's config default"
+ f" values. If you just want to change the default values of {object_type} consider passing"
+ f" them as arguments to `generate` instead of using a custom {object_type}."
+ )
+ default_list.extend(custom_list)
+ return default_list
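+
+ # Illustration (a sketch with assumed values): because of the duplicate-type check above, passing a custom
+ # processor that `generate` would already build from its own arguments raises a `ValueError`, e.g.
+ #
+ #     custom = FlaxLogitsProcessorList([FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2)])
+ #     model.generate(input_ids, min_length=5, logits_processor=custom)  # ValueError: pass `min_length` instead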
+
+ def _greedy_search(
+ self,
+ input_ids: None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
+ trace: bool = True,
+ params: Optional[Dict[str, jnp.ndarray]] = None,
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
+ ):
+ # init values
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+
+ batch_size, cur_len = input_ids.shape
+
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
+ cur_len = jnp.array(cur_len)
+
+ # per batch-item holding current token in loop.
+ sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
+ sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
+
+ # per batch-item state bit indicating if sentence has finished.
+ is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
+
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+ model = self.decode if self.config.is_encoder_decoder else self
+ # initialize model specific kwargs
+ model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
+
+ # initialize state
+ state = GreedyState(
+ cur_len=cur_len,
+ sequences=sequences,
+ running_token=input_ids,
+ is_sent_finished=is_sent_finished,
+ model_kwargs=model_kwargs,
+ )
+
+ def greedy_search_cond_fn(state):
+ """state termination condition fn."""
+ has_reached_max_length = state.cur_len == max_length
+ all_sequence_finished = jnp.all(state.is_sent_finished)
+ finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
+ return ~finish_generation
+
+ def greedy_search_body_fn(state):
+ """state update fn."""
+ model_outputs = model(state.running_token, params=params, **state.model_kwargs)
+ logits = model_outputs.logits[:, -1]
+
+ # apply min_length, ...
+ logits = logits_processor(state.sequences, logits, state.cur_len)
+
+ next_token = jnp.argmax(logits, axis=-1)
+
+ next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
+ next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
+ next_token = next_token[:, None]
+
+ next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+ return GreedyState(
+ cur_len=state.cur_len + 1,
+ sequences=next_sequences,
+ running_token=next_token,
+ is_sent_finished=next_is_sent_finished,
+ model_kwargs=next_model_kwargs,
+ )
+
+ # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
+ if input_ids.shape[1] > 1:
+ state = greedy_search_body_fn(state)
+
+ if not trace:
+ state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
+ else:
+ state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
+
+ return FlaxGreedySearchOutput(sequences=state.sequences)
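+
+ # Note (illustrative): the loop above threads an immutable `GreedyState` through `lax.while_loop`, so the exact
+ # same body function can be stepped eagerly for debugging by disabling tracing, e.g.
+ #
+ #     out = model.generate(input_ids, max_new_tokens=5, do_sample=False, trace=False)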
+
+ def _sample(
+ self,
+ input_ids: None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ prng_key: Optional[jnp.ndarray] = None,
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
+ logits_warper: Optional[FlaxLogitsProcessorList] = None,
+ trace: bool = True,
+ params: Optional[Dict[str, jnp.ndarray]] = None,
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
+ ):
+ # init values
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
+
+ batch_size, cur_len = input_ids.shape
+
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
+ cur_len = jnp.array(cur_len)
+
+ # per batch-item holding current token in loop.
+ sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
+ sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
+
+ # per batch-item state bit indicating if sentence has finished.
+ is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
+
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+ model = self.decode if self.config.is_encoder_decoder else self
+
+ # initialize model specific kwargs
+ model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
+
+ # initialize state
+ state = SampleState(
+ cur_len=cur_len,
+ sequences=sequences,
+ running_token=input_ids,
+ is_sent_finished=is_sent_finished,
+ prng_key=prng_key,
+ model_kwargs=model_kwargs,
+ )
+
+ def sample_search_cond_fn(state):
+ """state termination condition fn."""
+ has_reached_max_length = state.cur_len == max_length
+ all_sequence_finished = jnp.all(state.is_sent_finished)
+ finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
+ return ~finish_generation
+
+ def sample_search_body_fn(state):
+ """state update fn."""
+ prng_key, prng_key_next = jax.random.split(state.prng_key)
+ model_outputs = model(state.running_token, params=params, **state.model_kwargs)
+
+ logits = model_outputs.logits[:, -1]
+
+ # apply min_length, ...
+ logits = logits_processor(state.sequences, logits, state.cur_len)
+ # apply top_p, top_k, temperature
+ logits = logits_warper(logits, logits, state.cur_len)
+
+ next_token = jax.random.categorical(prng_key, logits, axis=-1)
+
+ next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
+ next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
+ next_token = next_token[:, None]
+
+ next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+
+ return SampleState(
+ cur_len=state.cur_len + 1,
+ sequences=next_sequences,
+ running_token=next_token,
+ is_sent_finished=next_is_sent_finished,
+ model_kwargs=next_model_kwargs,
+ prng_key=prng_key_next,
+ )
+
+ # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
+ if input_ids.shape[1] > 1:
+ state = sample_search_body_fn(state)
+
+ if not trace:
+ state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
+ else:
+ state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
+
+ return FlaxSampleOutput(sequences=state.sequences)
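+
+ # Note (illustrative): each sampling step consumes a fresh key from `jax.random.split`, so results are
+ # reproducible for a fixed seed passed to `generate`, e.g.
+ #
+ #     import jax
+ #     out = model.generate(input_ids, do_sample=True, max_new_tokens=5, prng_key=jax.random.PRNGKey(42))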
+
+ def _beam_search(
+ self,
+ input_ids: None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ length_penalty: Optional[float] = None,
+ early_stopping: Optional[Union[bool, str]] = None,
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
+ trace: bool = True,
+ params: Optional[Dict[str, jnp.ndarray]] = None,
+ num_return_sequences: Optional[int] = None,
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
+ ):
+ """
+ This beam search function is heavily inspired by Flax's official example:
+ https://github.com/google/flax/blob/main/examples/wmt/decode.py
+ """
+
+ def flatten_beam_dim(tensor):
+ """Flattens the first two dimensions of a non-scalar array."""
+ # ignore scalars (e.g. cache index)
+ if tensor.ndim == 0:
+ return tensor
+ return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
+
+ def unflatten_beam_dim(tensor, batch_size, num_beams):
+ """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
+ # ignore scalars (e.g. cache index)
+ if tensor.ndim == 0:
+ return tensor
+ return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
+
+ def gather_beams(nested, beam_indices, batch_size, new_num_beams):
+ """
+ Gathers the beam slices indexed by beam_indices into new beam array.
+ """
+ batch_indices = jnp.reshape(
+ jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
+ )
+
+ def gather_fn(tensor):
+ # ignore scalars (e.g. cache index)
+ if tensor.ndim == 0:
+ return tensor
+ else:
+ return tensor[batch_indices, beam_indices]
+
+ return jax.tree_util.tree_map(gather_fn, nested)
+
+ # init values
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
+ early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
+ num_return_sequences = (
+ num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
+ )
+
+ batch_size, num_beams, cur_len = input_ids.shape
+
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
+ cur_len = jnp.array(cur_len)
+
+ # record the prompt length of decoder
+ decoder_prompt_len = input_ids.shape[-1]
+
+ # per batch,beam-item holding current token in loop.
+ sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
+ running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
+ running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
+
+ # per batch,beam-item state bit indicating if sentence has finished.
+ is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
+
+ # per batch,beam-item score, logprobs
+ running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
+ scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
+
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+ model = self.decode if self.config.is_encoder_decoder else self
+
+ # flatten beam dim
+ if "encoder_outputs" in model_kwargs:
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
+ model_kwargs["encoder_outputs"]["last_hidden_state"]
+ )
+ for kwarg in ["attention_mask", "decoder_attention_mask"]:
+ if kwarg in model_kwargs:
+ model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg])
+
+ # initialize model specific kwargs
+ model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs)
+
+ # initialize state
+ state = BeamSearchState(
+ cur_len=cur_len,
+ running_sequences=running_sequences,
+ running_scores=running_scores,
+ sequences=sequences,
+ scores=scores,
+ is_sent_finished=is_sent_finished,
+ model_kwargs=model_kwargs,
+ )
+
+ def beam_search_cond_fn(state):
+ """beam search state termination condition fn."""
+
+ # 1. is less than max length?
+ not_max_length_yet = state.cur_len < max_length
+
+ # 2. can the new beams still improve?
+ # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion
+ # below for more details.
+ # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
+ # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
+ # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
+ if early_stopping == "never" and length_penalty > 0.0:
+ best_running_score = state.running_scores[:, :1] / (
+ (max_length - decoder_prompt_len) ** length_penalty
+ )
+ else:
+ best_running_score = state.running_scores[:, :1] / (
+ (state.cur_len - decoder_prompt_len) ** length_penalty
+ )
+ worst_finished_score = jnp.where(
+ state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7)
+ )
+ improvement_still_possible = jnp.any(best_running_score > worst_finished_score)
+
+ # 3. is there still a beam that has not finished?
+ still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True))
+
+ return not_max_length_yet & still_open_beam & improvement_still_possible
+
+ def beam_search_body_fn(state, input_ids_length=1):
+ """beam search state update fn."""
+ # 1. Forward current tokens
+ # Collect the current position slice along length to feed the fast
+ # autoregressive decoder model. Flatten the beam dimension into batch
+ # dimension for feeding into the model.
+ input_token = flatten_beam_dim(
+ lax.dynamic_slice(
+ state.running_sequences,
+ (0, 0, state.cur_len - input_ids_length),
+ (batch_size, num_beams, input_ids_length),
+ )
+ )
+ model_outputs = model(input_token, params=params, **state.model_kwargs)
+
+ # unflatten the beam dimension in the logits and in the attention cache arrays
+ logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams)
+ cache = jax.tree_util.tree_map(
+ lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values
+ )
+
+ # adapt logits for FlaxMarianMTModel
+ logits = self._adapt_logits_for_beam_search(logits)
+
+ # 2. Compute log probs
+ # get log probabilities from logits,
+ # process logits with processors (*e.g.* min_length, ...), and
+ # add new logprobs to existing running logprobs scores.
+ log_probs = jax.nn.log_softmax(logits)
+ log_probs = logits_processor(
+ flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len
+ )
+ log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams)
+ log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2)
+ vocab_size = log_probs.shape[2]
+ log_probs = log_probs.reshape((batch_size, num_beams * vocab_size))
+
+ # 3. Retrieve top-K
+ # Each item in the batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*K
+ # candidates with the highest log-probabilities. We keep 2*K beams here so that even if the best K sequences
+ # reach EOS simultaneously, we still have another K sequences remaining to continue the live beam search.
+ beams_to_keep = 2 * num_beams
+ # Gather the top 2*K scores from _all_ beams.
+ topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
+ # Recover the beam index by floor division and gather the 2*K top beams.
+ topk_beam_indices = topk_indices // vocab_size
+ topk_running_sequences = gather_beams(
+ state.running_sequences, topk_beam_indices, batch_size, beams_to_keep
+ )
+ # Recover the token id by modulo division and expand the id array for broadcasting.
+ topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
+ # Update the sequences with the 2*K new tokens.
+ topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len))
+
+ # 4. Check which sequences have ended
+ # Did any of these sequences reach an end marker? To prevent just-finished sequences from staying in the set
+ # of active (running) beam search sequences, set their log probs to a very large negative value.
+ did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id
+ running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7)
+ # 5. Get the running sequences and scores for the next iteration
+ # Determine the top K beam indices (from the top 2*K beams) from the log probs and gather those top K beams.
+ next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1]
+ next_running_sequences, next_running_scores = gather_beams(
+ [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams
+ )
+
+ # 6. Process topk logits
+ # Further process log probs:
+ # - add length penalty
+ # - make sure no scores can be added anymore if beam is full
+ # - make sure sequences that are still running cannot be chosen as a finalized beam
+ topk_log_probs = topk_log_probs / ((state.cur_len + 1 - decoder_prompt_len) ** length_penalty)
+ beams_in_batch_are_full = jnp.broadcast_to(
+ state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape
+ ) & (early_stopping is True)
+ add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
+ topk_log_probs += add_penalty * np.array(-1.0e7)
+
+ # 7. Get the scores, sequences, and finished flags for the next iteration.
+ # Combine sequences, scores, and flags along the beam dimension, compare the newly finished sequence scores to
+ # the existing finished scores, and select the best from the combined set of beams.
+ merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1)
+ merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1)
+ merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1)
+ topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1]
+ next_sequences, next_scores, next_is_sent_finished = gather_beams(
+ [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams
+ )
+
+ # 8. Update model kwargs.
+ # Determine the top k beam indices from the original set of all beams.
+ # With these, gather the top k beam-associated caches.
+ next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams)
+ next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams)
+ model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache)
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+
+ return BeamSearchState(
+ cur_len=state.cur_len + 1,
+ running_scores=next_running_scores,
+ running_sequences=next_running_sequences,
+ scores=next_scores,
+ sequences=next_sequences,
+ is_sent_finished=next_is_sent_finished,
+ model_kwargs=next_model_kwargs,
+ )
+
+ # Always run first iteration outside of `lax.while_loop` to avoid calling `beam_search_cond_fn`
+ # when `state.cur_len` equals `decoder_prompt_len`. This also helps to comply with TPU when
+ # the very first prompt has sequence length > 1.
+ state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state)
+
+ if not trace:
+ state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state)
+ else:
+ state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state)
+
+ # Account for the edge-case where there are no finished sequences for a
+ # particular batch item. If so, return running sequences for that batch item.
+ none_finished = jnp.any(state.is_sent_finished, axis=1)
+ sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences)
+ scores = jnp.where(none_finished[:, None], state.scores, state.running_scores)
+
+ # Take best beams for each batch (the score is sorted in descending order)
+ sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :])
+ scores = flatten_beam_dim(scores[:, :num_return_sequences])
+
+ return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
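+
+ # Shape bookkeeping used by the beam search above, for reference (illustrative sketch):
+ #
+ #     x = jnp.zeros((batch_size, num_beams, seq_len))                  # per-beam layout
+ #     flat = x.reshape((batch_size * num_beams,) + x.shape[2:])        # what `flatten_beam_dim` feeds the model
+ #     back = flat.reshape((batch_size, num_beams) + flat.shape[1:])    # what `unflatten_beam_dim` restores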
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/logits_process.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/logits_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..439a4e702c0ba616bd218f883b012d1598d3893e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/logits_process.py
@@ -0,0 +1,2217 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import math
+import warnings
+from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+
+from ..utils import add_start_docstrings
+from ..utils.logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+ scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
+ beam search, or log softmax for each vocabulary token when using beam search.
+
+ Return:
+ `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
+
+"""
+
+
+class LogitsProcessor:
+ """Abstract base class for all logit processors that can be applied during generation."""
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+
+class LogitsWarper:
+ """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+
+class LogitsProcessorList(list):
+ """
+ This class can be used to create a list of [`LogitsProcessor`] or [`LogitsWarper`] to subsequently process a
+ `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each
+ [`LogitsProcessor`] or [`LogitsWarper`] to the inputs.
+ """
+
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+ scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
+ beam search, or log softmax for each vocabulary token when using beam search.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional kwargs that are specific to a logits processor.
+
+ Return:
+ `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:
+ The processed prediction scores.
+
+ """
+ for processor in self:
+ function_args = inspect.signature(processor.__call__).parameters
+ if len(function_args) > 2:
+ if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
+ raise ValueError(
+ f"Make sure that all the required parameters: {list(function_args.keys())} for "
+ f"{processor.__class__} are passed to the logits processor."
+ )
+ scores = processor(input_ids, scores, **kwargs)
+ else:
+ scores = processor(input_ids, scores)
+
+ return scores
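+
+ # Illustration (a sketch with assumed values): a list can also be built and applied to a batch of scores
+ # outside of `generate`, e.g.
+ #
+ #     import torch
+ #     processors = LogitsProcessorList([MinLengthLogitsProcessor(10, eos_token_id=2), TemperatureLogitsWarper(0.7)])
+ #     input_ids = torch.ones((1, 4), dtype=torch.long)
+ #     scores = processors(input_ids, torch.randn(1, 32000))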
+
+
+class MinLengthLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Note that, for decoder-only models
+ like most LLMs, the length includes the prompt.
+
+ Args:
+ min_length (`int`):
+ The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
+ eos_token_id (`Union[int, List[int]]`):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+ >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+
+ >>> inputs = tokenizer("A number:", return_tensors="pt")
+ >>> gen_out = model.generate(**inputs)
+ >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+ A number: one
+
+ >>> # setting `min_length` to a value smaller than the uncontrolled output length has no impact
+ >>> gen_out = model.generate(**inputs, min_length=3)
+ >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+ A number: one
+
+ >>> # setting a larger `min_length` will force the model to generate beyond its natural ending point, which is not
+ >>> # necessarily incorrect
+ >>> gen_out = model.generate(**inputs, min_length=10)
+ >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+ A number: one thousand, nine hundred and ninety-four
+ ```
+ """
+
+ def __init__(self, min_length: int, eos_token_id: Union[int, List[int]]):
+ if not isinstance(min_length, int) or min_length < 0:
+ raise ValueError(f"`min_length` has to be a non-negative integer, but is {min_length}")
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id):
+ logger.warning(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}")
+
+ self.min_length = min_length
+ self.eos_token_id = eos_token_id
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ cur_len = input_ids.shape[-1]
+ if cur_len < self.min_length:
+ for i in self.eos_token_id:
+ scores[:, i] = -float("inf")
+ return scores
+
+
+class MinNewTokensLengthLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] enforcing a min-length of new tokens by setting EOS (End-Of-Sequence) token probability to 0.
+ Contrarily to [`MinLengthLogitsProcessor`], this processor ignores the prompt.
+
+ Args:
+ prompt_length_to_skip (`int`):
+ The input tokens length. Not a valid argument when used with `generate` as it will automatically assign the
+ input length.
+ min_new_tokens (`int`):
+ The minimum *new* tokens length below which the score of `eos_token_id` is set to `-float("Inf")`.
+ eos_token_id (`Union[int, List[int]]`):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+ >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+
+ >>> inputs = tokenizer(["A number:"], return_tensors="pt")
+ >>> gen_out = model.generate(**inputs)
+ >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+ A number: one
+
+ >>> # setting `min_new_tokens` will force the model to generate beyond its natural ending point, which is not
+ >>> # necessarily incorrect
+ >>> gen_out = model.generate(**inputs, min_new_tokens=2)
+ >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+ A number: one thousand
+ ```
+ """
+
+ def __init__(self, prompt_length_to_skip: int, min_new_tokens: int, eos_token_id: Union[int, List[int]]):
+ for arg_name, arg_value in [
+ ("prompt_length_to_skip", prompt_length_to_skip),
+ ("min_new_tokens", min_new_tokens),
+ ]:
+ if not isinstance(arg_value, int) or arg_value < 0:
+ raise ValueError(f"`{arg_name}` has to be a positive integer, but is {arg_value}")
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id):
+ logger.warning(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}")
+
+ self.prompt_length_to_skip = prompt_length_to_skip
+ self.min_new_tokens = min_new_tokens
+ self.eos_token_id = eos_token_id
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ new_tokens_length = input_ids.shape[-1] - self.prompt_length_to_skip
+ if new_tokens_length < self.min_new_tokens:
+ for i in self.eos_token_id:
+ scores[:, i] = -float("inf")
+
+ return scores
+
+
+class TemperatureLogitsWarper(LogitsWarper):
+ r"""
+ [`LogitsWarper`] for temperature (exponential scaling output probability distribution), which effectively means
+ that it can control the randomness of the predicted tokens. Often used together with [`TopPLogitsWarper`] and
+ [`TopKLogitsWarper`].
+
+
+
+ Make sure that `do_sample=True` is included in the `generate` arguments otherwise the temperature value won't have
+ any effect.
+
+
+
+ Args:
+ temperature (`float`):
+ Strictly positive float value used to modulate the logits distribution. A value smaller than `1` decreases
+ randomness (and vice versa), with `0` being equivalent to shifting all probability mass to the most likely
+ token.
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+ >>> set_seed(0) # for reproducibility
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> model.config.pad_token_id = model.config.eos_token_id
+ >>> inputs = tokenizer(["Hugging Face Company is"], return_tensors="pt")
+
+ >>> # With temperature=1.0, the default, we consistently get random outputs due to random sampling.
+ >>> generate_kwargs = {"max_new_tokens": 10, "do_sample": True, "temperature": 1.0, "num_return_sequences": 2}
+ >>> outputs = model.generate(**inputs, **generate_kwargs)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+ ['Hugging Face Company is a joint venture between GEO Group, one of',
+ 'Hugging Face Company is not an exact science – but what we believe does']
+
+ >>> # However, with temperature close to 0, it approximates greedy decoding strategies (invariant)
+ >>> generate_kwargs["temperature"] = 0.0001
+ >>> outputs = model.generate(**inputs, **generate_kwargs)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+ ['Hugging Face Company is a company that has been around for over 20 years',
+ 'Hugging Face Company is a company that has been around for over 20 years']
+ ```
+ """
+
+ def __init__(self, temperature: float):
+ if not isinstance(temperature, float) or not (temperature > 0):
+ except_msg = (
+ f"`temperature` (={temperature}) has to be a strictly positive float, otherwise your next token "
+ "scores will be invalid."
+ )
+ if isinstance(temperature, float) and temperature == 0.0:
+ except_msg += " If you're looking for greedy decoding strategies, set `do_sample=False`."
+ raise ValueError(except_msg)
+
+ self.temperature = temperature
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ scores = scores / self.temperature
+ return scores
+
+
+class RepetitionPenaltyLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that prevents the repetition of previous tokens through a penalty. This penalty is applied at
+ most once per token. Note that, for decoder-only models like most LLMs, the considered tokens include the prompt.
+
+ In the original [paper](https://arxiv.org/pdf/1909.05858.pdf), the authors suggest the use of a penalty of around
+ 1.2 to achieve a good balance between truthful generation and lack of repetition. To penalize and reduce
+ repetition, use `penalty` values above 1.0, where a higher value penalizes more strongly. To reward and encourage
+ repetition, use `penalty` values between 0.0 and 1.0, where a lower value rewards more strongly.
+
+ Args:
+ penalty (`float`):
+ The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 penalizes previously generated
+ tokens. Between 0.0 and 1.0 rewards previously generated tokens.
+
+ Examples:
+
+ ```py
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> # Initializing the model and tokenizer for it
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+ >>> inputs = tokenizer(["I'm not going to"], return_tensors="pt")
+
+ >>> # This shows a normal generate without any specific parameters
+ >>> summary_ids = model.generate(**inputs)
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
+ I'm not going to be able to do that. I'm going to be able to do that
+
+ >>> # This generates a penalty for repeated tokens
+ >>> penalized_ids = model.generate(**inputs, repetition_penalty=1.1)
+ >>> print(tokenizer.batch_decode(penalized_ids, skip_special_tokens=True)[0])
+ I'm not going to be able to do that. I'll just have to go out and play
+ ```
+ """
+
+ def __init__(self, penalty: float):
+ if not isinstance(penalty, float) or not (penalty > 0):
+ raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
+
+ self.penalty = penalty
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ score = torch.gather(scores, 1, input_ids)
+
+ # if score < 0 then repetition penalty has to be multiplied to reduce the token probabilities
+ score = torch.where(score < 0, score * self.penalty, score / self.penalty)
+
+ scores.scatter_(1, input_ids, score)
+ return scores
+
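+# A minimal numeric sketch of the penalty above (illustrative values only): with penalty=1.2, a previously seen token
+# with a positive logit is divided by the penalty and one with a negative logit is multiplied by it, so both become
+# less likely.
+#
+#     import torch
+#     scores = torch.tensor([[2.0, -2.0, 0.5]])
+#     seen = torch.tensor([[0, 1]])  # token ids 0 and 1 already appeared
+#     score = torch.gather(scores, 1, seen)
+#     score = torch.where(score < 0, score * 1.2, score / 1.2)
+#     scores.scatter_(1, seen, score)  # tensor([[ 1.6667, -2.4000,  0.5000]])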
+
+class EncoderRepetitionPenaltyLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that works similarly to [`RepetitionPenaltyLogitsProcessor`], but with an *inverse* penalty
+ that is applied to the tokens present in the prompt. In other words, a penalty above 1.0 increases the odds of
+ selecting tokens that were present in the prompt.
+
+ It was designed to avoid hallucination in input-grounded tasks, like summarization. Although originally intended
+ for encoder-decoder models, it can also be used with decoder-only models like LLMs.
+
+ Args:
+ penalty (`float`):
+ The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 rewards prompt tokens. Between 0.0
+ and 1.0 penalizes prompt tokens.
+ encoder_input_ids (`torch.LongTensor`):
+ The encoder_input_ids that should be repeated within the decoder ids.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+ >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+
+ >>> inputs = tokenizer(["Alice and Bob. The third member's name was"], return_tensors="pt")
+ >>> gen_out = model.generate(**inputs)
+ >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+ Alice and Bob. The third member's name was not mentioned.
+
+ >>> # With the `encoder_repetition_penalty` argument we can trigger this logits processor in `generate`, which can
+ >>> # promote the use of prompt tokens ("Bob" in this example)
+ >>> gen_out = model.generate(**inputs, encoder_repetition_penalty=1.2)
+ >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
+ Alice and Bob. The third member's name was Bob. The third member's name was Bob.
+ ```
+ """
+
+ def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor):
+ if not isinstance(penalty, float) or not (penalty > 0):
+ raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
+
+ self.penalty = 1 / penalty
+ self.encoder_input_ids = encoder_input_ids
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ score = torch.gather(scores, 1, self.encoder_input_ids)
+
+ # if score < 0 then hallucination penalty has to be multiplied to increase the token probabilities
+ score = torch.where(score < 0, score * self.penalty, score / self.penalty)
+
+ scores.scatter_(1, self.encoder_input_ids, score)
+ return scores
+
+
+class TopPLogitsWarper(LogitsWarper):
+ """
+ [`LogitsWarper`] that performs top-p, i.e. restricting to the smallest set of most probable tokens whose cumulative
+ probability is at least `top_p`. Often used together with [`TemperatureLogitsWarper`] and [`TopKLogitsWarper`].
+
+ Args:
+ top_p (`float`):
+ If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
+ higher are kept for generation.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+ >>> set_seed(0)
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+ >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt")
+
+ >>> # With sampling, the output is unexpected -- sometimes too unexpected.
+ >>> outputs = model.generate(**inputs, do_sample=True)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: 1, 2, 0, 2, 2. 2, 2, 2, 2
+
+ >>> # With `top_p` sampling, the output gets restricted to high-probability tokens.
+ >>> # Pro tip: In practice, LLMs use `top_p` in the 0.9-0.95 range.
+ >>> outputs = model.generate(**inputs, do_sample=True, top_p=0.1)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9
+ ```
+ """
+
+ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ top_p = float(top_p)
+ if top_p < 0 or top_p > 1.0:
+ raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
+
+ self.top_p = top_p
+ self.filter_value = filter_value
+ self.min_tokens_to_keep = min_tokens_to_keep
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ sorted_logits, sorted_indices = torch.sort(scores, descending=False)
+ cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
+
+ # Remove tokens whose cumulative probability (in ascending order) is at most 1 - top_p, so the kept tokens sum to at least top_p
+ sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p)
+ # Keep at least min_tokens_to_keep
+ sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0
+
+ # scatter sorted tensors to original indexing
+ indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
+ scores = scores.masked_fill(indices_to_remove, self.filter_value)
+ return scores
+
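+# A minimal sketch of the filtering above on a toy distribution (illustrative values only): with probabilities
+# [0.5, 0.3, 0.15, 0.05] and top_p=0.8, only the two most probable tokens survive, since they are the smallest set
+# whose cumulative probability reaches 0.8.
+#
+#     import torch
+#     scores = torch.tensor([[0.5, 0.3, 0.15, 0.05]]).log()
+#     TopPLogitsWarper(top_p=0.8)(None, scores)  # tensor([[-0.6931, -1.2040, -inf, -inf]])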
+
+class TopKLogitsWarper(LogitsWarper):
+ r"""
+ [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Often used together
+ with [`TemperatureLogitsWarper`] and [`TopPLogitsWarper`].
+
+ Args:
+ top_k (`int`):
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+ >>> set_seed(0)
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+ >>> inputs = tokenizer("A sequence: A, B, C, D", return_tensors="pt")
+
+ >>> # With sampling, the output is unexpected -- sometimes too unexpected.
+ >>> outputs = model.generate(**inputs, do_sample=True)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: A, B, C, D, G, H, I. A, M
+
+ >>> # With `top_k` sampling, the output gets restricted to the k most likely tokens.
+ >>> # Pro tip: In practice, LLMs use `top_k` in the 5-50 range.
+ >>> outputs = model.generate(**inputs, do_sample=True, top_k=2)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: A, B, C, D, E, F, G, H, I
+ ```
+ """
+
+ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ if not isinstance(top_k, int) or top_k <= 0:
+ raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
+
+ self.top_k = max(top_k, min_tokens_to_keep)
+ self.filter_value = filter_value
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ top_k = min(self.top_k, scores.size(-1)) # Safety check
+ # Remove all tokens with a probability less than the last token of the top-k
+ indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]
+ scores = scores.masked_fill(indices_to_remove, self.filter_value)
+ return scores
+
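+# A minimal sketch of the cutoff above (illustrative values only): with top_k=2, the threshold is the second-largest
+# logit and everything strictly below it is filtered.
+#
+#     import torch
+#     scores = torch.tensor([[3.0, 1.0, 2.0, 0.0]])
+#     TopKLogitsWarper(top_k=2)(None, scores)  # tensor([[3., -inf, 2., -inf]])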
+
+class TypicalLogitsWarper(LogitsWarper):
+ r"""
+ [`LogitsWarper`] that performs typical decoding. Inspired by how humans use language, it prioritizes tokens whose
+ log probability is close to the entropy of the token probability distribution. This means that the most likely
+ tokens may be discarded in the process.
+
+ See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
+
+ Args:
+ mass (`float`, *optional*, defaults to 0.9):
+ Value of `typical_p`, strictly between 0 and 1. Defaults to 0.9.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+ >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+
+ >>> inputs = tokenizer("1, 2, 3", return_tensors="pt")
+
+ >>> # We can see that greedy decoding produces a sequence of numbers
+ >>> outputs = model.generate(**inputs)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+
+ >>> # For this particular seed, we can see that sampling produces nearly the same low-information (= low entropy)
+ >>> # sequence
+ >>> set_seed(18)
+ >>> outputs = model.generate(**inputs, do_sample=True)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ 1, 2, 3, 4, 5, 6, 7, 8, 9 and 10
+
+ >>> # With `typical_p` set, the most obvious sequence is no longer produced, which may be good for your problem
+ >>> set_seed(18)
+ >>> outputs = model.generate(
+ ... **inputs, do_sample=True, typical_p=0.1, return_dict_in_generate=True, output_scores=True
+ ... )
+ >>> print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0])
+ 1, 2, 3 and 5
+
+ >>> # We can see that the token corresponding to "4" (token 934) in the second position, the most likely token
+ >>> # as seen with greedy decoding, was entirely blocked out
+ >>> print(outputs.scores[1][0, 934])
+ tensor(-inf)
+ ```
+ """
+
+ def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ mass = float(mass)
+ if not (mass > 0 and mass < 1):
+ raise ValueError(f"`typical_p` has to be a float > 0 and < 1, but is {mass}")
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
+
+ self.filter_value = filter_value
+ self.mass = mass
+ self.min_tokens_to_keep = min_tokens_to_keep
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # calculate entropy
+ normalized = torch.nn.functional.log_softmax(scores, dim=-1)
+ p = torch.exp(normalized)
+ ent = -(normalized * p).nansum(-1, keepdim=True)
+
+ # shift and sort
+ shifted_scores = torch.abs((-normalized) - ent)
+ sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False)
+ sorted_logits = scores.gather(-1, sorted_indices)
+ cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
+
+ # Remove tokens with cumulative mass above the threshold
+ last_ind = (cumulative_probs < self.mass).sum(dim=1)
+ last_ind.clamp_(max=sorted_scores.shape[-1] - 1)
+ sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1))
+ sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
+ indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
+
+ scores = scores.masked_fill(indices_to_remove, self.filter_value)
+ return scores
+
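+# A minimal numeric sketch of the "typicality" ranking above (illustrative values only): tokens are ordered by how far
+# their information content, -log p(token), is from the distribution's entropy, and the closest ones are kept until
+# the cumulative probability reaches `mass`.
+#
+#     import torch
+#     p = torch.tensor([0.7, 0.2, 0.1])
+#     entropy = -(p * p.log()).sum()  # ≈ 0.8018
+#     (-p.log() - entropy).abs()      # ≈ tensor([0.4451, 0.8076, 1.5008]) -> token 0 is the most "typical" here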
+
+class EpsilonLogitsWarper(LogitsWarper):
+ r"""
+ [`LogitsWarper`] that performs epsilon-sampling, i.e. restricting to tokens with `prob >= epsilon`. Takes the
+ largest min_tokens_to_keep tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more information.
+
+ Args:
+ epsilon (`float`):
+ If set to > 0, only tokens with a probability of at least `epsilon` are kept for generation.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
+
+ Examples:
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+ >>> set_seed(0)
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+ >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt")
+
+ >>> # With sampling, the output is unexpected -- sometimes too unexpected.
+ >>> outputs = model.generate(**inputs, do_sample=True)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: 1, 2, 0, 2, 2. 2, 2, 2, 2
+
+ >>> # With epsilon sampling, the output gets restricted to high-probability tokens. Note that this is similar to
+ >>> # Top P sampling, which restricts tokens based on their cumulative probability.
+ >>> # Pro tip: The paper recommends using `epsilon_cutoff` values between 3e-4 and 9e-4
+ >>> outputs = model.generate(**inputs, do_sample=True, epsilon_cutoff=0.1)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9
+ ```
+ """
+
+ def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ epsilon = float(epsilon)
+ if epsilon <= 0 or epsilon >= 1:
+ raise ValueError(f"`epsilon_cutoff` has to be a float > 0 and < 1, but is {epsilon}")
+
+ min_tokens_to_keep = int(min_tokens_to_keep)
+ if min_tokens_to_keep < 1:
+ raise ValueError(
+ f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}"
+ )
+
+ self.epsilon = epsilon
+ self.filter_value = filter_value
+ self.min_tokens_to_keep = min_tokens_to_keep
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # Determine which indices to remove
+ probabilities = scores.softmax(dim=-1)
+ indices_to_remove = probabilities < self.epsilon
+
+ # Keep the words with the 'min_tokens_to_keep'-highest probabilities
+ top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check
+ indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None])
+
+ scores = scores.masked_fill(indices_to_remove, self.filter_value)
+ return scores
+
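+# A minimal sketch of the cutoff above (illustrative values only): with epsilon=0.1, every token whose probability
+# falls below 0.1 is filtered, independently of how the remaining probability mass is distributed.
+#
+#     import torch
+#     scores = torch.tensor([[0.6, 0.25, 0.09, 0.06]]).log()
+#     EpsilonLogitsWarper(epsilon=0.1)(None, scores)  # keeps tokens 0 and 1, sets the rest to -inf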
+
+class EtaLogitsWarper(LogitsWarper):
+ r"""
+ [`LogitsWarper`] that performs eta-sampling, a technique to filter out tokens with probabilities below a dynamic
+ cutoff value, `eta`, which is calculated based on a combination of the hyperparameter `epsilon` and the entropy of
+ the token probabilities, i.e. `eta := min(epsilon, sqrt(epsilon * e^-entropy(probabilities)))`. Takes the largest
+ min_tokens_to_keep tokens if no tokens satisfy this constraint. It addresses the issue of poor quality in long
+ samples of text generated by neural language models leading to more coherent and fluent text. See [Truncation
+ Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more information. Note: `do_sample`
+ must be set to `True` for this `LogitsWarper` to work.
+
+
+ Args:
+ epsilon (`float`):
+ A float value in the range (0, 1). Hyperparameter used to calculate the dynamic cutoff value, `eta`. The
+ suggested values from the paper range from 3e-4 to 4e-3, depending on the size of the model.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All values that are found to be below the dynamic cutoff value, `eta`, are set to this float value. This
+ parameter is useful when logits need to be modified for very low probability tokens that should be excluded
+ from generation entirely.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Specifies the minimum number of tokens that must be kept for generation, regardless of their probabilities.
+ For example, if `min_tokens_to_keep` is set to 1, at least one token will always be kept for generation,
+ even if all tokens have probabilities below the cutoff `eta`.
+
+ Examples:
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+ >>> set_seed(0)
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+ >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt")
+
+ >>> # With sampling, the output is unexpected -- sometimes too unexpected.
+ >>> outputs = model.generate(**inputs, do_sample=True)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: 1, 2, 0, 2, 2. 2, 2, 2, 2
+
+ >>> # With eta sampling, the output gets restricted to high-probability tokens. You can see it as a dynamic form of
+ >>> # epsilon sampling that adapts its cutoff probability based on the entropy (high entropy = lower cutoff).
+ >>> # Pro tip: The paper recommends using `eta_cutoff` values between 3e-4 and 4e-3
+ >>> outputs = model.generate(**inputs, do_sample=True, eta_cutoff=0.1)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9
+ ```
+ """
+
+ def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ epsilon = float(epsilon)
+ if epsilon <= 0 or epsilon >= 1:
+ raise ValueError(f"`eta_cutoff` has to be a float > 0 and < 1, but is {epsilon}")
+
+ min_tokens_to_keep = int(min_tokens_to_keep)
+ if min_tokens_to_keep < 1:
+ raise ValueError(
+ f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}"
+ )
+
+ self.epsilon = torch.tensor(epsilon)
+ self.filter_value = filter_value
+ self.min_tokens_to_keep = min_tokens_to_keep
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # Calculate the adaptive cutoff
+ probabilities = scores.softmax(dim=-1)
+ entropy = torch.distributions.Categorical(logits=scores).entropy()
+ eta = torch.min(self.epsilon, torch.sqrt(self.epsilon) * torch.exp(-entropy))[..., None]
+ indices_to_remove = probabilities < eta
+
+ # Keep the words with the 'min_tokens_to_keep'-highest probabilities
+ top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check
+ indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None])
+
+ scores = scores.masked_fill(indices_to_remove, self.filter_value)
+ return scores
+
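+# A minimal numeric sketch of the adaptive cutoff above (illustrative values only): the higher the entropy of the
+# distribution, the lower the cutoff, so more tokens survive on "uncertain" steps.
+#
+#     import torch
+#     epsilon = torch.tensor(0.1)
+#     p = torch.tensor([0.4, 0.3, 0.2, 0.1])
+#     entropy = -(p * p.log()).sum()                                       # ≈ 1.2799
+#     eta = torch.min(epsilon, torch.sqrt(epsilon) * torch.exp(-entropy))  # ≈ 0.0879, i.e. below epsilon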
+
+def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int):
+ """
+ Assume ngram_size=2 and prev_input_ids=tensor([[40, 2883, 2712, 4346]]). The generated ngrams (one dict per
+ hypothesis) look like this: [{(40,): [2883], (2883,): [2712], (2712,): [4346]}].
+
+ Args:
+ ngram_size (`int`):
+ The number of sequential tokens taken as a group, which may only occur once before being banned.
+ prev_input_ids (`torch.Tensor`):
+ Generated token ids for the current hypothesis.
+ num_hypos (`int`):
+ The number of hypotheses for which n-grams need to be generated.
+
+ Returns:
+ generated_ngrams (`dict`):
+ Dictionary of generated ngrams.
+ """
+ # Initialize an empty list of dictionaries, one for each hypothesis (index) in the range of num_hypos
+ generated_ngrams = [{} for _ in range(num_hypos)]
+ for idx in range(num_hypos):
+ gen_tokens = prev_input_ids[idx].tolist()
+ generated_ngram = generated_ngrams[idx]
+ # Loop through each n-gram of size ngram_size in the list of tokens (gen_tokens)
+ for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]):
+ prev_ngram_tuple = tuple(ngram[:-1])
+ generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
+ return generated_ngrams
+
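+# A minimal usage sketch (illustrative token ids only):
+#
+#     import torch
+#     _get_ngrams(2, torch.tensor([[40, 2883, 2712, 4346]]), num_hypos=1)
+#     # -> [{(40,): [2883], (2883,): [2712], (2712,): [4346]}]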
+
+def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len):
+ """
+ Determines the banned tokens for the current hypothesis based on previously generated n-grams.
+
+ Args:
+ banned_ngrams (`dict`):
+ A dictionary containing previously generated n-grams for each hypothesis.
+ prev_input_ids (`torch.Tensor`):
+ Generated token ids for the current hypothesis.
+ ngram_size (`int`):
+ The number of sequential tokens taken as a group, which may only occur once before being banned.
+ cur_len (`int`):
+ The current length of the token sequences for which the n-grams are being checked.
+
+ Returns:
+ List of tokens that are banned.
+ """
+ # Before decoding the next token, prevent decoding of ngrams that have already appeared
+ start_idx = cur_len + 1 - ngram_size
+ ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())
+ return banned_ngrams.get(ngram_idx, [])
+
+
+def _calc_banned_ngram_tokens(
+ ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int
+) -> List[Iterable[int]]:
+ """Copied from fairseq for no_repeat_ngram in beam_search"""
+ if cur_len + 1 < ngram_size:
+ # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
+ return [[] for _ in range(num_hypos)]
+ generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos)
+ banned_tokens = [
+ _get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len)
+ for hypo_idx in range(num_hypos)
+ ]
+ return banned_tokens
+
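+# A minimal usage sketch (illustrative token ids only): with ngram_size=2 and the bigram (5, 6) already generated,
+# token 6 is banned as soon as the sequence ends in 5 again.
+#
+#     import torch
+#     _calc_banned_ngram_tokens(2, torch.tensor([[5, 6, 7, 5]]), num_hypos=1, cur_len=4)
+#     # -> [[6]]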
+
+class NoRepeatNGramLogitsProcessor(LogitsProcessor):
+ r"""
+ N-grams are groups of "n" consecutive words, characters, or tokens taken from a sequence of text. Given the
+ sentence: "She runs fast", the bi-grams (n=2) would be ("she", "runs") and ("runs", "fast"). In text generation,
+ avoiding repetitions of word sequences provides a more diverse output. This [`LogitsProcessor`] enforces no
+ repetition of n-grams by setting the scores of banned tokens to negative infinity which eliminates those tokens
+ from consideration when further processing the scores. Note that, for decoder-only models like most LLMs, the
+ prompt is also considered to obtain the n-grams.
+ The implementation is adapted from
+ [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
+
+ <Tip>
+
+ Use n-gram penalties with care. For instance, penalizing 2-grams (bigrams) in an article about the city of New York
+ might lead to undesirable outcomes where the city's name appears only once in the entire text.
+ [Reference](https://huggingface.co/blog/how-to-generate)
+
+ </Tip>
+
+ Args:
+ ngram_size (`int`):
+ All ngrams of size `ngram_size` can only occur once.
+
+ Examples:
+
+ ```py
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+ >>> inputs = tokenizer(["Today I"], return_tensors="pt")
+
+ >>> output = model.generate(**inputs)
+ >>> print(tokenizer.decode(output[0], skip_special_tokens=True))
+ Today I’m not sure if I’m going to be able to do it.
+
+ >>> # Now let's add ngram size using `no_repeat_ngram_size`. This stops the repetitions ("I’m") in the output.
+ >>> output = model.generate(**inputs, no_repeat_ngram_size=2)
+ >>> print(tokenizer.decode(output[0], skip_special_tokens=True))
+ Today I’m not sure if I can get a better understanding of the nature of this issue
+ ```
+ """
+
+ def __init__(self, ngram_size: int):
+ if not isinstance(ngram_size, int) or ngram_size <= 0:
+ raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
+ self.ngram_size = ngram_size
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ num_batch_hypotheses = scores.shape[0]
+ cur_len = input_ids.shape[-1]
+ banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len)
+ for i, banned_tokens in enumerate(banned_batch_tokens):
+ scores[i, banned_tokens] = -float("inf")
+
+ return scores
+
+
+class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that works similarly to [`NoRepeatNGramLogitsProcessor`], but applied exclusively to prevent
+ the repetition of n-grams present in the prompt.
+
+ It was designed to promote chattiness in a language model, by preventing the generation of n-grams present in
+ previous conversation rounds.
+
+ Args:
+ encoder_ngram_size (`int`):
+ All ngrams of size `encoder_ngram_size` contained in the encoder input ids cannot be repeated in the decoder ids.
+ encoder_input_ids (`torch.LongTensor`):
+ The encoder_input_ids that should not be repeated within the decoder ids.
+
+ Examples:
+
+ ```py
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+
+ >>> inputs = tokenizer("Alice: I love cats. What do you love?\nBob:", return_tensors="pt")
+
+ >>> # With greedy decoding, we see Bob repeating Alice's opinion. If Bob was a chatbot, it would be a poor one.
+ >>> outputs = model.generate(**inputs)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ Alice: I love cats. What do you love?
+ Bob: I love cats. What do you
+
+ >>> # With this logits processor, we can prevent Bob from repeating Alice's opinion.
+ >>> outputs = model.generate(**inputs, encoder_no_repeat_ngram_size=2)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ Alice: I love cats. What do you love?
+ Bob: My cats are very cute.
+ ```
+ """
+
+ def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor):
+ if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0:
+ raise ValueError(
+ f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}"
+ )
+ self.ngram_size = encoder_ngram_size
+ if len(encoder_input_ids.shape) == 1:
+ encoder_input_ids = encoder_input_ids.unsqueeze(0)
+ self.batch_size = encoder_input_ids.shape[0]
+ self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size)
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # B x num_beams
+ num_hypos = scores.shape[0]
+ num_beams = num_hypos // self.batch_size
+ cur_len = input_ids.shape[-1]
+ banned_batch_tokens = [
+ _get_generated_ngrams(
+ self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len
+ )
+ for hypo_idx in range(num_hypos)
+ ]
+
+ for i, banned_tokens in enumerate(banned_batch_tokens):
+ scores[i, banned_tokens] = -float("inf")
+
+ return scores
+
+
+class SequenceBiasLogitsProcessor(LogitsProcessor):
+ """
+ [`LogitsProcessor`] that applies an additive bias on sequences. The bias is applied to the last token of a sequence
+ when the next generated token can complete it. Consequently, to get the most out of biasing sequences with more than
+ one token, consider using beam methods (to gracefully work around partially completed sequences that have a
+ negative bias) and applying the bias to their prefixes (to ensure the bias is applied earlier).
+
+ <Tip>
+
+ In order to get the token ids of the sequences that you want to bias, make sure to set `add_prefix_space=True` when
+ initializing the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The
+ `add_prefix_space` argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours
+ come from `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers).
+
+ </Tip>
+
+ Args:
+ sequence_bias (`Dict[Tuple[int], float]`):
+ Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the
+ sequence being selected, while negative biases do the opposite. If a sequence has a length of 1, its bias
+ will always be applied. Otherwise, the bias will only be applied if the sequence in question is about to be
+ completed (in the token selection step after this processor is applied).
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> inputs = tokenizer(["The full name of Donald is Donald"], return_tensors="pt")
+
+ >>> summary_ids = model.generate(inputs["input_ids"], max_new_tokens=4)
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
+ The full name of Donald is Donald J. Trump Jr
+
+ >>> # Now let's control generation through a bias. Please note that the tokenizer is initialized differently!
+ >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained("openai-community/gpt2", add_prefix_space=True)
+
+
+ >>> def get_tokens_as_tuple(word):
+ ... return tuple(tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0])
+
+
+ >>> # If we add a negative bias without beam search, it may become "stuck" in a prefix without good continuations
+ >>> sequence_bias = {get_tokens_as_tuple("Trump"): -10.0}
+ >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, sequence_bias=sequence_bias)
+ >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0])
+ The full name of Donald is Donald J. Donald,
+
+ >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias)
+ >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0])
+ The full name of Donald is Donald Rumsfeld,
+
+ >>> # We can also add a positive bias to nudge the model towards specific tokens or continuations
+ >>> sequence_bias = {get_tokens_as_tuple("Donald Duck"): 10.0}
+ >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias)
+ >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0])
+ The full name of Donald is Donald Duck.
+ ```
+ """
+
+ def __init__(self, sequence_bias: Dict[Tuple[int], float]):
+ self.sequence_bias = sequence_bias
+ self._validate_arguments()
+
+ # Bias variables that will be populated on the first call (for backward compatibility purposes, the vocabulary
+ # size is inferred on the first usage, which prevents initializing it here)
+ self.length_1_bias = None
+ self.prepared_bias_variables = False
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # 1 - Prepares the bias tensors. This is only needed the first time the logit processor is called.
+ if not self.prepared_bias_variables:
+ self._prepare_bias_variables(scores)
+
+ # 2 - prepares an empty bias to add
+ bias = torch.zeros_like(scores)
+
+ # 3 - include the bias from length = 1
+ bias += self.length_1_bias
+
+ # 4 - include the bias from length > 1, after determining which biased sequences may be completed.
+ for sequence_ids, sequence_bias in self.sequence_bias.items():
+ if len(sequence_ids) == 1: # the sequence is of length 1, already applied
+ continue
+ if len(sequence_ids) > input_ids.shape[1]: # the sequence is longer than the context, ignore
+ continue
+ prefix_length = len(sequence_ids) - 1
+ last_token = sequence_ids[-1]
+ matching_rows = torch.eq(
+ input_ids[:, -prefix_length:],
+ torch.tensor(sequence_ids[:-1], dtype=input_ids.dtype, device=input_ids.device),
+ ).prod(dim=1)
+ bias[:, last_token] += torch.where(
+ matching_rows.bool(),
+ torch.tensor(sequence_bias, device=input_ids.device),
+ torch.tensor(0.0, device=input_ids.device),
+ )
+
+ # 5 - apply the bias to the scores
+ scores = scores + bias
+ return scores
+
+ def _prepare_bias_variables(self, scores: torch.FloatTensor):
+ vocabulary_size = scores.shape[-1]
+
+ # Check biased tokens out of bounds
+ invalid_biases = []
+ for sequence_ids in self.sequence_bias:
+ for token_id in sequence_ids:
+ if token_id >= vocabulary_size:
+ invalid_biases.append(token_id)
+ if len(invalid_biases) > 0:
+ raise ValueError(
+ f"The model vocabulary size is {vocabulary_size}, but the following tokens were being biased: "
+ f"{invalid_biases}"
+ )
+
+ # Precompute the bias tensors to be applied. Sequences of length 1 are kept separately, as they can be applied
+ # with simpler logic.
+ self.length_1_bias = torch.zeros((vocabulary_size,), dtype=torch.float).to(scores.device)
+ for sequence_ids, bias in self.sequence_bias.items():
+ if len(sequence_ids) == 1:
+ self.length_1_bias[sequence_ids[-1]] = bias
+
+ self.prepared_bias_variables = True
+
+ def _validate_arguments(self):
+ sequence_bias = self.sequence_bias
+ if not isinstance(sequence_bias, dict) or len(sequence_bias) == 0:
+ raise ValueError(f"`sequence_bias` has to be a non-empty dictionary, but is {sequence_bias}.")
+ if any(not isinstance(sequence_ids, tuple) for sequence_ids in sequence_bias.keys()):
+ raise ValueError(f"`sequence_bias` has to be a dict with tuples as keys, but is {sequence_bias}.")
+ if any(
+ any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in sequence_ids)
+ or len(sequence_ids) == 0
+ for sequence_ids in sequence_bias.keys()
+ ):
+ raise ValueError(
+ f"Each key in `sequence_bias` has to be a non-empty tuple of positive integers, but is "
+ f"{sequence_bias}."
+ )
+ if any(not isinstance(bias, float) for bias in sequence_bias.values()):
+ raise ValueError(f"`sequence_bias` has to be a dict with floats as values, but is {sequence_bias}.")
+
+
+class NoBadWordsLogitsProcessor(SequenceBiasLogitsProcessor):
+ """
+ [`LogitsProcessor`] that enforces that specified sequences will never be selected.
+
+ <Tip>
+
+ In order to get the token ids of the words that should not appear in the generated text, make sure to set
+ `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words,
+ add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers,
+ as fast tokenizers' prefixing behaviours come from `pre tokenizers`. Read more
+ [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers).
+
+ </Tip>
+
+ Args:
+ bad_words_ids (`List[List[int]]`):
+ List of list of token ids that are not allowed to be generated.
+ eos_token_id (`Union[int, List[int]]`):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> inputs = tokenizer(["In a word, the cake is a"], return_tensors="pt")
+
+ >>> output_ids = model.generate(inputs["input_ids"], max_new_tokens=5, pad_token_id=tokenizer.eos_token_id)
+ >>> print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
+ In a word, the cake is a bit of a mess.
+
+ >>> # Now let's take the bad words out. Please note that the tokenizer is initialized differently
+ >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained("openai-community/gpt2", add_prefix_space=True)
+
+
+ >>> def get_tokens_as_list(word_list):
+ ... "Converts a sequence of words into a list of tokens"
+ ... tokens_list = []
+ ... for word in word_list:
+ ... tokenized_word = tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0]
+ ... tokens_list.append(tokenized_word)
+ ... return tokens_list
+
+
+ >>> bad_words_ids = get_tokens_as_list(word_list=["mess"])
+ >>> output_ids = model.generate(
+ ... inputs["input_ids"], max_new_tokens=5, bad_words_ids=bad_words_ids, pad_token_id=tokenizer.eos_token_id
+ ... )
+ >>> print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
+ In a word, the cake is a bit of a surprise.
+ ```
+ """
+
+ def __init__(self, bad_words_ids: List[List[int]], eos_token_id: Union[int, List[int]]):
+ self.bad_word_ids = bad_words_ids
+ self._validate_arguments()
+
+ # Filter EOS token from bad_words_ids
+ if eos_token_id is None:
+ eos_token_id = []
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ bad_words_ids = list(
+ filter(lambda bad_token_seq: all(bad_token_seq != [i] for i in eos_token_id), bad_words_ids)
+ )
+
+ # Forbidding a sequence is equivalent to setting its bias to -inf
+ sequence_bias = {tuple(sequence): float("-inf") for sequence in bad_words_ids}
+ super().__init__(sequence_bias=sequence_bias)
+
+ def _validate_arguments(self):
+ bad_words_ids = self.bad_word_ids
+ if not isinstance(bad_words_ids, list) or len(bad_words_ids) == 0:
+ raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
+ if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
+ raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
+ if any(
+ any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
+ for bad_word_ids in bad_words_ids
+ ):
+ raise ValueError(
+ f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
+ )
+
+
+class PrefixConstrainedLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that enforces constrained generation and is useful for prefix-conditioned constrained
+ generation. See [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904) for more information.
+
+ Args:
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`):
+ This function constrains the beam search to allowed tokens only at each step. This function takes 2
+ arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the
+ next generation step, conditioned on the previously generated tokens `input_ids` and the batch ID
+ `batch_id`.
+
+ Examples:
+
+ ```py
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+
+ >>> inputs = tokenizer("Alice and Bob", return_tensors="pt")
+
+ >>> # By default, it continues generating according to the model's logits
+ >>> outputs = model.generate(**inputs, max_new_tokens=5)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ Alice and Bob are friends
+
+ >>> # We can constrain it with `prefix_allowed_tokens_fn` to force a certain behavior based on a prefix.
+ >>> # For instance, we can force an entire entity to be generated when its beginning is detected.
+ >>> entity = tokenizer(" Bob Marley", return_tensors="pt").input_ids[0] # 3 tokens
+ >>> def prefix_allowed_tokens_fn(batch_id, input_ids):
+ ... '''
+ ... Attempts to generate 'Bob Marley' when 'Bob' is detected.
+ ... In this case, `batch_id` is not used, but you can set rules for each batch member.
+ ... '''
+ ... if input_ids[-1] == entity[0]:
+ ... return [entity[1].item()]
+ ... elif input_ids[-2] == entity[0] and input_ids[-1] == entity[1]:
+ ... return [entity[2].item()]
+ ... return list(range(tokenizer.vocab_size)) # If no match, allow all tokens
+
+ >>> outputs = model.generate(**inputs, max_new_tokens=5, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn)
+ >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+ Alice and Bob Marley
+ ```
+ """
+
+ def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int):
+ self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
+ self._num_beams = num_beams
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ mask = torch.full_like(scores, -math.inf)
+ for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])):
+ for beam_id, sent in enumerate(beam_sent):
+ prefix_allowed_tokens = self._prefix_allowed_tokens_fn(batch_id, sent)
+ if len(prefix_allowed_tokens) == 0:
+ raise ValueError(
+ f"`prefix_allowed_tokens_fn` returned an empty list for batch ID {batch_id}."
+ f"This means that the constraint is unsatisfiable. Please check your implementation"
+ f"of `prefix_allowed_tokens_fn` "
+ )
+ mask[batch_id * self._num_beams + beam_id, prefix_allowed_tokens] = 0
+
+ return scores + mask
+
+
+class HammingDiversityLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that enforces diverse beam search.
+
+ Note that this logits processor is only effective for [`PreTrainedModel.group_beam_search`]. See [Diverse Beam
+ Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf) for more
+ details.
+
+ Traditional beam search often generates very similar sequences across different beams.
+ `HammingDiversityLogitsProcessor` addresses this by penalizing beams that generate tokens already chosen by other
+ beams in the same time step.
+
+ Args:
+ diversity_penalty (`float`):
+ This value is subtracted from a beam's score if it generates a token same as any beam from other group at a
+ particular time. A higher `diversity_penalty` will enforce greater diversity among the beams. Adjusting
+ this value can help strike a balance between diversity and natural likelihood.
+ num_beams (`int`):
+ Number of beams for beam search. 1 means no beam search.
+ num_beam_groups (`int`):
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ >>> import torch
+
+ >>> # Initialize the model and tokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
+
+ >>> # A long text about the solar system
+ >>> text = (
+ ... "The Solar System is a gravitationally bound system comprising the Sun and the objects that orbit it, "
+ ... "either directly or indirectly. Of the objects that orbit the Sun directly, the largest are the eight "
+ ... "planets, with the remainder being smaller objects, such as the five dwarf planets and small Solar System "
+ ... "bodies. The Solar System formed 4.6 billion years ago from the gravitational collapse of a giant "
+ ... "interstellar molecular cloud."
+ ... )
+ >>> inputs = tokenizer("summarize: " + text, return_tensors="pt")
+
+ >>> # Generate diverse summary
+ >>> outputs_diverse = model.generate(
+ ... **inputs,
+ ... num_beam_groups=2,
+ ... diversity_penalty=10.0,
+ ... max_length=100,
+ ... num_beams=4,
+ ... num_return_sequences=2,
+ ... )
+ >>> summaries_diverse = tokenizer.batch_decode(outputs_diverse, skip_special_tokens=True)
+
+ >>> # Generate non-diverse summary
+ >>> outputs_non_diverse = model.generate(
+ ... **inputs,
+ ... max_length=100,
+ ... num_beams=4,
+ ... num_return_sequences=2,
+ ... )
+ >>> summary_non_diverse = tokenizer.batch_decode(outputs_non_diverse, skip_special_tokens=True)
+
+ >>> # With `diversity_penalty`, the resulting beams are much more diverse
+ >>> print(summary_non_diverse)
+ ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.',
+ 'the Solar System formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.']
+
+ >>> print(summaries_diverse)
+ ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.',
+ 'the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets. the rest of the objects are smaller objects, such as the five dwarf planets and small solar system bodies.']
+ ```
+ """
+
+ def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int):
+ if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0):
+ raise ValueError("`diversity_penalty` should be a float strictly larger than 0.")
+ self._diversity_penalty = diversity_penalty
+ if not isinstance(num_beams, int) or num_beams < 2:
+ raise ValueError("`num_beams` should be an integer strictly larger than 1.")
+ self._num_beams = num_beams
+ if not isinstance(num_beam_groups, int) or num_beam_groups < 2:
+ raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.")
+ if num_beam_groups > num_beams:
+ raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.")
+ self._num_sub_beams = num_beams // num_beam_groups
+
+ def __call__(
+ self,
+ input_ids: torch.LongTensor,
+ scores: torch.FloatTensor,
+ current_tokens: torch.LongTensor,
+ beam_group_idx: int,
+ ) -> torch.FloatTensor:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+ scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
+ using beam search, or log softmax for each vocabulary token when using beam search.
+ current_tokens (`torch.LongTensor` of shape `(batch_size)`):
+ Indices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other
+ beam groups in the current generation step.
+ beam_group_idx (`int`):
+ The index of the beam group currently being processed.
+
+ Return:
+ `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:
+ The processed prediction scores.
+ """
+ # hamming diversity: penalise using same token in current group which was used in previous groups at
+ # the same time step
+ batch_size = current_tokens.shape[0] // self._num_beams
+ group_start_idx = beam_group_idx * self._num_sub_beams
+ group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)
+ group_size = group_end_idx - group_start_idx
+ vocab_size = scores.shape[-1]
+
+ if group_start_idx == 0:
+ return scores
+
+ for batch_idx in range(batch_size):
+ # predicted tokens of last time step of previous groups
+ previous_group_tokens = current_tokens[
+ batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx
+ ]
+ token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)
+ scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency
+
+ return scores
+
+
+class ForcedBOSTokenLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that enforces the specified token as the first generated token. Used with encoder-decoder
+ models.
+
+ Args:
+ bos_token_id (`int`):
+ The id of the token to force as the first generated token.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
+
+ >>> inputs = tokenizer("Translate from English to German: I love cats.", return_tensors="pt")
+
+ >>> # By default, it continues generating according to the model's logits
+ >>> outputs = model.generate(**inputs, max_new_tokens=10)
+ >>> print(tokenizer.batch_decode(outputs)[0])
+ Ich liebe Kitty.
+
+ >>> # We can use `forced_bos_token_id` to force the start of generation with an encoder-decoder model
+ >>> # (including forcing it to end straight away with an EOS token)
+ >>> outputs = model.generate(**inputs, max_new_tokens=10, forced_bos_token_id=tokenizer.eos_token_id)
+ >>> print(tokenizer.batch_decode(outputs)[0])
+
+ ```
+ """
+
+ def __init__(self, bos_token_id: int):
+ self.bos_token_id = bos_token_id
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ cur_len = input_ids.shape[-1]
+ if cur_len == 1:
+ num_tokens = scores.shape[1]
+ scores[:, [i for i in range(num_tokens) if i != self.bos_token_id]] = -float("inf")
+ scores[:, self.bos_token_id] = 0
+ return scores
+
+
+class ForcedEOSTokenLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
+
+ Args:
+ max_length (`int`):
+ The maximum length of the sequence to be generated.
+ eos_token_id (`Union[int, List[int]]`):
+ The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
+ list to set multiple *end-of-sequence* tokens.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+ >>> inputs = tokenizer("A sequence: 1, 2, 3", return_tensors="pt")
+
+ >>> # By default, it continues generating according to the model's logits
+ >>> outputs = model.generate(**inputs, max_new_tokens=10)
+ >>> print(tokenizer.batch_decode(outputs)[0])
+ A sequence: 1, 2, 3, 4, 5, 6, 7, 8
+
+ >>> # `forced_eos_token_id` ensures the generation ends with an EOS token
+ >>> outputs = model.generate(**inputs, max_new_tokens=10, forced_eos_token_id=tokenizer.eos_token_id)
+ >>> print(tokenizer.batch_decode(outputs)[0])
+ A sequence: 1, 2, 3, 4, 5, 6, 7,<|endoftext|>
+ ```
+ """
+
+ def __init__(self, max_length: int, eos_token_id: Union[int, List[int]]):
+ self.max_length = max_length
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ self.eos_token_id = eos_token_id
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ cur_len = input_ids.shape[-1]
+ if cur_len == self.max_length - 1:
+ num_tokens = scores.shape[1]
+ scores[:, [i for i in range(num_tokens) if i not in self.eos_token_id]] = -float("inf")
+ for i in self.eos_token_id:
+ scores[:, i] = 0
+ return scores
+
+
+class InfNanRemoveLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that removes all `nan` and `inf` values to prevent the generation method from failing. Note that
+ this logits processor should only be used if necessary, since it can slow down the generation method.
+
+ This logits processor has no `generate` example, as there shouldn't be a correct combination of flags that warrants
+ its use.
+ """
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # set all nan values to 0.0
+ scores[scores != scores] = 0.0
+
+ # set all +/-inf values to max/min possible value
+ scores[scores == float("inf")] = torch.finfo(scores.dtype).max
+ scores[scores == float("-inf")] = torch.finfo(scores.dtype).min
+
+ return scores
+
+
+class ExponentialDecayLengthPenalty(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] that exponentially increases the score of the `eos_token_id` after `start_index` has been
+ reached. This allows generating shorter sequences without having a hard cutoff, allowing the `eos_token` to be
+ predicted in a meaningful position.
+
+ Args:
+ exponential_decay_length_penalty (`tuple(int, float)`):
+ This tuple shall consist of `(start_index, decay_factor)`, where `start_index` indicates where the penalty
+ starts and `decay_factor` represents the factor of exponential decay.
+ eos_token_id (`Union[int, List[int]]`):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ input_ids_seq_length (`int`):
+ The length of the input sequence.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+
+ >>> text = "Just wanted to let you know, I"
+ >>> inputs = tokenizer(text, return_tensors="pt")
+
+ >>> # Let's consider that we want short sentences, so we limit `max_length=30`. However, we observe that the answer
+ >>> # tends to end abruptly.
+ >>> set_seed(1)
+ >>> outputs = model.generate(**inputs, do_sample=True, temperature=0.9, max_length=30, pad_token_id=50256)
+ >>> print(tokenizer.batch_decode(outputs)[0])
+ Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was
+ published in 2010. Although
+
+ >>> # To promote the appearance of the EOS token at the right time, we add the `exponential_decay_length_penalty =
+ >>> # (start_index, decay_factor)`. Instead of cutting at max_tokens, the output comes to an end before and usually
+ >>> # with more meaning. What happens is that starting from `start_index` the EOS token score will be increased
+ >>> # by `decay_factor` exponentially. However, if you set a high decay factor, you may also end up with abruptly
+ >>> # ending sequences.
+ >>> set_seed(1)
+ >>> outputs = model.generate(
+ ... **inputs,
+ ... do_sample=True,
+ ... temperature=0.9,
+ ... max_length=30,
+ ... pad_token_id=50256,
+ ... exponential_decay_length_penalty=(15, 1.6),
+ ... )
+ >>> print(tokenizer.batch_decode(outputs)[0])
+ Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network
+ which<|endoftext|>
+
+ >>> # With a small decay factor, you will have a higher chance of getting a meaningful sequence.
+ >>> set_seed(1)
+ >>> outputs = model.generate(
+ ... **inputs,
+ ... do_sample=True,
+ ... temperature=0.9,
+ ... max_length=30,
+ ... pad_token_id=50256,
+ ... exponential_decay_length_penalty=(15, 1.01),
+ ... )
+ >>> print(tokenizer.batch_decode(outputs)[0])
+ Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was
+ published in 2010.<|endoftext|>
+ ```
+ """
+
+ def __init__(
+ self,
+ exponential_decay_length_penalty: Tuple[int, float],
+ eos_token_id: Union[int, List[int]],
+ input_ids_seq_length: int,
+ ):
+ self.regulation_start = exponential_decay_length_penalty[0] + input_ids_seq_length
+ self.regulation_factor = exponential_decay_length_penalty[1]
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ self.eos_token_id = eos_token_id
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ cur_len = input_ids.shape[-1]
+ if cur_len > self.regulation_start:
+ for i in self.eos_token_id:
+ penalty_idx = cur_len - self.regulation_start
+ # To support negative logits we compute the penalty of the absolute value and add to the original logit
+ scores[:, i] = scores[:, i] + torch.abs(scores[:, i]) * (pow(self.regulation_factor, penalty_idx) - 1)
+ return scores
+
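+# A minimal numeric sketch of the boost above (illustrative values only): with
+# exponential_decay_length_penalty=(15, 1.6), three tokens past the start index an EOS logit of 2.0 is increased by
+# |2.0| * (1.6**3 - 1) ≈ 6.19, i.e. from 2.0 to ≈ 8.19, making the EOS token increasingly likely.
+#
+#     eos_logit = 2.0
+#     eos_logit + abs(eos_logit) * (1.6 ** 3 - 1)  # ≈ 8.19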
+
+class LogitNormalization(LogitsProcessor, LogitsWarper):
+ r"""
+ [`LogitsWarper`] and [`LogitsProcessor`] for normalizing the scores using log-softmax. It's important to normalize
+ the scores during beam search, after applying the logits processors or warpers, since the search algorithm used in
+ this library doesn't do it (it only normalizes the scores beforehand, and they may change afterwards), yet it still
+ assumes that the scores are normalized when comparing the hypotheses.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+ >>> import torch
+
+ >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+ >>> inputs = tokenizer("A sequence: 1, 2, 3", return_tensors="pt")
+
+ >>> # By default, the scores are not normalized -- the sum of their exponentials is NOT a normalized probability
+ >>> # distribution, summing to 1
+ >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+ >>> print(torch.sum(torch.exp(outputs.scores[-1])))
+ tensor(816.3250)
+
+    >>> # Normalizing them may have a positive impact on beam methods, or when using the scores in your application
+ >>> outputs = model.generate(**inputs, renormalize_logits=True, return_dict_in_generate=True, output_scores=True)
+ >>> print(torch.sum(torch.exp(outputs.scores[-1])))
+ tensor(1.0000)
+ ```
+ """
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ scores = scores.log_softmax(dim=-1)
+ return scores
+
+
+class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor):
+ r"""
+    [`SuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
+    generating using `begin_index` tokens. This ensures that the tokens defined by `begin_suppress_tokens` are not
+    generated at the beginning. Originally created for
+ [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper).
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
+ >>> from datasets import load_dataset
+
+ >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
+ >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
+
+ >>> # Whisper has `begin_suppress_tokens` set by default (= `[220, 50256]`). 50256 is the EOS token, so this means
+    >>> # it can't generate an EOS token in the first iteration, but it can in the others.
+ >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+ >>> print(outputs.scores[1][0, 50256]) # 1 (and not 0) is the first freely generated token
+ tensor(-inf)
+ >>> print(outputs.scores[-1][0, 50256]) # in other places we can see some probability mass for EOS
+ tensor(29.9010)
+
+ >>> # If we disable `begin_suppress_tokens`, we can generate EOS in the first iteration.
+ >>> outputs = model.generate(
+ ... **inputs, return_dict_in_generate=True, output_scores=True, begin_suppress_tokens=None
+ ... )
+ >>> print(outputs.scores[1][0, 50256])
+ tensor(11.2027)
+ ```
+ """
+
+ def __init__(self, begin_suppress_tokens, begin_index):
+ self.begin_suppress_tokens = list(begin_suppress_tokens)
+ self.begin_index = begin_index
+
+ def set_begin_index(self, begin_index):
+ self.begin_index = begin_index
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ if input_ids.shape[1] == self.begin_index:
+ scores[:, self.begin_suppress_tokens] = -float("inf")
+
+ return scores
+
+
+class SuppressTokensLogitsProcessor(LogitsProcessor):
+ r"""
+ This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so
+ that they are not generated. Originally created for
+ [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper).
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
+ >>> from datasets import load_dataset
+
+ >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
+ >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
+
+ >>> # Whisper has a long list of suppressed tokens. For instance, in this case, the token 1 is suppressed by default.
+ >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+ >>> print(outputs.scores[1][0, 1]) # 1 (and not 0) is the first freely generated token
+ tensor(-inf)
+
+ >>> # If we disable `suppress_tokens`, we can generate it.
+ >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True, suppress_tokens=None)
+ >>> print(outputs.scores[1][0, 1])
+ tensor(5.7738)
+ ```
+ """
+
+ def __init__(self, suppress_tokens):
+ self.suppress_tokens = list(suppress_tokens)
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ scores[:, self.suppress_tokens] = -float("inf")
+ return scores
+
+
+class ForceTokensLogitsProcessor(LogitsProcessor):
+ r"""
+    This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
+    indices that will be forced before generation. At those indices, the processor sets the log probs of all other
+    tokens to `-inf` so that the forced token is sampled at its corresponding position. Originally created for
+ [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper).
+
+ Examples:
+ ```python
+ >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
+ >>> from datasets import load_dataset
+
+ >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
+ >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
+
+ >>> # This Whisper model forces the generation to start with `50362` at the first position by default, i.e.
+ >>> # `"forced_decoder_ids": [[1, 50362]]`. This means all other tokens are masked out.
+ >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+ >>> print(
+ ... all(outputs.scores[0][0, i] == float("-inf") for i in range(processor.tokenizer.vocab_size) if i != 50362)
+ ... )
+ True
+ >>> print(outputs.scores[0][0, 50362])
+ tensor(0.)
+
+ >>> # If we disable `forced_decoder_ids`, we stop seeing that effect
+ >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True, forced_decoder_ids=None)
+ >>> print(
+ ... all(outputs.scores[0][0, i] == float("-inf") for i in range(processor.tokenizer.vocab_size) if i != 50362)
+ ... )
+ False
+ >>> print(outputs.scores[0][0, 50362])
+ tensor(19.3140)
+ ```
+ """
+
+ def __init__(self, force_token_map: List[List[int]], _has_warned: Optional[bool] = False):
+ self.force_token_map = dict(force_token_map)
+ if not _has_warned:
+ # TODO(Sanchit): remove this processor entirely in v4.40
+ warnings.warn(
+ "This `ForceTokensLogitsProcessor` has been deprecated and will be removed in v4.40. Should you need to provide prompt ids for generation, specify `input_ids` to the generate method for decoder-only models, or `decoder_input_ids` for encoder-decoder models.",
+ FutureWarning,
+ )
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ generation_idx = input_ids.shape[-1]
+ current_token = self.force_token_map.get(generation_idx, None)
+ if current_token is not None:
+ scores[:, :] = -float("inf")
+ scores[:, current_token] = 0
+ return scores
+
+
+class WhisperTimeStampLogitsProcessor(LogitsProcessor):
+ r"""
+
+ [`LogitsProcessor`] that modifies the logits for the generation of timestamps in the transcription. When the input
+ tokens are at a specific threshold, the processor sets the scores to negative infinity. The processor makes sure
+ that timestamp tokens appear in pairs, by masking out the logits that would break this pairing pattern. This is
+ done to maintain the consistency and structure of generated timestamps. It also ensures that when the predicted
+    probability of sampling any of the timestamp tokens is greater than that of any individual non-timestamp token,
+    those non-timestamp logits are set to negative infinity. This is done to ensure the generation of timestamps over
+    other potential tokens.
+
+
+ See [the paper](https://arxiv.org/abs/2212.04356) for more information.
+
+ Args:
+ generate_config (`GenerateConfig`):
+ The generate config used to generate the output. The following parameters are required:
+ eos_token_id (`int`, *optional*, defaults to 50257):
+ The id of the *end-of-sequence* token.
+ no_timestamps_token_id (`int`, *optional*, defaults to 50363):
+ The id of the `"<|notimestamps|>"` token.
+ max_initial_timestamp_index (`int`, *optional*, defaults to 1):
+ Used to set the maximum value of the initial timestamp. This is used to prevent the model from
+ predicting timestamps that are too far in the future.
+        begin_index (`int`, *optional*): Token index of the first token that is generated by the model.
+ _detect_timestamp_from_logprob (`bool`, *optional*): Whether timestamps can be predicted from logprobs over all timestamps.
+
+ Examples:
+ ``` python
+ >>> import torch
+ >>> from transformers import AutoProcessor, WhisperForConditionalGeneration, GenerationConfig
+ >>> from datasets import load_dataset
+
+ >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
+ >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> inputs = processor(ds[3]["audio"]["array"], return_tensors="pt")
+ >>> input_features = inputs.input_features
+
+    >>> # Displaying timestamps
+ >>> generated_ids = model.generate(inputs=input_features, return_timestamps=True)
+ >>> transcription = processor.batch_decode(generated_ids, decode_with_timestamps=True)[0]
+ >>> print("Transcription:", transcription)
+ Transcription: <|startoftranscript|><|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can<|6.44|><|6.44|> discover in it but little of rocky Ithaca.<|9.44|><|endoftext|>
+
+
+    >>> # No timestamps & change EOS:
+    >>> # This allows the user to select a specific token to terminate the sequence on; in this case it's the word "can" (460)
+    >>> model.generation_config.eos_token_id = 460
+    >>> generated_ids = model.generate(inputs=input_features, return_timestamps=False)
+ >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ >>> print("Transcription:", transcription)
+ Transcription: He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can
+ ```
+ """
+
+ def __init__(
+ self, generate_config, begin_index: Optional[int] = None, _detect_timestamp_from_logprob: Optional[bool] = None
+ ): # support for the kwargs
+ self.no_timestamps_token_id = generate_config.no_timestamps_token_id
+ self.timestamp_begin = generate_config.no_timestamps_token_id + 1
+ self.eos_token_id = generate_config.eos_token_id or generate_config.bos_token_id
+
+ # this variable is mostly just used for testing
+ self._detect_timestamp_from_logprob = (
+ _detect_timestamp_from_logprob
+ if _detect_timestamp_from_logprob is not None
+ else getattr(generate_config, "_detect_timestamp_from_logprob", True)
+ )
+
+ num_forced_ids = (
+ len(generate_config.forced_decoder_ids) if generate_config.forced_decoder_ids is not None else 0
+ )
+ self.begin_index = begin_index or (num_forced_ids + 1)
+
+ self.max_initial_timestamp_index = getattr(generate_config, "max_initial_timestamp_index", None)
+ # TODO(Patrick): Make sure that official models have max_initial_timestamp_index set to 50
+ # self.max_initial_timestamp_index = 50
+
+ def set_begin_index(self, begin_index):
+ self.begin_index = begin_index
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # suppress <|notimestamps|> which is handled by without_timestamps
+ scores[:, self.no_timestamps_token_id] = -float("inf")
+
+ # timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly
+ for k in range(input_ids.shape[0]):
+ sampled_tokens = input_ids[k, self.begin_index :]
+ seq = list(sampled_tokens.tolist())
+
+ last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin
+ penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin
+
+ if last_was_timestamp:
+ if penultimate_was_timestamp: # has to be non-timestamp
+ scores[k, self.timestamp_begin :] = -float("inf")
+ else: # cannot be normal text tokens
+ scores[k, : self.eos_token_id] = -float("inf")
+
+ timestamps = sampled_tokens[sampled_tokens.ge(self.timestamp_begin)]
+ if timestamps.numel() > 0:
+ # `timestamps` shouldn't decrease; forbid timestamp tokens smaller than the last
+ # The following lines of code are copied from: https://github.com/openai/whisper/pull/914/files#r1137085090
+ if last_was_timestamp and not penultimate_was_timestamp:
+ timestamp_last = timestamps[-1]
+ else:
+                    # Avoid emitting <|0.00|> again
+ timestamp_last = timestamps[-1] + 1
+
+ scores[k, self.timestamp_begin : timestamp_last] = -float("inf")
+
+ # apply the `max_initial_timestamp` option
+ if input_ids.shape[1] == self.begin_index:
+ scores[:, : self.timestamp_begin] = -float("inf")
+
+ if self.max_initial_timestamp_index is not None:
+ last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
+ scores[:, last_allowed + 1 :] = -float("inf")
+
+ # if sum of probability over timestamps is above any other token, sample timestamp
+ logprobs = torch.nn.functional.log_softmax(scores.float(), dim=-1)
+ for k in range(input_ids.shape[0]):
+ timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1)
+ max_text_token_logprob = logprobs[k, : self.timestamp_begin].max()
+ if timestamp_logprob > max_text_token_logprob and self._detect_timestamp_from_logprob:
+ scores[k, : self.timestamp_begin] = -float("inf")
+
+ return scores
+
+
+class WhisperNoSpeechDetection(LogitsProcessor):
+    r"""This processor can be used to detect silence when using Whisper. It should take unprocessed logits as input, in order to follow the original implementation."""
+
+ def __init__(self, no_speech_token: int, begin_index: int, scores_is_logprobs: bool = False):
+ self.no_speech_token = no_speech_token
+        # the offset between the start-of-transcription token (as referred to in the paper) and the first
+        # generated token is equal to the position of the first generated token index
+ self.start_of_trans_offset = begin_index
+
+ # `self.begin_index` is a running value that is changed on the fly
+ self.begin_index = begin_index
+ self._no_speech_prob = [0.0]
+ self.is_scores_logprobs = scores_is_logprobs
+
+ # overwritten dynamically
+ self.model = None
+ self.inputs = None
+
+ def set_model(self, model):
+ self.model = model
+
+ def set_inputs(self, inputs):
+ self.inputs = {**self.model.prepare_inputs_for_generation(**inputs), **inputs}
+ self.inputs["input_features"] = self.inputs.pop("inputs")
+
+ @property
+ def no_speech_prob(self):
+ return self._no_speech_prob
+
+ def set_begin_index(self, begin_index):
+ self.begin_index = begin_index
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ if input_ids.shape[1] == self.begin_index:
+ if self.start_of_trans_offset > 1:
+ with torch.no_grad():
+ logits = self.model(**self.inputs).logits
+
+ no_speech_index = self.begin_index - self.start_of_trans_offset
+ no_speech_scores = logits[:, no_speech_index]
+ else:
+ no_speech_scores = scores
+
+ if self.is_scores_logprobs:
+ probs = no_speech_scores.exp()
+ else:
+ probs = no_speech_scores.float().softmax(dim=-1)
+
+ self._no_speech_prob = probs[:, self.no_speech_token]
+
+ return scores
+
+
+class ClassifierFreeGuidanceLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] for classifier free guidance (CFG). The scores are split over the batch dimension,
+    where the first half corresponds to the conditional logits (predicted from the input prompt) and the second half
+    corresponds to the unconditional logits (predicted from an empty or 'null' prompt). The processor computes a
+ weighted average across the conditional and unconditional logits, parameterised by the `guidance_scale`.
+
+ See [the paper](https://arxiv.org/abs/2306.05284) for more information.
+
+    This logits processor is exclusively compatible with
+    [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen).
+
+ Args:
+ guidance_scale (float):
+ The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
+ Higher guidance scale encourages the model to generate samples that are more closely linked to the input
+ prompt, usually at the expense of poorer quality.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+
+ >>> inputs = processor(
+ ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
+ ... padding=True,
+ ... return_tensors="pt",
+ ... )
+ >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256)
+ ```
+ """
+
+ def __init__(self, guidance_scale):
+ if guidance_scale > 1:
+ self.guidance_scale = guidance_scale
+ else:
+ raise ValueError(
+ "Require guidance scale >1 to use the classifier free guidance processor, got guidance scale "
+ f"{guidance_scale}."
+ )
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # simple check to make sure we have compatible batch sizes between our
+ # logits scores (cond + uncond) and input ids (cond only)
+ if scores.shape[0] != 2 * input_ids.shape[0]:
+ raise ValueError(
+ f"Logits should have twice the batch size of the input ids, the first half of batches corresponding to "
+ f"the conditional inputs, and the second half of batches corresponding to the unconditional inputs. Got "
+ f"batch size {scores.shape[0]} for the logits and {input_ids.shape[0]} for the input ids."
+ )
+ unguided_bsz = scores.shape[0] // 2
+ cond_logits, uncond_logits = scores.split(unguided_bsz, dim=0)
+ scores = uncond_logits + (cond_logits - uncond_logits) * self.guidance_scale
+ return scores
+
+
+class AlternatingCodebooksLogitsProcessor(LogitsProcessor):
+ r"""
+ [`LogitsProcessor`] enforcing alternated generation between the two codebooks of Bark.
+
+    This logits processor is exclusively compatible with
+    [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark)'s fine submodel. See the model documentation
+    for examples.
+
+ Args:
+ input_start_len (`int`):
+ The length of the initial input sequence.
+ semantic_vocab_size (`int`):
+            Vocabulary size of the semantic part, i.e. the number of tokens associated with the semantic vocabulary.
+        codebook_size (`int`):
+            Number of tokens associated with the codebook.
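+
+    Examples:
+
+    A minimal illustrative sketch with toy sizes, applied directly to the scores outside of `generate` (the import
+    path and the tiny vocabulary below are assumptions made for the example; real sizes come from the Bark fine
+    sub-model configuration):
+
+    ```python
+    >>> import torch
+    >>> from transformers.generation.logits_process import AlternatingCodebooksLogitsProcessor
+
+    >>> # a semantic vocabulary of 4 tokens followed by two codebooks of 3 tokens each
+    >>> processor = AlternatingCodebooksLogitsProcessor(input_start_len=2, semantic_vocab_size=4, codebook_size=3)
+    >>> input_ids = torch.zeros((1, 2), dtype=torch.long)  # current length == input_start_len -> first codebook turn
+    >>> scores = processor(input_ids, torch.zeros((1, 10)))
+    >>> torch.where(torch.isfinite(scores[0]))[0]  # only the first codebook (indices 4..6) keeps finite scores
+    tensor([4, 5, 6])
+    ```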
+ """
+
+ def __init__(self, input_start_len: int, semantic_vocab_size: int, codebook_size: int):
+ if not isinstance(input_start_len, int) or input_start_len < 0:
+            raise ValueError(f"`input_start_len` has to be a non-negative integer, but is {input_start_len}")
+
+ self.input_start_len = input_start_len
+ self.semantic_vocab_size = semantic_vocab_size
+ self.codebook_size = codebook_size
+
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ curr_len = input_ids.shape[-1]
+
+ # even -> first codebook, odd -> second codebook
+ is_first_codebook = ((curr_len - self.input_start_len) % 2) == 0
+
+ if is_first_codebook:
+ scores[:, : self.semantic_vocab_size] = -float("inf")
+ scores[:, self.semantic_vocab_size + self.codebook_size :] = -float("inf")
+ else:
+ scores[:, : self.semantic_vocab_size + self.codebook_size] = -float("inf")
+
+ return scores
+
+
+class UnbatchedClassifierFreeGuidanceLogitsProcessor(LogitsProcessor):
+ r"""
+    Logits processor for Classifier-Free Guidance (CFG). The processor computes a weighted average across scores
+ from prompt conditional and prompt unconditional (or negative) logits, parameterized by the `guidance_scale`.
+ The unconditional scores are computed internally by prompting `model` with the `unconditional_ids` branch.
+
+ See [the paper](https://arxiv.org/abs/2306.17806) for more information.
+
+ Args:
+ guidance_scale (`float`):
+ The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale != 1`.
+ Higher guidance scale encourages the model to generate samples that are more closely linked to the input
+ prompt, usually at the expense of poorer quality. A value smaller than 1 has the opposite effect, while
+ making the negative prompt provided with negative_prompt_ids (if any) act as a positive prompt.
+ model (`PreTrainedModel`):
+            The model computing the unconditional scores. Typically the same as the one computing the conditional
+ scores. Both models must use the same tokenizer.
+ unconditional_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary for the unconditional branch. If unset, will default to
+ the last token of the prompt.
+ unconditional_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Attention mask for unconditional_ids.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether to cache key/values during the negative prompt forward pass.
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> inputs = tokenizer(["Today, a dragon flew over Paris, France,"], return_tensors="pt")
+ >>> out = model.generate(inputs["input_ids"], guidance_scale=1.5)
+ >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
+ 'Today, a dragon flew over Paris, France, killing at least 50 people and injuring more than 100'
+
+ >>> # with a negative prompt
+ >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt")
+ >>> out = model.generate(inputs["input_ids"], guidance_scale=2, negative_prompt_ids=neg_inputs["input_ids"])
+ >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
+ 'Today, a dragon flew over Paris, France, killing at least 130 people. French media reported that'
+
+ >>> # with a positive prompt
+ >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt")
+ >>> out = model.generate(inputs["input_ids"], guidance_scale=0, negative_prompt_ids=neg_inputs["input_ids"])
+ >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
+ "Today, a dragon flew over Paris, France, and I'm very happy to be here. I"
+ ```
+ """
+
+ def __init__(
+ self,
+ guidance_scale: float,
+ model,
+ unconditional_ids: Optional[torch.LongTensor] = None,
+ unconditional_attention_mask: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = True,
+ ):
+ self.guidance_scale = guidance_scale
+ self.model = model
+ self.unconditional_context = {
+ "input_ids": unconditional_ids,
+ "attention_mask": unconditional_attention_mask,
+ "use_cache": use_cache,
+ "past_key_values": None,
+ "first_pass": True,
+ }
+
+ def get_unconditional_logits(self, input_ids):
+ if self.unconditional_context["first_pass"]:
+ if self.unconditional_context["input_ids"] is None:
+ self.unconditional_context["input_ids"] = input_ids[:, -1:]
+ if self.unconditional_context["attention_mask"] is None:
+ self.unconditional_context["attention_mask"] = torch.ones_like(
+ self.unconditional_context["input_ids"], dtype=torch.long
+ )
+ input_ids = self.unconditional_context["input_ids"]
+ attention_mask = self.unconditional_context["attention_mask"]
+ self.unconditional_context["first_pass"] = False
+ else:
+ attention_mask = torch.cat(
+ [
+ self.unconditional_context["attention_mask"],
+ torch.ones_like(input_ids[:, -1:], dtype=torch.long),
+ ],
+ dim=1,
+ )
+ if not self.unconditional_context["use_cache"]:
+ input_ids = torch.cat([self.unconditional_context["input_ids"], input_ids[:, -1:]], dim=1)
+ else:
+ input_ids = input_ids[:, -1:]
+ self.unconditional_context["input_ids"] = input_ids
+ self.unconditional_context["attention_mask"] = attention_mask
+
+ out = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ use_cache=self.unconditional_context["use_cache"],
+ past_key_values=self.unconditional_context["past_key_values"],
+ )
+ self.unconditional_context["past_key_values"] = out.get("past_key_values", None)
+
+ return out.logits
+
+ def __call__(self, input_ids, scores):
+ scores = torch.nn.functional.log_softmax(scores, dim=-1)
+ if self.guidance_scale == 1:
+ return scores
+
+ logits = self.get_unconditional_logits(input_ids)
+
+ unconditional_logits = torch.nn.functional.log_softmax(logits[:, -1], dim=-1)
+ out = self.guidance_scale * (scores - unconditional_logits) + unconditional_logits
+ return out
+
+
+class BarkEosPrioritizerLogitsProcessor(LogitsProcessor):
+    r"""This processor ensures that the EOS token is selected if its probability is greater than `min_eos_p`.
+
+    This logits processor is exclusively compatible with
+    [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark). See the model documentation for examples.
+
+ Args:
+ eos_token_id (`Union[int, List[int]]`):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ min_eos_p (`float`, *optional*):
+ Minimum end of speech threshold.
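+
+    Examples:
+
+    A minimal illustrative sketch on toy scores, applied outside of `generate` (the import path and the 4-token
+    vocabulary, with token 3 as EOS, are assumptions made for the example):
+
+    ```python
+    >>> import torch
+    >>> from transformers.generation.logits_process import BarkEosPrioritizerLogitsProcessor
+
+    >>> processor = BarkEosPrioritizerLogitsProcessor(eos_token_id=3, min_eos_p=0.2)
+    >>> scores = torch.tensor([[1.0, 1.0, 1.0, 2.0]])  # softmax gives EOS a probability of ~0.48 > min_eos_p
+    >>> out = processor(torch.zeros((1, 1), dtype=torch.long), scores)
+    >>> torch.isinf(out[0, :3]).all()  # every non-EOS token is masked out, so EOS will be selected
+    tensor(True)
+    ```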
+ """
+
+ def __init__(self, eos_token_id: Union[int, List[int]], min_eos_p: float):
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ self.eos_token_id = eos_token_id
+ if min_eos_p is not None and min_eos_p <= 0:
+ raise ValueError(f"`min_eos_p` has to be a positive float, but is {min_eos_p}")
+ self.min_eos_p = min_eos_p
+
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ if self.min_eos_p:
+ probs = torch.nn.functional.softmax(scores.float(), dim=-1)
+ # create scores full of -inf except for the eos_token_id
+ early_stop_scores = torch.ones_like(scores) * -float("inf")
+ early_stop_scores[:, self.eos_token_id] = scores[:, self.eos_token_id]
+
+ do_early_stop = probs[:, self.eos_token_id] > self.min_eos_p
+ do_early_stop = torch.any(do_early_stop, dim=1, keepdim=True)
+ scores = torch.where(do_early_stop, early_stop_scores, scores)
+
+ return scores
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4624296d237f7223b915017c909f2bc1f164e7a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py
@@ -0,0 +1,157 @@
+import time
+import warnings
+from abc import ABC
+from copy import deepcopy
+from typing import Optional
+
+import torch
+
+from ..utils import add_start_docstrings, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
+ Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
+ or scores for each vocabulary token after SoftMax. If this stopping criteria depends on the `scores` input,
+ make sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional stopping criteria specific kwargs.
+
+ Return:
+        `torch.BoolTensor` of shape `(batch_size, 1)`, where `True` indicates we stop generation for a particular row,
+        and `False` indicates we should continue.
+
+"""
+
+
+class StoppingCriteria(ABC):
+ """Abstract base class for all stopping criteria that can be applied during generation.
+
+ If your stopping criteria depends on the `scores` input, make sure you pass `return_dict_in_generate=True,
+ output_scores=True` to `generate`.
+ """
+
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
+ raise NotImplementedError("StoppingCriteria needs to be subclassed")
+
+
+class MaxLengthCriteria(StoppingCriteria):
+ """
+    This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep
+    in mind that, for decoder-only transformers, this will include the initial prompt tokens.
+
+ Args:
+ max_length (`int`):
+ The maximum length that the output sequence can have in number of tokens.
+ max_position_embeddings (`int`, *optional*):
+ The maximum model length, as defined by the model's `config.max_position_embeddings` attribute.
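+
+    Examples:
+
+    A minimal sketch of the criterion used on its own, outside of `generate`; the scores are ignored by this
+    criterion, so zeros are passed for them:
+
+    ```python
+    >>> import torch
+    >>> from transformers.generation.stopping_criteria import MaxLengthCriteria
+
+    >>> criteria = MaxLengthCriteria(max_length=5)
+    >>> criteria(torch.zeros((2, 4), dtype=torch.long), scores=torch.zeros((2, 10)))  # 4 tokens so far: keep going
+    tensor([False, False])
+    >>> criteria(torch.zeros((2, 5), dtype=torch.long), scores=torch.zeros((2, 10)))  # limit reached: stop
+    tensor([True, True])
+    ```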
+ """
+
+ def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
+ self.max_length = max_length
+ self.max_position_embeddings = max_position_embeddings
+
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
+ cur_len = input_ids.shape[-1]
+ is_done = cur_len >= self.max_length
+ if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
+ logger.warning_once(
+ "This is a friendly reminder - the current text generation call will exceed the model's predefined "
+ f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
+ "exceptions, performance degradation, or nothing at all."
+ )
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
+
+
+class MaxNewTokensCriteria(StoppingCriteria):
+ """
+    This class can be used to stop generation whenever the generated number of tokens exceeds `max_new_tokens`. Keep in
+    mind that, for decoder-only transformers, this will **not** include the initial prompt tokens. This is very
+ close to `MaxLengthCriteria` but ignores the number of initial tokens.
+
+ Args:
+ start_length (`int`):
+ The number of initial tokens.
+ max_new_tokens (`int`):
+ The maximum number of tokens to generate.
+ """
+
+ def __init__(self, start_length: int, max_new_tokens: int):
+ warnings.warn(
+ "The class `MaxNewTokensCriteria` is deprecated. "
+ f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
+ "with `max_length = start_length + max_new_tokens` instead.",
+ FutureWarning,
+ )
+ self.start_length = start_length
+ self.max_new_tokens = max_new_tokens
+ self.max_length = start_length + max_new_tokens
+
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
+ is_done = input_ids.shape[-1] >= self.max_length
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
+
+
+class MaxTimeCriteria(StoppingCriteria):
+ """
+    This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the
+    time will start being counted when you initialize this criterion. You can override this by passing an
+    `initial_timestamp`.
+
+ Args:
+ max_time (`float`):
+ The maximum allowed time in seconds for the generation.
+        initial_timestamp (`float`, *optional*, defaults to `time.time()`):
+            The timestamp from which the allowed generation time starts being counted.
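+
+    Examples:
+
+    A minimal sketch combining this criterion with `StoppingCriteriaList`, outside of `generate`, using a backdated
+    `initial_timestamp` so that the time budget is already exhausted:
+
+    ```python
+    >>> import time
+    >>> import torch
+    >>> from transformers.generation.stopping_criteria import MaxTimeCriteria, StoppingCriteriaList
+
+    >>> criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=10.0, initial_timestamp=time.time() - 60)])
+    >>> # 60 seconds have already "elapsed" since the initial timestamp, which exceeds the 10 second budget
+    >>> criteria(torch.zeros((1, 3), dtype=torch.long), scores=torch.zeros((1, 10)))
+    tensor([True])
+    ```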
+ """
+
+ def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
+ self.max_time = max_time
+ self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
+
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
+ is_done = time.time() - self.initial_timestamp > self.max_time
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
+
+
+class StoppingCriteriaList(list):
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
+ is_done = torch.full((input_ids.shape[0],), False, device=input_ids.device)
+ for criteria in self:
+ is_done = is_done | criteria(input_ids, scores, **kwargs)
+ return is_done
+
+ @property
+ def max_length(self) -> Optional[int]:
+ for stopping_criterium in self:
+ if isinstance(stopping_criterium, MaxLengthCriteria):
+ return stopping_criterium.max_length
+ elif isinstance(stopping_criterium, MaxNewTokensCriteria):
+ return stopping_criterium.max_length
+ return None
+
+
+def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
+ stopping_max_length = stopping_criteria.max_length
+ new_stopping_criteria = deepcopy(stopping_criteria)
+ if stopping_max_length is not None and stopping_max_length != max_length:
+ warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
+ elif stopping_max_length is None:
+ new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
+ return new_stopping_criteria
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/streamers.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/streamers.py
new file mode 100644
index 0000000000000000000000000000000000000000..c75b43466af7a8423d401639cce85411d4edcab5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/streamers.py
@@ -0,0 +1,227 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from queue import Queue
+from typing import TYPE_CHECKING, Optional
+
+
+if TYPE_CHECKING:
+ from ..models.auto import AutoTokenizer
+
+
+class BaseStreamer:
+ """
+ Base class from which `.generate()` streamers should inherit.
+ """
+
+ def put(self, value):
+ """Function that is called by `.generate()` to push new tokens"""
+ raise NotImplementedError()
+
+ def end(self):
+ """Function that is called by `.generate()` to signal the end of generation"""
+ raise NotImplementedError()
+
+
+class TextStreamer(BaseStreamer):
+ """
+ Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
+
+    The API for the streamer classes is still under development and may change in the future.
+
+ Parameters:
+ tokenizer (`AutoTokenizer`):
+            The tokenizer used to decode the tokens.
+ skip_prompt (`bool`, *optional*, defaults to `False`):
+ Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
+ decode_kwargs (`dict`, *optional*):
+ Additional keyword arguments to pass to the tokenizer's `decode` method.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
+
+ >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
+ >>> streamer = TextStreamer(tok)
+
+ >>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
+ >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
+ An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
+ ```
+ """
+
+ def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
+ self.tokenizer = tokenizer
+ self.skip_prompt = skip_prompt
+ self.decode_kwargs = decode_kwargs
+
+ # variables used in the streaming process
+ self.token_cache = []
+ self.print_len = 0
+ self.next_tokens_are_prompt = True
+
+ def put(self, value):
+ """
+ Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
+ """
+ if len(value.shape) > 1 and value.shape[0] > 1:
+ raise ValueError("TextStreamer only supports batch size 1")
+ elif len(value.shape) > 1:
+ value = value[0]
+
+ if self.skip_prompt and self.next_tokens_are_prompt:
+ self.next_tokens_are_prompt = False
+ return
+
+        # Add the new token to the cache and decode the entire thing.
+ self.token_cache.extend(value.tolist())
+ text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
+
+ # After the symbol for a new line, we flush the cache.
+ if text.endswith("\n"):
+ printable_text = text[self.print_len :]
+ self.token_cache = []
+ self.print_len = 0
+ # If the last token is a CJK character, we print the characters.
+ elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
+ printable_text = text[self.print_len :]
+ self.print_len += len(printable_text)
+ # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
+ # which may change with the subsequent token -- there are probably smarter ways to do this!)
+ else:
+ printable_text = text[self.print_len : text.rfind(" ") + 1]
+ self.print_len += len(printable_text)
+
+ self.on_finalized_text(printable_text)
+
+ def end(self):
+ """Flushes any remaining cache and prints a newline to stdout."""
+ # Flush the cache, if it exists
+ if len(self.token_cache) > 0:
+ text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
+ printable_text = text[self.print_len :]
+ self.token_cache = []
+ self.print_len = 0
+ else:
+ printable_text = ""
+
+ self.next_tokens_are_prompt = True
+ self.on_finalized_text(printable_text, stream_end=True)
+
+ def on_finalized_text(self, text: str, stream_end: bool = False):
+ """Prints the new text to stdout. If the stream is ending, also prints a newline."""
+ print(text, flush=True, end="" if not stream_end else None)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+
+class TextIteratorStreamer(TextStreamer):
+ """
+ Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is
+    useful for applications that benefit from accessing the generated text in a non-blocking way (e.g. in an interactive
+ Gradio demo).
+
+    The API for the streamer classes is still under development and may change in the future.
+
+ Parameters:
+ tokenizer (`AutoTokenizer`):
+            The tokenizer used to decode the tokens.
+ skip_prompt (`bool`, *optional*, defaults to `False`):
+ Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
+ timeout (`float`, *optional*):
+ The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
+ in `.generate()`, when it is called in a separate thread.
+ decode_kwargs (`dict`, *optional*):
+ Additional keyword arguments to pass to the tokenizer's `decode` method.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+ >>> from threading import Thread
+
+ >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
+ >>> streamer = TextIteratorStreamer(tok)
+
+ >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
+ >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
+ >>> thread = Thread(target=model.generate, kwargs=generation_kwargs)
+ >>> thread.start()
+ >>> generated_text = ""
+ >>> for new_text in streamer:
+ ... generated_text += new_text
+ >>> generated_text
+ 'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,'
+ ```
+ """
+
+ def __init__(
+ self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
+ ):
+ super().__init__(tokenizer, skip_prompt, **decode_kwargs)
+ self.text_queue = Queue()
+ self.stop_signal = None
+ self.timeout = timeout
+
+ def on_finalized_text(self, text: str, stream_end: bool = False):
+ """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
+ self.text_queue.put(text, timeout=self.timeout)
+ if stream_end:
+ self.text_queue.put(self.stop_signal, timeout=self.timeout)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ value = self.text_queue.get(timeout=self.timeout)
+ if value == self.stop_signal:
+ raise StopIteration()
+ else:
+ return value
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc9799b7ab39f19610faf3ac684e3cb287c95678
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py
@@ -0,0 +1,591 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import List, Tuple
+
+import numpy as np
+import tensorflow as tf
+
+from ..tf_utils import stable_softmax
+from ..utils import add_start_docstrings
+from ..utils.logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ scores (`tf.Tensor` of shape `(batch_size, config.vocab_size)`):
+            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using beam
+ search or log softmax for each vocabulary token when using beam search.
+ cur_len (`int`):
+ The current length of valid input sequence tokens. In the TF implementation, the input_ids' sequence length
+ is the maximum length generate can produce, and we need to know which of its tokens are valid.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional logits processor specific kwargs.
+
+ Return:
+ `tf.Tensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
+"""
+
+
+class TFLogitsProcessor:
+ """Abstract base class for all logit processors that can be applied during generation."""
+
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ """TF method for processing logits."""
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+
+class TFLogitsWarper:
+ """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
+
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ """TF method for warping logits."""
+ raise NotImplementedError(
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
+ )
+
+
+class TFLogitsProcessorList(list):
+ """
+ This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor.
+ This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the
+ inputs.
+ """
+
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor:
+ for processor in self:
+ function_args = inspect.signature(processor.__call__).parameters
+ if len(function_args) > 3:
+ if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
+ raise ValueError(
+ f"Make sure that all the required parameters: {list(function_args.keys())} for "
+ f"{processor.__class__} are passed to the logits processor."
+ )
+ scores = processor(input_ids, scores, cur_len, **kwargs)
+ else:
+ scores = processor(input_ids, scores, cur_len)
+ return scores
+
+
+class TFTemperatureLogitsWarper(TFLogitsWarper):
+ r"""
+ [`TFLogitsWarper`] for temperature (exponential scaling output probability distribution).
+
+ Args:
+ temperature (`float`):
+            The value used to modulate the logits distribution.
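+
+    Examples:
+
+    A minimal sketch applied directly to toy logits, outside of `generate`; the logits are simply divided by the
+    temperature, so values above 1.0 flatten the distribution and values below 1.0 sharpen it:
+
+    ```python
+    >>> import tensorflow as tf
+    >>> from transformers.generation.tf_logits_process import TFTemperatureLogitsWarper
+
+    >>> warper = TFTemperatureLogitsWarper(temperature=2.0)
+    >>> scores = tf.constant([[2.0, 0.0, -2.0]])
+    >>> warper(input_ids=tf.constant([[0]]), scores=scores, cur_len=1).numpy().tolist()
+    [[1.0, 0.0, -1.0]]
+    ```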
+ """
+
+ def __init__(self, temperature: float):
+ if not isinstance(temperature, float) or not (temperature > 0):
+ raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
+
+ self.temperature = temperature
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ scores = scores / self.temperature
+ return scores
+
+
+class TFTopKLogitsWarper(TFLogitsWarper):
+ r"""
+ [`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
+
+ Args:
+ top_k (`int`):
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
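+
+    Examples:
+
+    A minimal sketch on toy logits, outside of `generate`: every token outside the `top_k` highest-scoring ones is
+    replaced with the filter value:
+
+    ```python
+    >>> import tensorflow as tf
+    >>> from transformers.generation.tf_logits_process import TFTopKLogitsWarper
+
+    >>> warper = TFTopKLogitsWarper(top_k=2)
+    >>> scores = tf.constant([[1.0, 3.0, 2.0, 0.5]])
+    >>> warper(input_ids=tf.constant([[0]]), scores=scores, cur_len=1).numpy().tolist()  # only 3.0 and 2.0 survive
+    [[-inf, 3.0, 2.0, -inf]]
+    ```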
+ """
+
+ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ if not isinstance(top_k, int) or top_k <= 0:
+ raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
+
+ self.top_k = max(top_k, min_tokens_to_keep)
+ self.filter_value = filter_value
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ top_k = min(self.top_k, scores.shape[-1]) # Safety check
+ # Boolean mask containing all tokens with a probability less than the last token of the top-k
+ indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]
+ next_scores = tf.where(indices_to_remove, self.filter_value, scores)
+ return next_scores
+
+
+class TFTopPLogitsWarper(TFLogitsWarper):
+ """
+ [`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off.
+
+ Args:
+ top_p (`float`):
+ If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
+ higher are kept for generation.
+ filter_value (`float`, *optional*, defaults to -inf):
+ All filtered values will be set to this float value.
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+ Minimum number of tokens that cannot be filtered.
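+
+    Examples:
+
+    A minimal sketch on toy logits, outside of `generate`, using an already-normalized toy distribution; with
+    `top_p=0.9`, the nucleus keeps the most probable tokens up to and including the one that crosses the 0.9
+    cumulative-probability threshold:
+
+    ```python
+    >>> import tensorflow as tf
+    >>> from transformers.generation.tf_logits_process import TFTopPLogitsWarper
+
+    >>> warper = TFTopPLogitsWarper(top_p=0.9)
+    >>> scores = tf.math.log(tf.constant([[0.5, 0.3, 0.15, 0.05]]))
+    >>> warped = warper(input_ids=tf.constant([[0]]), scores=scores, cur_len=1)
+    >>> # only the least likely token (probability 0.05) falls outside the nucleus and is set to the filter value
+    ```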
+ """
+
+ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
+ raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
+
+ self.top_p = top_p
+ self.filter_value = filter_value
+ self.min_tokens_to_keep = min_tokens_to_keep
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1])
+
+ mask_scores = tf.fill(scores.shape, self.filter_value)
+ cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1)
+ score_mask = cumulative_probs < self.top_p
+
+ # Also include the token that is higher than top_p (the first false = shift and insert a True on the left)
+ score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1)
+
+ # Ensure min tokens to keep
+ score_mask = tf.concat(
+ (
+ tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool),
+ score_mask[:, self.min_tokens_to_keep :],
+ ),
+ axis=-1,
+ )
+
+ # Mask the values that do not fit the criteria
+ topk_next_scores = tf.where(score_mask, topk_scores, mask_scores)
+
+ # Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size)
+ # to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we
+ # can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`)
+ scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]])
+ scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1)
+ next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape)
+
+ return next_scores
+
+
+class TFMinLengthLogitsProcessor(TFLogitsProcessor):
+ r"""
+ [`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
+
+ Args:
+ min_length (`int`):
+ The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
+ eos_token_id (`int`):
+ The id of the *end-of-sequence* token.
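+
+    Examples:
+
+    A minimal sketch on toy logits, outside of `generate`, with an assumed 4-token vocabulary where token 3 is the
+    EOS token; below the minimum length, the EOS score is forced to `-inf` so the sequence cannot end yet:
+
+    ```python
+    >>> import tensorflow as tf
+    >>> from transformers.generation.tf_logits_process import TFMinLengthLogitsProcessor
+
+    >>> processor = TFMinLengthLogitsProcessor(min_length=5, eos_token_id=3)
+    >>> out = processor(input_ids=tf.constant([[0, 1]]), scores=tf.zeros((1, 4)), cur_len=2)
+    >>> float(out[0, 3])
+    -inf
+    ```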
+ """
+
+ def __init__(self, min_length: int, eos_token_id: int):
+ if not isinstance(min_length, int) or min_length < 0:
+ raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
+
+ if not isinstance(eos_token_id, int) or eos_token_id < 0:
+ raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
+
+ self.min_length = min_length
+ self.eos_token_id = eos_token_id
+
+ def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor:
+ eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id
+ scores = tf.where(eos_token_id_mask, float("-inf"), scores)
+ return scores
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ # applies eos token masking if the first argument is true
+ scores = tf.cond(
+ tf.less(cur_len, self.min_length),
+ lambda: self._apply_eos_token_mask(scores),
+ lambda: tf.identity(scores),
+ )
+ return scores
+
+
+class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor):
+ r"""
+ [`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences.
+
+ Args:
+        penalty (`float`):
+ The parameter for repetition penalty. 1.0 means no penalty. See [this
+ paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
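+
+    Examples:
+
+    A minimal sketch on toy logits, outside of `generate`: tokens that already appear in `input_ids` are penalized
+    (positive scores are divided by the penalty, negative scores are multiplied by it), other tokens are untouched:
+
+    ```python
+    >>> import tensorflow as tf
+    >>> from transformers.generation.tf_logits_process import TFRepetitionPenaltyLogitsProcessor
+
+    >>> processor = TFRepetitionPenaltyLogitsProcessor(penalty=2.0)
+    >>> input_ids = tf.constant([[0, 1]])  # tokens 0 and 1 were already generated
+    >>> scores = tf.constant([[4.0, -4.0, 4.0]])
+    >>> processor(input_ids, scores, cur_len=2).numpy().tolist()
+    [[2.0, -8.0, 4.0]]
+    ```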
+ """
+
+ def __init__(self, penalty: float):
+ if not isinstance(penalty, float) or not (penalty > 0):
+ raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
+
+ self.penalty = penalty
+
+ def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
+ # We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown
+ # before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has
+ # the same token multiple times.
+
+ # Gathers the penalties to apply
+ logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1)
+ logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties)
+ logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties)
+
+ # Scatters the penalties
+ token_penalties = tf.ones(logits.shape)
+ batch_size = input_ids.shape[0]
+ seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape
+ indexable_prev_input_ids = tf.concat(
+ (
+ tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1),
+ tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1),
+ ),
+ axis=1,
+ )
+ token_penalties = tf.tensor_scatter_nd_update(
+ token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1])
+ )
+ return token_penalties
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores)
+
+ scores = tf.math.multiply(scores, score_penalties)
+
+ return scores
+
+
+class TFNoBadWordsLogitsProcessor(TFLogitsProcessor):
+ """
+ [`TFLogitsProcessor`] that enforces that specified sequences will never be sampled.
+
+ Args:
+ bad_words_ids (`List[List[int]]`):
+ List of list of token ids that are not allowed to be generated. In order to get the tokens of the words
+ that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing
+ the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space`
+ argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from
+ `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers).
+ eos_token_id (`int`):
+ The id of the *end-of-sequence* token.
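+
+    Examples:
+
+    A minimal sketch on toy logits, outside of `generate`, with an assumed 5-token vocabulary, banning the two-token
+    sequence `[2, 3]`:
+
+    ```python
+    >>> import tensorflow as tf
+    >>> from transformers.generation.tf_logits_process import TFNoBadWordsLogitsProcessor
+
+    >>> processor = TFNoBadWordsLogitsProcessor(bad_words_ids=[[2, 3]], eos_token_id=4)
+    >>> input_ids = tf.constant([[0, 2]])  # the last generated token is 2 ...
+    >>> out = processor(input_ids, tf.zeros((1, 5)), cur_len=2)
+    >>> float(out[0, 3])  # ... so token 3 is banned next, as it would complete the forbidden sequence
+    -inf
+    ```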
+ """
+
+ def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int):
+ if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
+ raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
+ if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
+ raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
+ if any(
+ any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
+ for bad_word_ids in bad_words_ids
+ ):
+ raise ValueError(
+ f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
+ )
+
+ # stores the information about bad words in three tensors:
+ # 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons
+ self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1)
+ # 2. a tensor with the unpadded length of each forbidden sequence, for quick length comparisons
+ bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids]
+ if any(word_len == 0 for word_len in bad_word_seqs_len):
+ raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list")
+ self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32)
+ # 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned
+ self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids])
+
+ def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor:
+ def _tokens_match(bad_word_seq_number):
+ def _len_one():
+ # If the bad sequence only has one token, always mask it
+ return tf.cond(
+ tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1),
+ lambda: tf.ones((), dtype=tf.bool),
+ _len_greater_than_cur_len,
+ )
+
+ def _len_greater_than_cur_len():
+ # Otherwise, if the bad sequence is longer than the current length, they can't ever match
+ return tf.cond(
+ tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]),
+ lambda: tf.zeros((), dtype=tf.bool),
+ _match_found,
+ )
+
+ def _match_found():
+ # Finally, runs the actual comparison. Can only be called if the previous comparisons do not yield
+ # an answer (otherwise we get indexing exceptions)
+ compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1
+ return tf.cond(
+ tf.math.reduce_all(
+ tf.math.equal(
+ row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len]
+ )
+ ),
+ lambda: tf.ones((), dtype=tf.bool),
+ lambda: tf.zeros((), dtype=tf.bool),
+ )
+
+ match = _len_one()
+ return match
+
+ # Compares the current row against all bad word sequences, obtaining a mask with the matches.
+ match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool)
+ row_banned_tokens = self.seq_forbidden_tokens[match_mask]
+ return row_banned_tokens
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ # We want to mask some banned tokens, at a score level. Since the banned tokens depend on the previous
+ # `input_ids`, they may have a different length for each row, and they may even be empty for some rows.
+ # To remain simple and XLA-compatible, we work in a per-row fashion.
+ # TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes
+ # a frequent choke point. (make `cur_len` a tensor?)
+ def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor:
+ row_input_ids, row_score = row_inputs
+ banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len])
+ banned_tokens_mask = tf.scatter_nd(
+ indices=tf.expand_dims(banned_tokens, axis=-1),
+ updates=tf.ones_like(banned_tokens, dtype=tf.bool),
+ shape=row_score.shape,
+ )
+ row_score = tf.where(banned_tokens_mask, -float("inf"), row_score)
+ return row_score
+
+ scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32)
+ return scores
+
+
+class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor):
+ r"""
+ [`TFLogitsProcessor`] that enforces no repetition of n-grams. See
+ [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
+
+ Args:
+ ngram_size (`int`):
+ All ngrams of size `ngram_size` can only occur once.
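+
+ Example (a minimal sketch with a toy vocabulary; this processor only supports eager execution, see `__call__`):
+
+ ```python
+ import tensorflow as tf
+
+ from transformers.generation.tf_logits_process import TFNoRepeatNGramLogitsProcessor
+
+ processor = TFNoRepeatNGramLogitsProcessor(ngram_size=2)
+
+ # The bigram (3, 4) already occurred, so token 4 is banned while the last generated token is 3
+ input_ids = tf.constant([[3, 4, 3]])
+ scores = tf.zeros((1, 5))  # (batch_size, vocab_size)
+ new_scores = processor(input_ids, scores, cur_len=3)  # new_scores[0, 4] is now -inf
+ ```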
+ """
+
+ def __init__(self, ngram_size: int):
+ if not isinstance(ngram_size, int) or ngram_size <= 0:
+ raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
+ self.ngram_size = ngram_size
+
+ def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len):
+ # Copied from fairseq for no_repeat_ngram in beam_search
+ if cur_len + 1 < self.ngram_size:
+ # return no banned tokens if we haven't generated ngram_size tokens yet
+ return [[] for _ in range(num_hypos)]
+ generated_ngrams = [{} for _ in range(num_hypos)]
+ prev_input_ids = input_ids[:, :cur_len]
+ for idx in range(num_hypos):
+ gen_tokens = prev_input_ids[idx].numpy().tolist()
+ generated_ngram = generated_ngrams[idx]
+ for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]):
+ prev_ngram_tuple = tuple(ngram[:-1])
+ generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
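+ # e.g. with `ngram_size=2` and `gen_tokens=[5, 7, 5]`, `generated_ngram` becomes {(5,): [7], (7,): [5]},
+ # i.e. token 7 is banned whenever the previous token is 5, and token 5 whenever the previous token is 7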
+
+ def _get_generated_ngrams(hypo_idx):
+ # Before decoding the next token, prevent decoding of ngrams that have already appeared
+ start_idx = cur_len + 1 - self.ngram_size
+ ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist())
+ return generated_ngrams[hypo_idx].get(ngram_idx, [])
+
+ banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
+
+ return banned_tokens
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ # TODO (joao): enable XLA on this logits processor. See discussion and attempts in
+ # https://github.com/huggingface/transformers/pull/16974
+ if not tf.executing_eagerly():
+ raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.")
+
+ batch_size, vocab_size = scores.shape
+ banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len)
+
+ # create banned_tokens boolean mask
+ banned_tokens_indices_mask = []
+ for banned_tokens_slice in banned_tokens:
+ banned_tokens_indices_mask.append(
+ [True if token in banned_tokens_slice else False for token in range(vocab_size)]
+ )
+
+ scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores)
+
+ return scores
+
+
+class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor):
+ r"""
+ [`TFLogitsProcessor`] that enforces the specified token as the first generated token.
+
+ Args:
+ bos_token_id (`int`):
+ The id of the token to force as the first generated token.
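+
+ Example (an illustrative sketch with a toy vocabulary of size 5):
+
+ ```python
+ import tensorflow as tf
+
+ from transformers.generation.tf_logits_process import TFForcedBOSTokenLogitsProcessor
+
+ processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=2)
+
+ # At the first generation step (`cur_len == 1`), every column except `bos_token_id` is set to -inf
+ input_ids = tf.constant([[0]])
+ scores = tf.random.normal((1, 5))
+ new_scores = processor(input_ids, scores, cur_len=1)  # only new_scores[0, 2] is finite (and equal to 0)
+ ```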
+ """
+
+ def __init__(self, bos_token_id: int):
+ if bos_token_id < 0:
+ raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}")
+ self.bos_token_id = bos_token_id
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ if cur_len == 1:
+ batch_size, num_tokens = scores.shape
+ # sets the score to 0 in the bos_token_id column
+ scores = tf.zeros((batch_size, 1))
+ # sets the score to -inf everywhere else
+ if self.bos_token_id > 0:
+ scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1)
+ if self.bos_token_id < (num_tokens - 1):
+ scores = tf.concat(
+ (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))),
+ axis=-1,
+ )
+ return scores
+
+
+class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):
+ r"""
+ [`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
+
+ Args:
+ max_length (`int`):
+ The maximum length of the sequence to be generated.
+ eos_token_id (`int`):
+ The id of the token to force as the last generated token when `max_length` is reached.
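+
+ Example (an illustrative sketch with a toy vocabulary of size 5):
+
+ ```python
+ import tensorflow as tf
+
+ from transformers.generation.tf_logits_process import TFForcedEOSTokenLogitsProcessor
+
+ processor = TFForcedEOSTokenLogitsProcessor(max_length=4, eos_token_id=1)
+
+ # At the step producing the last token (`cur_len == max_length - 1`), only `eos_token_id` stays finite
+ input_ids = tf.constant([[0, 3, 4]])
+ scores = tf.random.normal((1, 5))
+ new_scores = processor(input_ids, scores, cur_len=3)  # only new_scores[0, 1] is finite (and equal to 0)
+ ```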
+ """
+
+ def __init__(self, max_length: int, eos_token_id: int):
+ self.max_length = max_length
+ if eos_token_id < 0:
+ raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}")
+ self.eos_token_id = eos_token_id
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ if cur_len == self.max_length - 1:
+ batch_size, num_tokens = scores.shape
+ # sets the score to 0 in the eos_token_id column
+ scores = tf.zeros((batch_size, 1))
+ # sets the score to -inf everywhere else
+ if self.eos_token_id > 0:
+ scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1)
+ if self.eos_token_id < (num_tokens - 1):
+ scores = tf.concat(
+ (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))),
+ axis=-1,
+ )
+ return scores
+
+
+class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor):
+ r"""
+ [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
+ generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are
+ not sampled at the beginning of the generation.
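+
+ Example (a minimal sketch; in practice `begin_index` is derived from the length of the forced prompt tokens):
+
+ ```python
+ import tensorflow as tf
+
+ from transformers.generation.tf_logits_process import TFSuppressTokensAtBeginLogitsProcessor
+
+ # Suppress tokens 0 and 2, but only at generation step 1
+ processor = TFSuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens=[0, 2], begin_index=1)
+
+ input_ids = tf.constant([[4]])
+ scores = tf.zeros((1, 5))
+ new_scores = processor(input_ids, scores, cur_len=1)  # columns 0 and 2 are -inf, the others are unchanged
+ ```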
+ """
+
+ def __init__(self, begin_suppress_tokens, begin_index):
+ self.begin_suppress_tokens = list(begin_suppress_tokens)
+ self.begin_index = begin_index
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ scores = tf.cond(
+ tf.equal(cur_len, self.begin_index),
+ lambda: tf.tensor_scatter_nd_update(
+ scores,
+ indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens],
+ updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))],
+ ),
+ lambda: scores,
+ )
+ return scores
+
+
+class TFSuppressTokensLogitsProcessor(TFLogitsProcessor):
+ r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they
+ are not sampled."""
+
+ def __init__(self, suppress_tokens):
+ self.suppress_tokens = list(suppress_tokens)
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ scores = tf.tensor_scatter_nd_update(
+ scores,
+ indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens],
+ updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))],
+ )
+ return scores
+
+
+class TFForceTokensLogitsProcessor(TFLogitsProcessor):
+ r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
+ indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to
+ `-inf` so that they are sampled at their corresponding index."""
+
+ def __init__(self, force_token_map: List[List[int]]):
+ force_token_map = dict(force_token_map)
+ # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
+ # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
+ # Indexes without forced tokens will have a negative value.
+ force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1
+ for index, token in force_token_map.items():
+ if token is not None:
+ force_token_array[index] = token
+ self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32)
+
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
+ def _force_token(generation_idx):
+ batch_size = scores.shape[0]
+ current_token = self.force_token_array[generation_idx]
+
+ new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf")
+ indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1)
+ updates = tf.zeros((batch_size,), dtype=scores.dtype)
+ new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates)
+ return new_scores
+
+ scores = tf.cond(
+ tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]),
+ # If the current length is geq than the length of force_token_array, the processor does nothing.
+ lambda: tf.identity(scores),
+ # Otherwise, it may force a certain token.
+ lambda: tf.cond(
+ tf.greater_equal(self.force_token_array[cur_len], 0),
+ # Only valid (positive) tokens are forced
+ lambda: _force_token(cur_len),
+ # Otherwise, the processor does nothing.
+ lambda: scores,
+ ),
+ )
+ return scores
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..90219c316b6c8c0bcc763fefff8af8269a0a77ee
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_utils.py
@@ -0,0 +1,3131 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import inspect
+import warnings
+from dataclasses import dataclass
+from typing import Any, Dict, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
+
+from ..modeling_tf_outputs import TFCausalLMOutputWithPast, TFSeq2SeqLMOutput
+from ..models.auto import (
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
+)
+from ..tf_utils import shape_list, stable_softmax
+from ..utils import ModelOutput, logging
+from .configuration_utils import GenerationConfig
+from .tf_logits_process import (
+ TFForcedBOSTokenLogitsProcessor,
+ TFForcedEOSTokenLogitsProcessor,
+ TFForceTokensLogitsProcessor,
+ TFLogitsProcessorList,
+ TFMinLengthLogitsProcessor,
+ TFNoBadWordsLogitsProcessor,
+ TFNoRepeatNGramLogitsProcessor,
+ TFRepetitionPenaltyLogitsProcessor,
+ TFSuppressTokensAtBeginLogitsProcessor,
+ TFSuppressTokensLogitsProcessor,
+ TFTemperatureLogitsWarper,
+ TFTopKLogitsWarper,
+ TFTopPLogitsWarper,
+)
+
+
+logger = logging.get_logger(__name__)
+
+
+@dataclass
+class TFGreedySearchDecoderOnlyOutput(ModelOutput):
+ """
+ Base class for outputs of decoder-only generation models using greedy search.
+
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
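+
+ Example (an illustrative sketch; assumes the `openai-community/gpt2` checkpoint can be loaded):
+
+ ```python
+ from transformers import AutoTokenizer, TFAutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+
+ inputs = tokenizer("Today is", return_tensors="tf")
+ outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
+ # With the default `num_beams=1` and `do_sample=False`, `outputs` is a `TFGreedySearchDecoderOnlyOutput`:
+ # `outputs.sequences` holds the generated token ids and `outputs.scores` one tensor per generated token
+ ```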
+ """
+
+ sequences: tf.Tensor = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFGreedySearchEncoderDecoderOutput(ModelOutput):
+ """
+ Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
+ weights of the encoder (respectively the decoder) can be accessed via the encoder_attentions and the
+ encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes).
+
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ encoder_attentions: Optional[Tuple[tf.Tensor]] = None
+ encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFSampleDecoderOnlyOutput(ModelOutput):
+ """
+ Base class for outputs of decoder-only generation models using sampling.
+
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
+ attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFSampleEncoderDecoderOutput(ModelOutput):
+ """
+ Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of
+ the encoder (respectively the decoder) can be accessed via the encoder_attentions and the encoder_hidden_states
+ attributes (respectively the decoder_attentions and the decoder_hidden_states attributes).
+
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size*num_return_sequences,
+ num_heads, sequence_length, sequence_length)`.
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size*num_return_sequences, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`.
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ encoder_attentions: Optional[Tuple[tf.Tensor]] = None
+ encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFBeamSearchDecoderOnlyOutput(ModelOutput):
+ """
+ Base class for outputs of decoder-only generation models using beam search.
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Final beam scores of the generated `sequences`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
+ softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
+ beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
+ beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`.
+ attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ sequences_scores: Optional[tf.Tensor] = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ beam_indices: Optional[tf.Tensor] = None
+ attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFBeamSearchEncoderDecoderOutput(ModelOutput):
+ """
+ Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights
+ of the encoder (respectively the decoder) can be accessed via the encoder_attentions and the encoder_hidden_states
+ attributes (respectively the decoder_attentions and the decoder_hidden_states attributes).
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Final beam scores of the generated `sequences`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
+ softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
+ beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`.
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
+ sequence_length)`.
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ sequences_scores: Optional[tf.Tensor] = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ beam_indices: Optional[tf.Tensor] = None
+ encoder_attentions: Optional[Tuple[tf.Tensor]] = None
+ encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFBeamSampleDecoderOnlyOutput(ModelOutput):
+ """
+ Base class for outputs of decoder-only generation models using beam sample.
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Final beam scores of the generated `sequences`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
+ softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
+ beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
+ beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`.
+ attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ sequences_scores: Optional[tf.Tensor] = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ beam_indices: Optional[tf.Tensor] = None
+ attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFBeamSampleEncoderDecoderOutput(ModelOutput):
+ """
+ Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention
+ weights of the encoder (respectively the decoder) can be accessed via the encoder_attentions and the
+ encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes).
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size*num_beams, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Final beam scores of the generated `sequences`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
+ softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
+ beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`.
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size*num_beams, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ sequences_scores: Optional[tf.Tensor] = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ beam_indices: Optional[tf.Tensor] = None
+ encoder_attentions: Optional[Tuple[tf.Tensor]] = None
+ encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFContrastiveSearchDecoderOnlyOutput(ModelOutput):
+ """
+ Base class for outputs of decoder-only generation models using contrastive search.
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+@dataclass
+class TFContrastiveSearchEncoderDecoderOutput(ModelOutput):
+ """
+ Base class for outputs of encoder-decoder generation models using contrastive search. Hidden states and attention
+ weights of the encoder (respectively the decoder) can be accessed via the encoder_attentions and the
+ encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes).
+
+ Args:
+ sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
+ generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
+ """
+
+ sequences: tf.Tensor = None
+ scores: Optional[Tuple[tf.Tensor]] = None
+ encoder_attentions: Optional[Tuple[tf.Tensor]] = None
+ encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
+
+
+TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput]
+TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput]
+TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput]
+TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput]
+TFContrastiveSearchOutput = Union[TFContrastiveSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput]
+TFGenerateOutput = Union[
+ TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, TFContrastiveSearchOutput
+]
+
+
+class TFGenerationMixin:
+ """
+ A class containing all of the functions supporting generation, to be used as a mixin in [`TFPreTrainedModel`].
+
+ The class exposes [`~generation.TFGenerationMixin.generate`], which can be used for:
+ - *greedy decoding* by calling [`~generation.TFGenerationMixin.greedy_search`] if `num_beams=1` and
+ `do_sample=False`
+ - *contrastive search* by calling [`~generation.TFGenerationMixin.contrastive_search`] if `penalty_alpha>0` and
+ `top_k>1`
+ - *multinomial sampling* by calling [`~generation.TFGenerationMixin.sample`] if `num_beams=1` and
+ `do_sample=True`
+ - *beam-search decoding* by calling [`~generation.TFGenerationMixin.beam_search`] if `num_beams>1`
+
+ You do not need to call any of the above methods directly. Pass custom parameter values to `generate` instead. To
+ learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
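+
+ Example (an illustrative sketch of how the strategies above are selected; assumes the
+ `openai-community/gpt2` checkpoint can be loaded):
+
+ ```python
+ from transformers import AutoTokenizer, TFAutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ inputs = tokenizer("The TensorFlow generation API", return_tensors="tf")
+
+ greedy = model.generate(**inputs, max_new_tokens=10)  # num_beams=1, do_sample=False -> greedy search
+ sampled = model.generate(**inputs, max_new_tokens=10, do_sample=True, seed=[0, 0])  # multinomial sampling
+ beams = model.generate(**inputs, max_new_tokens=10, num_beams=4)  # beam-search decoding
+ ```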
+ """
+
+ _seed_generator = None
+
+ @property
+ def seed_generator(self):
+ warnings.warn("`seed_generator` is deprecated and will be removed in a future version.", UserWarning)
+ if self._seed_generator is None:
+ self._seed_generator = tf.random.Generator.from_non_deterministic_state()
+ return self._seed_generator
+
+ supports_xla_generation = True
+
+ def prepare_inputs_for_generation(self, *args, **kwargs):
+ raise NotImplementedError(
+ "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
+ )
+
+ def compute_transition_scores(
+ self,
+ sequences: tf.Tensor,
+ scores: Tuple[tf.Tensor],
+ beam_indices: Optional[tf.Tensor] = None,
+ normalize_logits: bool = False,
+ ) -> tf.Tensor:
+ """
+ Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was
+ used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
+
+ Parameters:
+ sequences (`tf.Tensor`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
+ shorter if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(tf.Tensor)`):
+ Transition scores for each vocabulary token at each generation step. Beam transition scores consisting
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens. Tuple of
+ `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each
+ tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ beam_indices (`tf.Tensor`, *optional*):
+ Beam indices of generated token id at each generation step. `tf.Tensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` at
+ generate-time.
+ normalize_logits (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
+
+ Return:
+ `tf.Tensor`: A `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
+ the transition scores (logits)
+
+ Examples:
+
+ ```python
+ >>> from transformers import GPT2Tokenizer, TFAutoModelForCausalLM
+ >>> import numpy as np
+
+ >>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> tokenizer.pad_token_id = tokenizer.eos_token_id
+ >>> inputs = tokenizer(["Today is"], return_tensors="tf")
+
+ >>> # Example 1: Print the scores for each token generated with Greedy Search
+ >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
+ >>> transition_scores = model.compute_transition_scores(
+ ... outputs.sequences, outputs.scores, normalize_logits=True
+ ... )
+ >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
+ >>> # encoder-decoder models, like BART or T5.
+ >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
+ >>> generated_tokens = outputs.sequences[:, input_length:]
+ >>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
+ ... # | token | token string | logits | probability
+ ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
+ | 262 | the | -1.413 | 24.33%
+ | 1110 | day | -2.609 | 7.36%
+ | 618 | when | -2.009 | 13.41%
+ | 356 | we | -1.859 | 15.58%
+ | 460 | can | -2.508 | 8.14%
+
+ >>> # Example 2: Reconstruct the sequence scores from Beam Search
+ >>> outputs = model.generate(
+ ... **inputs,
+ ... max_new_tokens=5,
+ ... num_beams=4,
+ ... num_return_sequences=4,
+ ... return_dict_in_generate=True,
+ ... output_scores=True,
+ ... )
+ >>> transition_scores = model.compute_transition_scores(
+ ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
+ ... )
+ >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
+ >>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
+ >>> # use case, you might want to recompute it with `normalize_logits=True`.
+ >>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1)
+ >>> length_penalty = model.generation_config.length_penalty
+ >>> reconstructed_scores = np.sum(transition_scores, axis=1) / (output_length**length_penalty)
+ >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
+ True
+ ```"""
+ # 1. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
+ # to a beam search approach where the first (and only) beam is always selected
+ if beam_indices is None:
+ beam_indices = tf.tile(tf.expand_dims(tf.range(scores[0].shape[0]), axis=1), [1, len(scores)])
+
+ # 2. reshape scores as [batch_size, vocab_size, # generation steps] with # generation steps being
+ # seq_len - input_length
+ scores = tf.transpose(tf.reshape(tf.stack(scores), (len(scores), -1)), (1, 0))
+ scores = tf.reshape(scores, (-1, self.config.vocab_size, scores.shape[-1]))
+
+ # 3. Optionally normalize the logits (across the vocab dimension)
+ if normalize_logits:
+ scores = tf.nn.log_softmax(scores, axis=1)
+
+ # 4. cut beam_indices to longest beam length
+ beam_indices_mask = beam_indices < 0
+ max_beam_length = tf.math.reduce_max(
+ tf.math.reduce_sum((1 - tf.cast(beam_indices_mask, dtype=tf.int32)), axis=-1)
+ )
+ beam_indices = beam_indices[:, -max_beam_length:]
+ beam_indices_mask = beam_indices_mask[:, -max_beam_length:]
+
+ # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
+ beam_indices = tf.where(beam_indices_mask, 0, beam_indices)
+
+ # 6. Define which indices contributed to scores
+ cut_idx = sequences.shape[-1] - max_beam_length
+ token_indices = sequences[:, cut_idx:]
+ gen_step_idx = tf.broadcast_to(tf.range(scores.shape[-1]), token_indices.shape)
+ indices = tf.stack([beam_indices, token_indices, gen_step_idx], axis=-1)
+
+ # 7. Compute scores
+ transition_scores = tf.gather_nd(scores, indices)
+
+ # 8. Mask out transition_scores of beams that stopped early
+ transition_scores = tf.where(beam_indices_mask, 0, transition_scores)
+
+ return transition_scores
+
+ def _validate_model_class(self):
+ """
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
+ right class to use.
+ """
+ if not self.can_generate():
+ generate_compatible_mappings = [
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ ]
+ generate_compatible_classes = set()
+ for model_mapping in generate_compatible_mappings:
+ supported_models = model_mapping.get(type(self.config), default=None)
+ if supported_models is not None:
+ generate_compatible_classes.add(supported_models.__name__)
+ exception_message = (
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
+ "it doesn't have a language model head."
+ )
+ if generate_compatible_classes:
+ exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
+ raise TypeError(exception_message)
+
+ def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
+ # Excludes arguments that are handled before calling any model function
+ if self.config.is_encoder_decoder:
+ for key in ["decoder_input_ids"]:
+ model_kwargs.pop(key, None)
+
+ unused_model_args = []
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
+ if "kwargs" in model_args or "model_kwargs" in model_args:
+ model_args |= set(inspect.signature(self.call).parameters)
+ for key, value in model_kwargs.items():
+ if value is not None and key not in model_args:
+ unused_model_args.append(key)
+
+ if unused_model_args:
+ raise ValueError(
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
+ " generate arguments will also show up in this list)"
+ )
+
+ def generate(
+ self,
+ inputs: Optional[tf.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ logits_processor: Optional[TFLogitsProcessorList] = None,
+ seed=None,
+ **kwargs,
+ ) -> Union[TFGenerateOutput, tf.Tensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head.
+
+
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ inputs (`tf.Tensor` of varying shape depending on the modality, *optional*):
+ The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
+ method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
+ should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
+ `input_ids`, `input_values`, `input_features`, or `pixel_values`.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logit processor is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ seed (`List[int]`, *optional*):
+ Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
+ `seed` argument from stateless functions in `tf.random`.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
+
+ Return:
+ [`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when
+ `config.return_dict_in_generate=True`) or a `tf.Tensor`.
+
+ If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.TFGreedySearchDecoderOnlyOutput`],
+ - [`~generation.TFSampleDecoderOnlyOutput`],
+ - [`~generation.TFBeamSearchDecoderOnlyOutput`],
+ - [`~generation.TFBeamSampleDecoderOnlyOutput`]
+
+ If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.TFGreedySearchEncoderDecoderOutput`],
+ - [`~generation.TFSampleEncoderDecoderOutput`],
+ - [`~generation.TFBeamSearchEncoderDecoderOutput`],
+ - [`~generation.TFBeamSampleEncoderDecoderOutput`]
+
+ """
+
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
+ self._validate_model_class()
+
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
+ if generation_config is None:
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
+ # two conditions must be met
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
+ # 2) the generation config must have seen no modification since its creation (the hash is the same).
+ if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash(
+ self.generation_config
+ ):
+ new_generation_config = GenerationConfig.from_model_config(self.config)
+ if new_generation_config != self.generation_config:
+ warnings.warn(
+ "You have modified the pretrained model configuration to control generation. This is a"
+ " deprecated strategy to control generation and will be removed soon, in a future version."
+ " Please use and modify the model generation configuration (see"
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
+ )
+ self.generation_config = new_generation_config
+ generation_config = self.generation_config
+
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ # 2. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models)
+ if inputs is not None:
+ if isinstance(inputs, tf.Tensor) and inputs.dtype.is_floating:
+ pass
+ elif isinstance(inputs, np.ndarray) and np.issubdtype(inputs.dtype, np.floating):
+ pass
+ else:
+ inputs = tf.cast(inputs, tf.int32)
+ if model_kwargs.get("attention_mask") is not None:
+ model_kwargs["attention_mask"] = tf.cast(model_kwargs["attention_mask"], tf.int32)
+ if "decoder_input_ids" in model_kwargs:
+ if (
+ isinstance(model_kwargs["decoder_input_ids"], tf.Tensor)
+ and model_kwargs["decoder_input_ids"].dtype.is_floating
+ ):
+ pass
+ elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype(
+ model_kwargs["decoder_input_ids"].dtype, np.floating
+ ):
+ pass
+ else:
+ model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32)
+
+ # 3. Set generation parameters if not already defined
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+
+ if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
+ if model_kwargs.get("attention_mask") is None:
+ logger.warning(
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
+ )
+ eos_token_id = generation_config.eos_token_id
+ if isinstance(eos_token_id, list):
+ eos_token_id = eos_token_id[0]
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
+ generation_config.pad_token_id = eos_token_id
+
+ use_xla = not tf.executing_eagerly()
+ if use_xla and not self.supports_xla_generation:
+ raise ValueError(
+ "The selected model does not support Graph mode nor XLA generation (e.g. from tf.function())"
+ )
+
+ # 4. Define model inputs
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
+ inputs, generation_config.bos_token_id, model_kwargs
+ )
+ # `inputs_tensor` now has to be defined and cannot be None anymore
+ batch_size = shape_list(inputs_tensor)[0]
+
+ # 5. Prepare other model kwargs
+ model_kwargs["output_attentions"] = generation_config.output_attentions
+ model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
+ model_kwargs["use_cache"] = generation_config.use_cache
+
+ accepts_attention_mask = "attention_mask" in set(inspect.signature(self.call).parameters.keys())
+ requires_attention_mask = "encoder_outputs" not in model_kwargs
+
+ if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask:
+ model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
+ inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
+ )
+
+ # decoder-only models should use left-padding for generation
+ if not self.config.is_encoder_decoder:
+ if generation_config.pad_token_id is not None and tf.math.reduce_any(
+ inputs_tensor[:, -1] == generation_config.pad_token_id
+ ):
+ logger.warning(
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
+ )
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
+ # if the model is an encoder-decoder model, `encoder_outputs` are created and added to `model_kwargs`
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
+ inputs_tensor, model_kwargs, model_input_name
+ )
+
+ # 6. Prepare model inputs which will be used for auto-regressive generation
+ if self.config.is_encoder_decoder:
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
+ batch_size=batch_size,
+ model_input_name=model_input_name,
+ model_kwargs=model_kwargs,
+ decoder_start_token_id=generation_config.decoder_start_token_id,
+ bos_token_id=generation_config.bos_token_id,
+ )
+ else:
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
+
+ # 7. Prepare `max_length` depending on other stopping criteria.
+ input_ids_seq_length = shape_list(input_ids)[-1]
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
+ # 20 is the default max_length of the generation config
+ warnings.warn(
+ f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
+ "to control the generation length. recommend setting `max_new_tokens` to control the maximum length of the generation.",
+ UserWarning,
+ )
+ elif generation_config.max_new_tokens is not None:
+ if not has_default_max_length and generation_config.max_length is not None:
+ logger.warning(
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
+ "Please refer to the documentation for more information. "
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
+ )
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
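+ # e.g. (illustrative): a prompt of 8 tokens with `max_new_tokens=20` results in `max_length=28`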
+
+ # If the input length is a tensor (i.e. dynamic length), skip length checks
+ if not isinstance(input_ids_seq_length, tf.Tensor):
+ if (
+ generation_config.min_length is not None
+ and generation_config.min_length > generation_config.max_length
+ ):
+ raise ValueError(
+ f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger"
+ f" than the maximum length ({generation_config.max_length})"
+ )
+ if input_ids_seq_length >= generation_config.max_length:
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
+ logger.warning(
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+ " increasing`max_new_tokens`."
+ )
+
+ # 8. determine generation mode
+ is_contrastive_search_gen_mode = (
+ generation_config.top_k is not None
+ and generation_config.top_k > 1
+ and generation_config.do_sample is False
+ and generation_config.penalty_alpha is not None
+ and generation_config.penalty_alpha > 0
+ )
+ is_greedy_gen_mode = (
+ not is_contrastive_search_gen_mode
+ and (generation_config.num_beams == 1)
+ and generation_config.do_sample is False
+ )
+ is_beam_gen_mode = (
+ not is_contrastive_search_gen_mode
+ and (generation_config.num_beams > 1)
+ and generation_config.do_sample is False
+ )
+ is_sample_gen_mode = (generation_config.num_beams == 1) and generation_config.do_sample is True
+ is_beam_sample_gen_mode = (generation_config.num_beams > 1) and generation_config.do_sample is True
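+ # Illustrative (assumed) configurations: `num_beams=1, do_sample=False` -> greedy search;
+ # `num_beams=4, do_sample=False` -> beam search; `num_beams=1, do_sample=True` -> sampling;
+ # `top_k=4, penalty_alpha=0.6, do_sample=False` -> contrastive search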
+
+ # 9. prepare distribution pre_processing samplers
+ logits_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=input_ids_seq_length,
+ logits_processor=logits_processor,
+ )
+
+ # 10. go into different generation modes
+ if is_greedy_gen_mode:
+ if generation_config.num_return_sequences > 1:
+ raise ValueError(
+ f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
+ " greedy search."
+ )
+ # 11. run greedy search
+ return self.greedy_search(
+ input_ids,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ logits_processor=logits_processor,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ **model_kwargs,
+ )
+ elif is_contrastive_search_gen_mode:
+ if generation_config.num_return_sequences > 1:
+ raise ValueError(
+ f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
+ " contrastive search."
+ )
+ # 11. run contrastive search
+ return self.contrastive_search(
+ input_ids,
+ top_k=generation_config.top_k,
+ penalty_alpha=generation_config.penalty_alpha,
+ logits_processor=logits_processor,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ **model_kwargs,
+ )
+ elif is_sample_gen_mode:
+ # 11. prepare logits warper
+ logits_warper = self._get_logits_warper(generation_config=generation_config)
+
+ # 12. expand input_ids with `num_return_sequences` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_return_sequences,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ **model_kwargs,
+ )
+
+ # 13. run sample
+ return self.sample(
+ input_ids,
+ logits_processor=logits_processor,
+ logits_warper=logits_warper,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ seed=seed,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ **model_kwargs,
+ )
+
+ elif is_beam_gen_mode:
+ if generation_config.num_beams < generation_config.num_return_sequences:
+ raise ValueError(
+ "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
+ f" num_return_sequences, got {generation_config.num_beams} and"
+ f" {generation_config.num_return_sequences} (respectivelly)"
+ )
+
+ # 11. broadcast inputs to the desired number of beams
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ expand_in_new_axis=True,
+ **model_kwargs,
+ )
+
+ # 12. run beam search
+ return self.beam_search(
+ input_ids,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ length_penalty=generation_config.length_penalty,
+ early_stopping=generation_config.early_stopping,
+ logits_processor=logits_processor,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ num_return_sequences=generation_config.num_return_sequences,
+ **model_kwargs,
+ )
+
+ elif is_beam_sample_gen_mode:
+ if generation_config.num_beams < generation_config.num_return_sequences:
+ raise ValueError(
+ "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
+ f" num_return_sequences, got {generation_config.num_beams} and"
+ f" {generation_config.num_return_sequences} (respectivelly)"
+ )
+
+ # 11. prepare logits warper
+ logits_warper = self._get_logits_warper(generation_config=generation_config)
+
+ # 12. broadcast inputs to the desired number of beams
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ expand_in_new_axis=True,
+ **model_kwargs,
+ )
+
+ # 13. run beam sample (beam search with sampling)
+ return self.beam_search(
+ input_ids,
+ do_sample=True,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ length_penalty=generation_config.length_penalty,
+ early_stopping=generation_config.early_stopping,
+ logits_processor=logits_processor,
+ logits_warper=logits_warper,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ num_return_sequences=generation_config.num_return_sequences,
+ **model_kwargs,
+ )
+
+ def _prepare_attention_mask_for_generation(
+ self,
+ inputs: tf.Tensor,
+ pad_token_id: Optional[int],
+ eos_token_id: Optional[int],
+ ) -> tf.Tensor:
+ is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64)
+ is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id)
+ is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id)
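+ # Illustrative example (values assumed): `inputs=[[5, 6, 0, 0]]` with `pad_token_id=0` and
+ # `eos_token_id=2` yields the mask `[[1, 1, 0, 0]]`; without a pad token in the inputs, an
+ # all-ones mask is returned instead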
+
+ # Check if input is input_ids and padded -> only then is attention_mask defined
+ if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
+ return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32)
+ else:
+ return tf.ones(inputs.shape[:2], dtype=tf.int32)
+
+ def _prepare_encoder_decoder_kwargs_for_generation(
+ self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None
+ ) -> Dict[str, Any]:
+ # 1. get encoder and store encoder outputs
+ encoder = self.get_encoder()
+
+ # 2. prepare encoder args and encoder kwargs from model kwargs
+ irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
+ encoder_kwargs = {
+ argument: value
+ for argument, value in model_kwargs.items()
+ if not any(argument.startswith(p) for p in irrelevant_prefix)
+ }
+ encoder_signature = set(inspect.signature(encoder.call).parameters)
+ encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
+ if not encoder_accepts_wildcard:
+ encoder_kwargs = {
+ argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
+ }
+
+ # 3. vision models don't use `attention_mask`.
+ encoder_kwargs["return_dict"] = True
+ encoder_kwargs[model_input_name] = inputs_tensor
+ if model_input_name != self.main_input_name: # in Keras, the first input must always be passed
+ encoder_kwargs[self.main_input_name] = None
+ encoder_outputs = encoder(**encoder_kwargs)
+ model_kwargs["encoder_outputs"] = encoder_outputs
+
+ return model_kwargs
+
+ def _prepare_decoder_input_ids_for_generation(
+ self,
+ batch_size: int,
+ model_input_name: str,
+ model_kwargs: Dict[str, tf.Tensor],
+ decoder_start_token_id: int = None,
+ bos_token_id: int = None,
+ ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]:
+ """Prepares `decoder_input_ids` for generation with encoder-decoder models"""
+ # 1. Check whether the user has defined `decoder_input_ids` manually. For flexibility in input naming, we
+ # also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
+ elif "input_ids" in model_kwargs and model_input_name != "input_ids":
+ decoder_input_ids = model_kwargs.pop("input_ids")
+ else:
+ decoder_input_ids = None
+
+ # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
+ decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
+ decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id
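+ # e.g. (illustrative): `batch_size=2` and `decoder_start_token_id=0` give `decoder_input_ids_start == [[0], [0]]`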
+
+ # no user input -> use decoder_start_token_id as decoder_input_ids
+ if decoder_input_ids is None:
+ decoder_input_ids = decoder_input_ids_start
+ # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
+ # decoder_attention_mask if provided)
+ elif tf.reduce_all(decoder_input_ids[:, 0] != decoder_start_token_id):
+ decoder_input_ids = tf.concat([decoder_input_ids_start, decoder_input_ids], axis=-1)
+ if "decoder_attention_mask" in model_kwargs:
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
+ decoder_attention_mask = tf.concat(
+ (tf.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
+ axis=-1,
+ )
+ model_kwargs["decoder_attention_mask"] = decoder_attention_mask
+
+ return decoder_input_ids, model_kwargs
+
+ def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
+ # retrieve decoder_start_token_id for encoder-decoder models
+ # fall back to bos_token_id if necessary
+ decoder_start_token_id = (
+ decoder_start_token_id
+ if decoder_start_token_id is not None
+ else self.generation_config.decoder_start_token_id
+ )
+ bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
+
+ if decoder_start_token_id is not None:
+ return decoder_start_token_id
+ elif bos_token_id is not None:
+ return bos_token_id
+ raise ValueError(
+ "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
+ )
+
+ @staticmethod
+ def _expand_inputs_for_generation(
+ expand_size: int = 1,
+ is_encoder_decoder: bool = False,
+ input_ids: Optional[tf.Tensor] = None,
+ expand_in_new_axis: bool = False,
+ **model_kwargs,
+ ) -> Tuple[tf.Tensor, Dict[str, Any]]:
+ """
+ Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...],
+ depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with
+ `expand_in_new_axis=True`
+ """
+
+ def _expand_tensor(tensor: tf.Tensor):
+ if expand_in_new_axis:
+ shape = shape_list(tensor)
+ return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:]))
+ else:
+ return tf.repeat(tensor, expand_size, axis=0)
+
+ def _expand_dict_for_generation(dict_to_expand):
+ for key in dict_to_expand:
+ if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor):
+ dict_to_expand[key] = _expand_tensor(dict_to_expand[key])
+ return dict_to_expand
+
+ if input_ids is not None:
+ input_ids = _expand_tensor(input_ids)
+
+ model_kwargs = _expand_dict_for_generation(model_kwargs)
+
+ if is_encoder_decoder:
+ if model_kwargs.get("encoder_outputs") is None:
+ raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
+ model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
+
+ return input_ids, model_kwargs
+
+ def _prepare_model_inputs(
+ self,
+ inputs: Optional[tf.Tensor] = None,
+ bos_token_id: Optional[int] = None,
+ model_kwargs: Optional[Dict[str, tf.Tensor]] = None,
+ ) -> Tuple[tf.Tensor, Optional[str], Dict[str, tf.Tensor]]:
+ """
+ This function extracts the model-specific `inputs` for generation.
+ """
+ # 1. retrieve all kwargs that are non-None or non-model input related.
+ # some encoder-decoder models have different names for model and encoder
+ if (
+ self.config.is_encoder_decoder
+ and hasattr(self, "encoder")
+ and hasattr(self.encoder, "main_input_name")
+ and self.encoder.main_input_name != self.main_input_name
+ ):
+ input_name = self.encoder.main_input_name
+ else:
+ input_name = self.main_input_name
+
+ model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
+
+ # 2. check whether model_input_name is passed as kwarg
+ # if yes and `inputs` is None use kwarg inputs
+ inputs_kwarg = model_kwargs.pop(input_name, None)
+ if inputs_kwarg is not None and inputs is not None:
+ raise ValueError(
+ f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. "
+ f"Make sure to either pass {inputs} or {input_name}=..."
+ )
+ elif inputs_kwarg is not None:
+ inputs = inputs_kwarg
+
+ # 3. In the presence of `inputs_embeds` for text models:
+ # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
+ # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
+ # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
+ # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
+ # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
+ if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
+ if not self.config.is_encoder_decoder:
+ has_inputs_embeds_forwarding = "inputs_embeds" in set(
+ inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
+ )
+ if not has_inputs_embeds_forwarding:
+ raise ValueError(
+ f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
+ "doesn't have its forwarding implemented. See the GPT2 implementation for an example "
+ "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
+ )
+ # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
+ # the attention mask) can rely on the actual model input.
+ model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
+ inputs, bos_token_id, model_kwargs=model_kwargs
+ )
+ else:
+ if inputs is not None:
+ raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
+ inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
+
+ # 4. if `inputs` is still None, try to create `input_ids` from BOS token
+ inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
+
+ return inputs, input_name, model_kwargs
+
+ def _maybe_initialize_input_ids_for_generation(
+ self,
+ inputs: Optional[tf.Tensor] = None,
+ bos_token_id: Optional[int] = None,
+ model_kwargs: Optional[Dict[str, tf.Tensor]] = None,
+ ) -> tf.Tensor:
+ """Initializes input ids for generation, if necessary."""
+ if inputs is not None:
+ return inputs
+
+ encoder_outputs = model_kwargs.get("encoder_outputs")
+ if self.config.is_encoder_decoder and encoder_outputs is not None:
+ # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
+ shape = encoder_outputs.last_hidden_state.shape[:-1]
+ return tf.ones(shape, dtype=tf.int32) * -100
+
+ if bos_token_id is None:
+ raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
+
+ # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
+ # soft-prompting or in multimodal implementations built on top of decoder-only language models.
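+ # e.g. (illustrative): an `inputs_embeds` tensor of shape (4, 10, 768) in `model_kwargs` gives
+ # `batch_size=4`, so a (4, 1) tensor filled with `bos_token_id` is returned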
+ batch_size = 1
+ for value in model_kwargs.values():
+ if isinstance(value, tf.Tensor):
+ batch_size = value.shape[0]
+ break
+ return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id
+
+ @staticmethod
+ def _extract_past_from_model_output(outputs: ModelOutput):
+ past_key_values = None
+ if "past_key_values" in outputs:
+ past_key_values = outputs.past_key_values
+ elif "mems" in outputs:
+ past_key_values = outputs.mems
+ elif "past_buckets_states" in outputs:
+ past_key_values = outputs.past_buckets_states
+ return past_key_values
+
+ def _update_model_kwargs_for_generation(
+ self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False
+ ) -> Dict[str, Any]:
+ # update past_key_values
+ model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs)
+
+ # update attention mask
+ if not is_encoder_decoder:
+ if "attention_mask" in model_kwargs:
+ attention_mask = model_kwargs["attention_mask"]
+ model_kwargs["attention_mask"] = tf.concat(
+ [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1
+ )
+
+ return model_kwargs
+
+ def _update_model_kwargs_for_xla_generation(
+ self,
+ model_outputs: ModelOutput,
+ model_kwargs: Dict[str, Any],
+ cur_len: int,
+ max_length: int,
+ batch_size: int,
+ is_encoder_decoder: bool = False,
+ batch_axis: int = 0,
+ ):
+ def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder):
+ """initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
+ if is_encoder_decoder:
+ # One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values tensor,
+ # 1s for the actual input_ids
+ decoder_attention_mask = tf.concat(
+ [
+ tf.ones((batch_size, 1), dtype=tf.int32),
+ tf.zeros((batch_size, num_padding_values), dtype=tf.int32),
+ tf.ones((batch_size, 1), dtype=tf.int32),
+ ],
+ axis=1,
+ )
+ mask = {"decoder_attention_mask": decoder_attention_mask}
+ else:
+ attention_mask = model_kwargs.pop("attention_mask")
+ # 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids
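+ # e.g. (illustrative): `attention_mask=[[1, 1]]` with `num_padding_values=3` becomes `[[1, 1, 0, 0, 0, 1]]`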
+ attention_mask = tf.concat(
+ [
+ attention_mask,
+ tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype),
+ tf.ones((batch_size, 1), dtype=attention_mask.dtype),
+ ],
+ axis=1,
+ )
+ mask = {"attention_mask": attention_mask}
+ return mask
+
+ def _update_attention(model_kwargs, new_past_index, is_encoder_decoder):
+ """updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
+ update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index
+ if is_encoder_decoder:
+ decoder_attention_mask = model_kwargs.pop("decoder_attention_mask")
+ decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype)
+ decoder_attention_mask = dynamic_update_slice(
+ decoder_attention_mask, decoder_attention_mask_update_slice, update_start
+ )
+ mask = {"decoder_attention_mask": decoder_attention_mask}
+ else:
+ attention_mask = model_kwargs.pop("attention_mask")
+ attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype)
+ attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start)
+ mask = {"attention_mask": attention_mask}
+ return mask
+
+ def _initialize_past(past_key_values, num_padding_values, batch_axis):
+ """initialize past_key_values with zeros -- the structure depends on `batch_axis`"""
+ if batch_axis == 0:
+ padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32)
+ new_past = ()
+ for past_layer in past_key_values:
+ new_past_layer = list(past_layer)
+ for i in range(len(new_past_layer[:2])):
+ new_past_layer[i] = tf.pad(past_layer[i], padding_values)
+ new_past += (tuple(new_past_layer),)
+ else:
+ padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2))
+ new_past = list(past_key_values)
+ for i in range(len(past_key_values)):
+ new_past[i] = tf.pad(past_key_values[i], padding_values)
+ return new_past
+
+ def _update_past(past_key_values, new_past_index, batch_axis):
+ if batch_axis == 0:
+ slice_start_base = tf.constant([0, 0, 1, 0])
+ new_past = ()
+ for past_layer in past_key_values:
+ new_past_layer = list(past_layer)
+ for i in range(len(new_past_layer[:2])):
+ update_slice = past_layer[i][:, :, -1:]
+ # Write the last slice to the first open location in the padded past_key_values array
+ # and then truncate the last slice off the array
+ new_past_layer[i] = dynamic_update_slice(
+ past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index
+ )
+ new_past += (tuple(new_past_layer),)
+ else:
+ slice_start_base = tf.constant([0, 0, 0, 1, 0])
+ new_past = [None for _ in range(len(past_key_values))]
+ for i in range(len(past_key_values)):
+ update_slice = past_key_values[i][:, :, :, -1:]
+ # Write the last slice to the first open location in the padded past_key_values array
+ # and then truncate the last slice off the array
+ new_past[i] = dynamic_update_slice(
+ past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index
+ )
+ return new_past
+
+ past_key_values = self._extract_past_from_model_output(model_outputs)
+ if past_key_values is None:
+ raise ValueError(
+ "No known `past_key_values variable` found in model outputs (model outputs keys:"
+ f" {list(model_outputs.keys())})"
+ )
+ is_past_initialized = model_kwargs.pop("past_key_values", None) is not None
+
+ if not is_past_initialized:
+ # The padded version of `past_key_values` has a length of `max_length - 1`, as `past_key_values` holds information relative to
+ # previous autoregressive generation steps (step 0 has no past_key_values, step 1 has 1 past_key_values value, ..., the last step
+ # has `max_length - 1` past_key_values values).
+ num_padding_values = max_length - cur_len - 1
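+ # e.g. (illustrative): `max_length=20` and `cur_len=5` leave `num_padding_values=14` unfilled cache slots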
+ mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder)
+ new_past = _initialize_past(past_key_values, num_padding_values, batch_axis)
+ else:
+ # The new index of past_key_values to be filled corresponds to the current length of the sequence, with two
+ # subtractions: -1 because past_key_values holds information regarding previous generation steps (read comment above)
+ # and -1 again because in an array the index is the length of the array minus 1.
+ new_past_index = cur_len - 2
+ mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder)
+ new_past = _update_past(past_key_values, new_past_index, batch_axis)
+
+ # sets the updated variables (mask and past_key_values)
+ model_kwargs.update(mask)
+ model_kwargs["past_key_values"] = tuple(new_past)
+
+ return model_kwargs
+
+ def _get_logits_warper(
+ self,
+ generation_config: GenerationConfig,
+ ) -> TFLogitsProcessorList:
+ """
+ This method returns a [`TFLogitsProcessorList`] object that contains all relevant [`TFLogitsWarper`]
+ instances used for multinomial sampling.
+ """
+
+ # instantiate warpers list
+ warpers = TFLogitsProcessorList()
+
+ # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a
+ # better score (i.e. keep len(generation_config.eos_token_id) + 1)
+ if generation_config.num_beams > 1:
+ if isinstance(generation_config.eos_token_id, list):
+ min_tokens_to_keep = len(generation_config.eos_token_id) + 1
+ else:
+ min_tokens_to_keep = 2
+ else:
+ min_tokens_to_keep = 1
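+ # e.g. (illustrative): beam search with `eos_token_id=[50256, 50257]` keeps `min_tokens_to_keep=3`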
+
+ if generation_config.temperature is not None and generation_config.temperature != 1.0:
+ warpers.append(TFTemperatureLogitsWarper(generation_config.temperature))
+ if generation_config.top_k is not None and generation_config.top_k != 0:
+ warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep))
+ if generation_config.top_p is not None and generation_config.top_p < 1.0:
+ warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep))
+ return warpers
+
+ def _get_logits_processor(
+ self,
+ generation_config: GenerationConfig,
+ input_ids_seq_length: int,
+ logits_processor: Optional[TFLogitsProcessorList],
+ ) -> TFLogitsProcessorList:
+ """
+ This method returns a [`TFLogitsProcessorList`] object that contains all relevant [`TFLogitsProcessor`]
+ instances used to modify the scores of the language model head.
+ """
+ processors = TFLogitsProcessorList()
+
+ # instantiate processors list
+ if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
+ processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty))
+ if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
+ processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
+ if generation_config.bad_words_ids is not None:
+ processors.append(
+ TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id)
+ )
+ if (
+ generation_config.min_length is not None
+ and generation_config.eos_token_id is not None
+ and generation_config.min_length > 0
+ ):
+ processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id))
+ if generation_config.forced_bos_token_id is not None:
+ processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
+ if generation_config.forced_eos_token_id is not None:
+ processors.append(
+ TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
+ )
+ if generation_config.suppress_tokens is not None:
+ processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
+ if generation_config.begin_suppress_tokens is not None:
+ begin_index = input_ids_seq_length
+ begin_index = (
+ begin_index
+ if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
+ else begin_index + 1
+ )
+ if generation_config.forced_decoder_ids is not None:
+ begin_index += generation_config.forced_decoder_ids[-1][
+ 0
+ ] # generation starts after the last token that is forced
+ processors.append(
+ TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
+ )
+ if generation_config.forced_decoder_ids is not None:
+ processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids))
+
+ processors = self._merge_criteria_processor_list(processors, logits_processor)
+ return processors
+
+ def _merge_criteria_processor_list(
+ self,
+ default_list: TFLogitsProcessorList,
+ custom_list: TFLogitsProcessorList,
+ ) -> TFLogitsProcessorList:
+ if len(custom_list) == 0:
+ return default_list
+ for default in default_list:
+ for custom in custom_list:
+ if type(custom) is type(default):
+ object_type = "logits processor"
+ raise ValueError(
+ f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
+ f" `generate`, but it has already been created with the values {default}. {default} has been"
+ " created by passing the corresponding arguments to generate or by the model's config default"
+ f" values. If you just want to change the default values of {object_type} consider passing"
+ f" them as arguments to `generate` instead of using a custom {object_type}."
+ )
+ default_list.extend(custom_list)
+ return default_list
+
+ def greedy_search(
+ self,
+ input_ids: tf.Tensor,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ logits_processor: Optional[TFLogitsProcessorList] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[TFGreedySearchOutput, tf.Tensor]:
+ r"""
+ Generates sequences for models with a language modeling head using greedy decoding.
+
+ Parameters:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ logits_processor (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ model_kwargs:
+ Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
+ model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or
+ `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
+ [`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... TFAutoModelForCausalLM,
+ ... TFLogitsProcessorList,
+ ... TFMinLengthLogitsProcessor,
+ ... )
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
+
+ >>> input_prompt = "Today is a beautiful day, and"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids
+
+ >>> # instantiate logits processors
+ >>> logits_processor = TFLogitsProcessorList(
+ ... [
+ ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+
+ >>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor)
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"]
+ ```"""
+
+ # 1. init greedy_search values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+ use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find a programmatic way to detect the cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+ # some models, like XLNet, need more than the last token in the presence of past_key_values
+ needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3. init tensors to use for "xla-compileable" generate function
+ batch_size, cur_len = shape_list(input_ids)
+
+ # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
+ input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
+ generated = tf.concat([input_ids, input_ids_padding], axis=-1)
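+ # e.g. (illustrative): `input_ids=[[7, 8]]` with `max_length=5` and `pad_token_id=0` gives
+ # `generated=[[7, 8, 0, 0, 0]]`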
+ finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
+
+ # 4. define "xla-compile-able" stop-condition and auto-regressive function
+ # define condition fn
+ def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
+ """state termination condition fn."""
+ return ~tf.reduce_all(finished_sequences)
+
+ # define body fn (the auto-regressive update)
+ def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs):
+ """state update fn."""
+ if model_kwargs.get("past_key_values") is None or needs_full_input:
+ input_ids = generated[:, :cur_len]
+ else:
+ input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
+ model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
+ # forward pass to get next token logits
+ model_outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+ next_token_logits = model_outputs.logits[:, -1]
+
+ # pre-process distribution
+ next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)
+
+ # Store scores, attentions and hidden_states when required
+ if not use_xla and return_dict_in_generate:
+ if output_scores:
+ scores.append(next_tokens_scores)
+ if output_attentions and self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.decoder_attentions)
+ elif output_attentions and not self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.attentions)
+ if self.config.is_encoder_decoder:
+ cross_attentions.append(model_outputs.cross_attentions)
+
+ if output_hidden_states and self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.decoder_hidden_states)
+ elif output_hidden_states and not self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.hidden_states)
+
+ # argmax
+ next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32)
+
+ if eos_token_id is not None:
+ if pad_token_id is None:
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
+ unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
+ next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
+ next_token_is_eos = tf.math.reduce_any(
+ tf.equal(
+ tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
+ ),
+ axis=0,
+ )
+ finished_sequences = finished_sequences | next_token_is_eos
+
+ # update `generated` and `cur_len`
+ update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
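+ # e.g. (illustrative): with `batch_size=2` and `cur_len=3`, `update_indices == [[0, 3], [1, 3]]`,
+ # i.e. column 3 of every row receives its next token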
+ generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
+ cur_len += 1
+
+ # update model_kwargs
+ if use_xla:
+ model_kwargs = self._update_model_kwargs_for_xla_generation(
+ model_outputs=model_outputs,
+ model_kwargs=model_kwargs,
+ cur_len=cur_len,
+ max_length=max_length,
+ batch_size=batch_size,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ batch_axis=cache_batch_axis,
+ )
+ else:
+ model_kwargs = self._update_model_kwargs_for_generation(
+ model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
+ )
+ # if we don't cache the key/value states (`past_key_values`), we need the whole input
+ if model_kwargs.get("past_key_values", None) is None:
+ # let's throw out `past_key_values` since we don't want `None` tensors
+ model_kwargs.pop("past_key_values", None)
+
+ return generated, finished_sequences, cur_len, model_kwargs
+
+ # 5. run generation
+ # 1st generation step has to be run before to initialize `past_key_values`
+ generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn(
+ generated, finished_sequences, cur_len, model_kwargs
+ )
+
+ # 2-to-n generation steps can then be run in autoregressive fashion
+ # only in case 1st generation step does NOT yield EOS token though
+ maximum_iterations = max_length - cur_len
+ generated, _, cur_len, _ = tf.while_loop(
+ greedy_search_cond_fn,
+ greedy_search_body_fn,
+ (generated, finished_sequences, cur_len, model_kwargs),
+ maximum_iterations=maximum_iterations,
+ )
+
+ # 6. prepare outputs
+ if not use_xla:
+ # cut for backward compatibility
+ generated = generated[:, :cur_len]
+
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ # if model is an encoder-decoder, retrieve encoder attention weights
+ # and hidden states
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ scores = tuple(scores) if scores is not None else None
+ decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
+ cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
+ decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
+
+ return TFGreedySearchEncoderDecoderOutput(
+ sequences=generated,
+ scores=scores,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ )
+ else:
+ return TFGreedySearchDecoderOnlyOutput(
+ sequences=generated,
+ scores=scores,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ )
+ else:
+ return generated
+
+ def sample(
+ self,
+ input_ids: tf.Tensor,
+ logits_processor: Optional[TFLogitsProcessorList] = None,
+ logits_warper: Optional[TFLogitsProcessorList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ seed: Optional[Tuple[int, int]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[TFSampleOutput, tf.Tensor]:
+ r"""
+ Generates sequences for models with a language modeling head using multinomial sampling.
+
+ Parameters:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ logits_processor (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ logits_warper (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
+ used to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ seed (`List[int]`, *optional*):
+ Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
+ `seed` argument from stateless functions in `tf.random`.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an
+ encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A
+ `tf.Tensor` containing the generated tokens (default behaviour) or a
+ [`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... TFAutoModelForCausalLM,
+ ... TFLogitsProcessorList,
+ ... TFMinLengthLogitsProcessor,
+ ... TFTopKLogitsWarper,
+ ... TFTemperatureLogitsWarper,
+ ... )
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
+
+ >>> input_prompt = "Today is a beautiful day, and"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids
+
+ >>> # instantiate logits processors
+ >>> logits_processor = TFLogitsProcessorList(
+ ... [
+ ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+ >>> # instantiate logits warpers
+ >>> logits_warper = TFLogitsProcessorList(
+ ... [
+ ... TFTopKLogitsWarper(50),
+ ... TFTemperatureLogitsWarper(0.7),
+ ... ]
+ ... )
+
+ >>> tf.random.set_seed(0)
+ >>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Today is a beautiful day, and I love my country. But when I look at Donald Trump,']
+ ```"""
+
+ # 1. init greedy_search values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
+
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+ use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find a programmatic way to detect the cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+ # some models, like XLNet, need more than the last token in the presence of past_key_values
+ needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3. init tensors to use for "xla-compileable" generate function
+ batch_size, cur_len = shape_list(input_ids)
+
+ # initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences`
+ input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
+ generated = tf.concat([input_ids, input_ids_padding], axis=-1)
+ finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
+
+ # 4. define "xla-compile-able" stop-condition and auto-regressive function
+ def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
+ return ~tf.reduce_all(finished_sequences)
+
+ def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs):
+ if model_kwargs.get("past_key_values") is None or needs_full_input:
+ input_ids = generated[:, :cur_len]
+ else:
+ input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
+ model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
+ # forward pass to get next token logits
+ model_outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+ next_token_logits = model_outputs.logits[:, -1]
+
+ # pre-process distribution
+ next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)
+ next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len)
+
+ # Store scores, attentions and hidden_states when required
+ if not use_xla and return_dict_in_generate:
+ if output_scores:
+ scores.append(next_tokens_scores)
+ if output_attentions and self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.decoder_attentions)
+ elif output_attentions and not self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.attentions)
+ if self.config.is_encoder_decoder:
+ cross_attentions.append(model_outputs.cross_attentions)
+
+ if output_hidden_states and self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.decoder_hidden_states)
+ elif output_hidden_states and not self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.hidden_states)
+
+ # sample
+ if seed is not None:
+ sample_seed = seed
+ else:
+ sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32)
+ next_tokens = tf.squeeze(
+ tf.random.stateless_categorical(
+ logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32
+ ),
+ axis=1,
+ )
+
+ if eos_token_id is not None:
+ if pad_token_id is None:
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
+ unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
+ next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
+ next_token_is_eos = tf.math.reduce_any(
+ tf.equal(
+ tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
+ ),
+ axis=0,
+ )
+ finished_sequences = finished_sequences | next_token_is_eos
+
+ # update `generated` and `cur_len`
+ update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
+ generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
+ cur_len += 1
+
+ # update model_kwargs
+ if use_xla:
+ model_kwargs = self._update_model_kwargs_for_xla_generation(
+ model_outputs=model_outputs,
+ model_kwargs=model_kwargs,
+ cur_len=cur_len,
+ max_length=max_length,
+ batch_size=batch_size,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ batch_axis=cache_batch_axis,
+ )
+ else:
+ model_kwargs = self._update_model_kwargs_for_generation(
+ model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
+ )
+ # if we don't cache the key/value states (`past_key_values`), we need the whole input
+ if model_kwargs.get("past_key_values", None) is None:
+ # let's throw out `past_key_values` since we don't want `None` tensors
+ model_kwargs.pop("past_key_values", None)
+
+ return generated, finished_sequences, cur_len, model_kwargs
+
+ # 5. run generation
+ # 1st generation step has to be run before to initialize `past_key_values`
+ generated, finished_sequences, cur_len, model_kwargs = sample_body_fn(
+ generated, finished_sequences, cur_len, model_kwargs
+ )
+
+ # 2-to-n generation steps can then be run in autoregressive fashion
+ # only in case 1st generation step does NOT yield EOS token though
+ maximum_iterations = max_length - cur_len
+ generated, _, cur_len, _ = tf.while_loop(
+ sample_cond_fn,
+ sample_body_fn,
+ (generated, finished_sequences, cur_len, model_kwargs),
+ maximum_iterations=maximum_iterations,
+ )
+
+ # 6. prepare outputs
+ if not use_xla:
+ # cut for backward compatibility
+ generated = generated[:, :cur_len]
+
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ # if model is an encoder-decoder, retrieve encoder attention weights
+ # and hidden states
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ scores = tuple(scores) if scores is not None else None
+ decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
+ cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
+ decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
+
+ return TFSampleEncoderDecoderOutput(
+ sequences=generated,
+ scores=scores,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ )
+ else:
+ return TFSampleDecoderOnlyOutput(
+ sequences=generated,
+ scores=scores,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ )
+ else:
+ return generated
+
+ @staticmethod
+ def _gather_beams(nested, beam_indices, batch_axis=0):
+ """Gathers the beam slices indexed by beam_indices into new beam array."""
+
+ def gather_fn(tensor):
+ if batch_axis > 0:
+ # pushes all dimensions before the batch to the end, so we get (batch, beam_id, ...)
+ perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
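+ # e.g. (illustrative): for a rank-4 tensor with `batch_axis=1`, `perm == [1, 2, 3, 0]`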
+ tensor = tf.transpose(tensor, perm=perm)
+
+ gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)
+ if batch_axis > 0:
+ # transposes back to the original dimensions
+ perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
+ perm = tf.math.invert_permutation(perm)
+ gathered_tensor = tf.transpose(gathered_tensor, perm=perm)
+
+ return gathered_tensor
+
+ return tf.nest.map_structure(gather_fn, nested)
+
+ def beam_search(
+ self,
+ input_ids: tf.Tensor,
+ do_sample: bool = False,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ length_penalty: Optional[float] = None,
+ early_stopping: Optional[Union[bool, str]] = None,
+ logits_processor: Optional[TFLogitsProcessorList] = None,
+ logits_warper: Optional[TFLogitsProcessorList] = None,
+ num_return_sequences: Optional[int] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
+ r"""
+ Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses
+ a greedy approach, otherwise does multinomial sampling without replacement.
+
+ Parameters:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ do_sample (`bool`, *optional*, defaults to `False`):
+ Whether or not to use sampling; use greedy decoding otherwise.
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent
+ to the sequence length, which in turn is used to divide the score of the sequence. Since the score is
+ the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences,
+ while `length_penalty` < 0.0 encourages shorter sequences.
+ early_stopping (`bool` or `str`, *optional*, defaults to `False`):
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following
+ values: `True`, where the generation stops as soon as there are `num_beams` complete candidates;
+ `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better
+ candidates; `"never"`, where the beam search procedure only stops when there cannot be better
+ candidates (canonical beam search algorithm).
+ logits_processor (`[TFLogitsProcessorList]`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ logits_warper (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
+ used to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ num_return_sequences(`int`, *optional*, defaults to 1):
+ The number of independently computed returned sequences for each element in the batch.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `call` function of the model. If the model is
+ an encoder-decoder model, the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or
+ `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
+ [`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... TFAutoModelForSeq2SeqLM,
+ ... TFLogitsProcessorList,
+ ... TFMinLengthLogitsProcessor,
+ ... )
+ >>> import tensorflow as tf
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
+
+ >>> encoder_input_str = "translate English to German: How old are you?"
+ >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids
+
+ >>> # lets run beam search using 3 beams
+ >>> num_beams = 3
+ >>> # define decoder start token ids
+ >>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32)
+ >>> input_ids = input_ids * model.generation_config.decoder_start_token_id
+
+ >>> # add encoder_outputs to model keyword arguments
+ >>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True)
+ >>> encoder_outputs.last_hidden_state = tf.repeat(
+ ... tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1
+ ... )
+ >>> model_kwargs = {"encoder_outputs": encoder_outputs}
+
+ >>> # instantiate logits processors
+ >>> logits_processor = TFLogitsProcessorList(
+ ... [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)]
+ ... )
+
+ >>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs)
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Wie alt bist du?']
+ ```"""
+
+ def flatten_beam_dim(tensor, batch_axis=0):
+ """Flattens the first two dimensions of a non-scalar array."""
+ shape = shape_list(tensor)
+ return tf.reshape(
+ tensor,
+ shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :],
+ )
+
+ def unflatten_beam_dim(tensor, num_beams, batch_axis=0):
+ """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
+ shape = shape_list(tensor)
+ return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :])
+
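+ # Note: a (batch_size, num_beams, ...) tensor becomes (batch_size * num_beams, ...) after
+ # `flatten_beam_dim` and is restored by `unflatten_beam_dim(tensor, num_beams)`; model forward
+ # passes always see the flattened layout, while the beam bookkeeping below uses the unflattened one.
+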
+ # 1. init beam_search values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
+
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ num_return_sequences = (
+ num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
+ )
+
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
+ early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
+
+ use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find programmatic way to detect cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+ # some models, like XLNet, need more than the last token in the presence of past_key_values
+ needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ all_scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3. init tensors to use for "xla-compileable" generate function
+ batch_size, num_beams, cur_len = shape_list(input_ids)
+ # store the prompt length of decoder
+ decoder_prompt_len = cur_len
+
+ # per batch, beam-item holding current token in loop, pre-populated with `pad_token_id`
+ input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * (
+ pad_token_id or 0
+ )
+ running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1)
+ sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0)
+
+ # per batch, beam-item state bit indicating if the sentence has finished.
+ is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool)
+
+ # per batch, beam-item score, logprobs
+ running_scores = tf.tile(
+ tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1]
+ )
+ scores = tf.ones((batch_size, num_beams)) * -1.0e9
+
+ # per batch beam indices
+ running_beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1
+ beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1
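+ # -1 acts as a sentinel value for positions where no beam index has been recorded yet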
+
+ # flatten beam dim
+ if "encoder_outputs" in model_kwargs:
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
+ model_kwargs["encoder_outputs"]["last_hidden_state"]
+ )
+ if "attention_mask" in model_kwargs:
+ model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"])
+
+ # 4. define "xla-compile-able" stop-condition and auto-regressive function
+ def beam_search_cond_fn(
+ cur_len,
+ running_sequences,
+ running_scores,
+ running_beam_indices,
+ sequences,
+ scores,
+ beam_indices,
+ is_sent_finished,
+ decoder_prompt_len,
+ model_kwargs,
+ ):
+ """
+ Beam Search termination condition function -- halts the generation loop if any of these conditions becomes
+ False
+ """
+ # 1. is less than max length?
+ not_max_length_yet = cur_len < max_length
+
+ # 2. can the new beams still improve?
+ # early_stopping == False -> apply a heuristic: always compute the best possible score from
+ # `cur_len - decoder_prompt_len`. See the discussion below for more details:
+ # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
+ # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
+ # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
+ if early_stopping == "never" and length_penalty > 0.0:
+ best_running_score = running_scores[:, :1] / ((max_length - decoder_prompt_len) ** length_penalty)
+ else:
+ best_running_score = running_scores[:, :1] / (
+ tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty
+ )
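+ # e.g. with length_penalty=1.0, a summed log prob of -6.0 over 3 generated tokens yields a
+ # normalized best-possible score of -6.0 / 3 = -2.0, which is compared to the worst finished
+ # score below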
+ worst_finished_score = tf.where(
+ is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9
+ )
+ improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score)
+
+ # 3. is there still a beam that has not finished?
+ still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True))
+
+ return not_max_length_yet & still_open_beam & improvement_still_possible
+
+ def beam_search_body_fn(
+ cur_len,
+ running_sequences,
+ running_scores,
+ running_beam_indices,
+ sequences,
+ scores,
+ beam_indices,
+ is_sent_finished,
+ decoder_prompt_len,
+ model_kwargs,
+ ):
+ """
+ Beam Search iterative update function -- each iteration adds a new token and updates the best sequences
+ seen so far
+ """
+ # 1. Forward current tokens
+ if model_kwargs.get("past_key_values") is None or needs_full_input:
+ input_ids = running_sequences[:, :, :cur_len]
+ else:
+ input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1)
+ model_inputs = self.prepare_inputs_for_generation(
+ flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs
+ )
+ model_outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+ logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams)
+
+ # 2. Compute log probs
+ # get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and
+ # add new logprobs to existing running logprobs scores.
+ log_probs = tf.nn.log_softmax(logits)
+ log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
+ log_probs = unflatten_beam_dim(log_probs, num_beams)
+ if do_sample:
+ log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
+ log_probs = unflatten_beam_dim(log_probs, num_beams)
+ log_probs_processed = log_probs
+ log_probs = log_probs + tf.expand_dims(running_scores, axis=2)
+ vocab_size = log_probs.shape[2]
+ log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size))
+
+ # Store scores, attentions and hidden_states when required
+ if not use_xla and return_dict_in_generate:
+ if output_scores:
+ all_scores.append(
+ logits_warper(
+ flatten_beam_dim(running_sequences),
+ flatten_beam_dim(log_probs_processed),
+ cur_len,
+ )
+ )
+ if output_attentions and self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.decoder_attentions)
+ elif output_attentions and not self.config.is_encoder_decoder:
+ decoder_attentions.append(model_outputs.attentions)
+ if self.config.is_encoder_decoder:
+ cross_attentions.append(model_outputs.cross_attentions)
+
+ if output_hidden_states and self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.decoder_hidden_states)
+ elif output_hidden_states and not self.config.is_encoder_decoder:
+ decoder_hidden_states.append(model_outputs.hidden_states)
+
+ # 3. Retrieve top-K
+ # Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k
+ # candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the
+ # best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live
+ # beam search.
+ # Gather the top 2*K scores from _all_ beams.
+ # Gather the 2*K top beams.
+ # Recover the beam index by floor division.
+ # Recover the token id by modulo division and expand the id array for broadcasting.
+ # Update sequences for the 2*K top-k new sequences.
+ beams_to_keep = 2 * num_beams
+ if do_sample:
+ topk_indices = sample_without_replacement(log_probs, beams_to_keep)
+ topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1)
+ else:
+ topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep)
+ topk_current_beam_indices = topk_indices // vocab_size
+ topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices)
+ topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices)
+ topk_ids = topk_indices % vocab_size
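+ # e.g. with a hypothetical vocab_size of 50257, flat index 100514 decodes to beam
+ # 100514 // 50257 = 2 and token id 100514 % 50257 = 0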
+
+ # writes the new token
+ indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep])
+ indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size])
+ update_indices = tf.stack(
+ [indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1
+ )
+ topk_sequences = tf.tensor_scatter_nd_update(
+ tensor=topk_running_sequences,
+ indices=update_indices,
+ updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]),
+ )
+
+ # we want to store the beam indices with batch information -> real beam index = beam index % num beams
+ batch_modified_indices = topk_current_beam_indices + tf.broadcast_to(
+ tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape
+ )
+ update_indices = tf.stack(
+ [
+ indices_batch,
+ indices_beam,
+ tf.broadcast_to(cur_len - decoder_prompt_len, [batch_size * beams_to_keep]),
+ ],
+ axis=-1,
+ )
+ topk_beam_indices = tf.tensor_scatter_nd_update(
+ tensor=topk_running_beam_indices,
+ indices=update_indices,
+ updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]),
+ )
+
+ # 4. Check which sequences have ended
+ # Update current sequences: did the top `num_beams` sequences reach an end marker?
+ # To prevent these just-finished sequences from being added to the set of active beam search
+ # sequences, set their log probs to a very large negative value.
+ if eos_token_id is None:
+ eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool)
+ else:
+ eos_in_next_token = tf.math.reduce_any(
+ tf.equal(
+ tf.broadcast_to(
+ topk_sequences[:, :, cur_len],
+ [len(eos_token_id)] + topk_sequences[:, :, cur_len].shape,
+ ),
+ tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1),
+ ),
+ axis=0,
+ )
+ did_topk_just_finished = eos_in_next_token & tf.broadcast_to(
+ tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0),
+ shape_list(eos_in_next_token),
+ )
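+ # the ones/zeros mask above means that only candidates ranked in the first `num_beams` positions
+ # (out of the 2 * num_beams kept) are allowed to finalize a beam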
+
+ # EOS tokens outside the top `num_beams` can't be used to finish a beam, and no candidate that just
+ # emitted EOS can be kept as a running sequence either
+ running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9
+
+ # 5. Get running sequences scores for next
+ # Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams
+ # (from top 2*k beams).
+ next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1]
+ next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams(
+ [topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices
+ )
+
+ # 6. Process topk logits
+ # Further process log probs:
+ # - add length penalty
+ # - make sure no scores can be added anymore if beam is full
+ # - make sure still running sequences cannot be chosen as finalized beam
+ topk_log_probs = topk_log_probs / (
+ tf.cast(cur_len + 1 - decoder_prompt_len, dtype=tf.float32) ** length_penalty
+ )
+ beams_in_batch_are_full = tf.broadcast_to(
+ tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished)
+ ) & (early_stopping is True)
+ add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
+ topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9
+
+ # 7. Get scores, sequences, and finished flags for the next iteration.
+ # Combine sequences, scores, and flags along the beam dimension, compare the newly finished sequence
+ # scores to the existing finished scores, and select the best from the combined set of beams.
+ merged_sequences = tf.concat([sequences, topk_sequences], axis=1)
+ merged_scores = tf.concat([scores, topk_log_probs], axis=1)
+ merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1)
+ merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1)
+ topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1]
+ next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams(
+ [merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices
+ )
+
+ # 8. Prepare data for the next iteration
+ # Determine the top k beam indices from the original set of all beams. With these, gather the top k
+ # beam-associated caches.
+ cur_len = cur_len + 1
+ if "past_key_values" in model_outputs:
+ cache = tf.nest.map_structure(
+ lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis),
+ model_outputs.past_key_values,
+ )
+ next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices)
+ next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis)
+ model_outputs["past_key_values"] = tf.nest.map_structure(
+ lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache
+ )
+
+ if use_xla:
+ next_model_kwargs = self._update_model_kwargs_for_xla_generation(
+ model_outputs=model_outputs,
+ model_kwargs=model_kwargs,
+ cur_len=cur_len,
+ max_length=max_length,
+ batch_size=(batch_size * num_beams),
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ batch_axis=cache_batch_axis,
+ )
+ else:
+ next_model_kwargs = self._update_model_kwargs_for_generation(
+ model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
+ )
+
+ # if we don't cache past_key_values we need the whole input
+ if model_kwargs.get("past_key_values", None) is None:
+ # let's throw out `past_key_values` since we don't want `None` tensors
+ model_kwargs.pop("past_key_values", None)
+
+ return (
+ cur_len,
+ next_running_sequences,
+ next_running_scores,
+ next_running_beam_indices,
+ next_sequences,
+ next_scores,
+ next_beam_indices,
+ next_is_sent_finished,
+ decoder_prompt_len,
+ next_model_kwargs,
+ )
+
+ # 5. run generation
+ # 1st generation step has to be run before to initialize `past_key_values` (if active)
+ (
+ cur_len,
+ running_sequences,
+ running_scores,
+ running_beam_indices,
+ sequences,
+ scores,
+ beam_indices,
+ is_sent_finished,
+ decoder_prompt_len,
+ model_kwargs,
+ ) = beam_search_body_fn(
+ cur_len,
+ running_sequences,
+ running_scores,
+ running_beam_indices,
+ sequences,
+ scores,
+ beam_indices,
+ is_sent_finished,
+ decoder_prompt_len,
+ model_kwargs,
+ )
+
+ # 2-to-n generation steps can then be run in autoregressive fashion (only in case 1st generation step does
+ # NOT yield EOS token though)
+ maximum_iterations = max_length - cur_len
+ (
+ cur_len,
+ running_sequences,
+ running_scores,
+ running_beam_indices,
+ sequences,
+ scores,
+ beam_indices,
+ is_sent_finished,
+ decoder_prompt_len,
+ _,
+ ) = tf.while_loop(
+ beam_search_cond_fn,
+ beam_search_body_fn,
+ (
+ cur_len,
+ running_sequences,
+ running_scores,
+ running_beam_indices,
+ sequences,
+ scores,
+ beam_indices,
+ is_sent_finished,
+ decoder_prompt_len,
+ model_kwargs,
+ ),
+ maximum_iterations=maximum_iterations,
+ )
+
+ # 6. prepare outputs
+ # Account for the edge-case where there are no finished sequences for a particular batch item. If so, return
+ # running sequences for that batch item.
+ none_finished = tf.math.reduce_any(is_sent_finished, axis=1)
+ sequences = tf.where(none_finished[:, None, None], sequences, running_sequences)
+ beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices)
+
+ # Apply the length penalty so that running scores match the finalized scores if they are used
+ running_scores = running_scores / (tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty)
+ scores = tf.where(none_finished[:, None], scores, running_scores)
+
+ # Take best beams for each batch (the score is sorted in descending order)
+ sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :])
+ scores = flatten_beam_dim(scores[:, :num_return_sequences])
+ beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :])
+
+ if not use_xla:
+ # Cut for backward compatibility
+ sequences = sequences[:, :cur_len]
+ beam_indices = beam_indices[:, : cur_len - decoder_prompt_len]
+
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput
+ return output_cls(
+ sequences=sequences,
+ sequences_scores=scores,
+ scores=all_scores,
+ beam_indices=beam_indices,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ )
+ else:
+ output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput
+ return output_cls(
+ sequences=sequences,
+ sequences_scores=scores,
+ scores=all_scores,
+ beam_indices=beam_indices,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ )
+ else:
+ return sequences
+
+ def contrastive_search(
+ self,
+ input_ids: tf.Tensor,
+ top_k: Optional[int] = 1,
+ penalty_alpha: Optional[float] = 0,
+ logits_processor: Optional[TFLogitsProcessorList] = None,
+ logits_warper: Optional[TFLogitsProcessorList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[int] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[TFContrastiveSearchOutput, tf.Tensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **contrastive search** and can
+ be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+ Parameters:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ top_k (`int`, *optional*, defaults to 1):
+ The size of the candidate set that is used to re-rank for contrastive search.
+ penalty_alpha (`float`, *optional*, defaults to 0):
+ The degeneration penalty for contrastive search; contrastive search is activated when it is larger than 0.
+ logits_processor (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ logits_warper (`TFLogitsProcessorList`, *optional*):
+ An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
+ used to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ max_length (`int`, *optional*, defaults to 20):
+ The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ model_kwargs:
+ Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
+ the model is an encoder-decoder model, the kwargs should include `encoder_outputs`.
+ Return:
+ [`~generation.TFContrastiveSearchDecoderOnlyOutput`],
+ [`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the
+ generated tokens (default behaviour) or a [`~generation.TFContrastiveSearchDecoderOnlyOutput`] if
+ `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a
+ [`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
+ Examples:
+ ```python
+ >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
+ >>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m")
+ >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token
+ >>> model.config.pad_token_id = model.config.eos_token_id
+ >>> input_prompt = "DeepMind Company is"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="tf")
+ >>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64)
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it']
+ ```"""
+
+ def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0):
+ """Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors."""
+
+ def gather_fn(tensor):
+ gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis)
+ return gathered_tensor
+
+ return tf.nest.map_structure(gather_fn, nested)
+
+ # 1. init greedy_search values
+ logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+ use_cache = True # In contrastive search, we always use cache
+ model_kwargs.pop("use_cache", None)
+
+ use_xla = not tf.executing_eagerly()
+ # TODO (Joao): fix cache format or find programmatic way to detect cache index
+ # GPT2 and other models have a slightly different cache structure, with a different batch axis
+ model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
+ cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
+
+ # 2. init `attentions`, `hidden_states`, and `scores` tuples
+ scores = [] if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
+
+ # 3. init tensors to use for "xla-compileable" generate function
+ batch_size, cur_len = shape_list(input_ids)
+
+ # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
+ input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
+ generated = tf.concat([input_ids, input_ids_padding], axis=-1)
+ finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
+
+ # 4. define "xla-compile-able" stop-condition and auto-regressive function
+ # define condition fn
+ def contrastive_search_cond_fn(
+ generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
+ ):
+ """state termination condition fn."""
+ return ~tf.reduce_all(finished_sequences)
+
+ # define body fn
+ def contrastive_search_body_fn(
+ generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
+ ):
+ """state update fn."""
+
+ # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values;
+ # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step
+ if model_kwargs.get("past_key_values") is None:
+ # prepare inputs
+ model_inputs = self.prepare_inputs_for_generation(
+ generated[:, :cur_len], use_cache=use_cache, **model_kwargs
+ )
+
+ # encode the given prefix and prepare model inputs; encoder-decoder models process the prefix and save
+ # the `encoder_outputs`
+ outputs = self(
+ **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
+ )
+
+ # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with
+ # previous tokens)
+ if self.config.is_encoder_decoder:
+ last_hidden_states = outputs.decoder_hidden_states[-1]
+ else:
+ last_hidden_states = outputs.hidden_states[-1]
+
+ # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
+ # iterations (with fixed shapes)
+ if use_xla:
+ last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]])
+
+ # next logit for contrastive search to select top-k candidate tokens
+ logit_for_next_step = outputs.logits[:, -1, :]
+
+ if use_xla:
+ model_kwargs = self._update_model_kwargs_for_xla_generation(
+ model_outputs=outputs,
+ model_kwargs=model_kwargs,
+ cur_len=cur_len,
+ max_length=max_length,
+ batch_size=batch_size,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ batch_axis=cache_batch_axis,
+ )
+ else:
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
+ )
+
+ # Expands model inputs top_k times, for batched forward passes (akin to beam search).
+ _, model_kwargs = self._expand_inputs_for_generation(
+ expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
+ )
+
+ past_key_values = model_kwargs.get("past_key_values")
+ if past_key_values is None:
+ raise ValueError(
+ f"{self.__class__.__name__} does not support caching and therefore **can't** be used "
+ "for contrastive search."
+ )
+ elif (
+ not isinstance(past_key_values[0], (tuple, tf.Tensor))
+ or past_key_values[0][0].shape[0] != batch_size
+ ):
+ raise ValueError(
+ f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be "
+ "used for contrastive search without further modifications."
+ )
+ else:
+ logit_for_next_step = next_step_cached_variables["logit_for_next_step"]
+ last_hidden_states = next_step_cached_variables["last_hidden_states"]
+ outputs = next_step_cached_variables["outputs"]
+
+ # contrastive_search main logic start:
+ # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by
+ # degeneration penalty
+
+ logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len)
+ logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len)
+ next_probs = stable_softmax(logit_for_next_step, axis=-1)
+ top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k)
+
+ # Store scores, attentions and hidden_states when required
+ if not use_xla and return_dict_in_generate:
+ if output_scores:
+ scores.append(logit_for_next_step)
+ if output_attentions and self.config.is_encoder_decoder:
+ decoder_attentions.append(outputs.decoder_attentions)
+ elif output_attentions and not self.config.is_encoder_decoder:
+ decoder_attentions.append(outputs.attentions)
+ if self.config.is_encoder_decoder:
+ cross_attentions.append(outputs.cross_attentions)
+
+ if output_hidden_states and self.config.is_encoder_decoder:
+ decoder_hidden_states.append(outputs.decoder_hidden_states)
+ elif output_hidden_states and not self.config.is_encoder_decoder:
+ decoder_hidden_states.append(outputs.hidden_states)
+
+ # Replicates the new past_key_values to match the `top_k` candidates
+ model_kwargs["past_key_values"] = tf.nest.map_structure(
+ lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"]
+ )
+
+ # compute the candidate tokens by the language model and collects their hidden_states
+ next_model_inputs = self.prepare_inputs_for_generation(
+ tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs
+ )
+ outputs = self(
+ **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
+ )
+ next_past_key_values = self._extract_past_from_model_output(outputs)
+
+ logits = outputs.logits[:, -1, :]
+ # name is different for encoder-decoder and decoder-only models
+ if self.config.is_encoder_decoder:
+ next_hidden = outputs.decoder_hidden_states[-1]
+ full_hidden_states = outputs.decoder_hidden_states
+ else:
+ next_hidden = outputs.hidden_states[-1]
+ full_hidden_states = outputs.hidden_states
+ context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0)
+
+ # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the
+ # model confidence
+ selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k)
+
+ # converts indices from the per-row top_k dimension to the stacked top_k * batch_size dimension, so
+ # that tensors with these two dimensions stacked can be indexed without reshaping
+ selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k
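+ # e.g. with batch_size=2 and top_k=4, selected_idx=[1, 3] becomes [1, 7] in the stacked layout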
+
+ # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing
+ # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores
+ # (model confidence minus degeneration penalty); (6) decoder hidden_states
+ next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1)
+ next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked)
+
+ # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
+ # iterations (with fixed shapes)
+ if use_xla:
+ last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0])
+ else:
+ last_hidden_states = tf.concat([last_hidden_states, next_hidden], axis=1)
+
+ next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked)
+ next_past_key_values = gather_best_candidate(
+ next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis
+ )
+ logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked)
+
+ # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration
+ if self.config.is_encoder_decoder:
+ next_step_cross_attentions = ()
+ next_step_decoder_attentions = ()
+ if output_attentions:
+ next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked)
+ next_step_decoder_attentions = gather_best_candidate(
+ outputs.decoder_attentions, selected_idx_stacked
+ )
+ outputs = TFSeq2SeqLMOutput(
+ past_key_values=next_past_key_values,
+ decoder_hidden_states=next_decoder_hidden_states,
+ decoder_attentions=next_step_decoder_attentions or None,
+ cross_attentions=next_step_cross_attentions or None,
+ )
+ else:
+ next_step_attentions = ()
+ if output_attentions:
+ next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked)
+ outputs = TFCausalLMOutputWithPast(
+ past_key_values=next_past_key_values,
+ hidden_states=next_decoder_hidden_states,
+ attentions=next_step_attentions or None,
+ )
+ # contrastive_search main logic end
+
+ if eos_token_id is not None:
+ if pad_token_id is None:
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
+ unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
+ next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
+ next_token_is_eos = tf.math.reduce_any(
+ tf.equal(
+ tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
+ ),
+ axis=0,
+ )
+ finished_sequences = finished_sequences | next_token_is_eos
+
+ # update `generated` and `cur_len`
+ update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
+ generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
+ cur_len += 1
+
+ if use_xla:
+ # NOTE: 1) relative to other generation strategies, contrastive search is always running forward
+ # passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from
+ # [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k`
+ model_kwargs = self._update_model_kwargs_for_xla_generation(
+ model_outputs=outputs,
+ model_kwargs=model_kwargs,
+ cur_len=cur_len + 1,
+ max_length=max_length,
+ batch_size=batch_size * top_k,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ batch_axis=cache_batch_axis,
+ )
+ else:
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
+ )
+
+ next_step_cached_variables = {
+ "logit_for_next_step": logit_for_next_step,
+ "last_hidden_states": last_hidden_states,
+ "outputs": outputs,
+ }
+ return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
+
+ # 5. run generation
+ # 1st generation step has to be run before to initialize `past_key_values`
+ generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn(
+ generated, finished_sequences, cur_len, model_kwargs, None
+ )
+
+ # 2-to-n generation steps can then be run in autoregressive fashion
+ # only in case 1st generation step does NOT yield EOS token though
+ maximum_iterations = max_length - cur_len
+ generated, _, cur_len, _, _ = tf.while_loop(
+ contrastive_search_cond_fn,
+ contrastive_search_body_fn,
+ (generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables),
+ maximum_iterations=maximum_iterations,
+ )
+
+ # 6. prepare outputs
+ if not use_xla:
+ # cut for backward compatibility
+ generated = generated[:, :cur_len]
+
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ # if model is an encoder-decoder, retrieve encoder attention weights
+ # and hidden states
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ scores = tuple(scores) if scores is not None else None
+ decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
+ cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
+ decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
+
+ return TFContrastiveSearchEncoderDecoderOutput(
+ sequences=generated,
+ scores=scores,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ )
+ else:
+ return TFContrastiveSearchDecoderOnlyOutput(
+ sequences=generated,
+ scores=scores,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ )
+ else:
+ return generated
+
+
+def scatter_values_on_batch_indices(values, batch_indices):
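+ # `values` and `batch_indices` are expected to have matching shapes; each value is written to
+ # position (row, batch_indices[row, col]) of a zero-initialized tensor of that shape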
+ shape = shape_list(batch_indices)
+ # broadcast batch dim to shape
+ broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1])
+ # transform batch_indices to pair_indices
+ pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
+ # scatter values to pair indices
+ return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape)
+
+
+def sample_without_replacement(logits, num_samples):
+ """
+ Categorical sampling without replacement is not currently implemented; the Gumbel-max trick will do for
+ now. See https://github.com/tensorflow/tensorflow/issues/9260 for more info.
+ """
+ z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)))
+ _, indices = tf.nn.top_k(logits + z, num_samples)
+ return indices
+
+
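+# Illustrative sketch, not part of the upstream API: `_example_sample_without_replacement` below is a
+# hypothetical helper that exercises the Gumbel-max trick above on a toy three-token distribution.
+def _example_sample_without_replacement():
+ # log-probabilities for a single batch row over a toy vocabulary of 3 tokens
+ logits = tf.math.log(tf.constant([[0.5, 0.3, 0.2]]))
+ # perturbing the logits with Gumbel noise and taking the top-k indices is equivalent to drawing
+ # k distinct token ids from softmax(logits)
+ return sample_without_replacement(logits, num_samples=2) # tf.Tensor of shape (1, 2)
+
+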
+def _ranking_fast(
+ context_hidden: tf.Tensor,
+ next_hidden: tf.Tensor,
+ next_top_k_probs: tf.Tensor,
+ alpha: float,
+ beam_width: int,
+) -> tf.Tensor:
+ """
+ Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
+ in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
+ row in the batch.
+ """
+ norm_context_hidden = context_hidden / tf.norm(context_hidden, axis=2, keepdims=True)
+ norm_next_hidden = next_hidden / tf.norm(next_hidden, axis=2, keepdims=True)
+ cosine_matrix = tf.squeeze(tf.linalg.matmul(norm_context_hidden, norm_next_hidden, transpose_b=True), axis=-1)
+ degeneration_penalty = tf.reduce_max(cosine_matrix, axis=-1)
+ next_top_k_probs = tf.reshape(next_top_k_probs, shape=[-1])
+ contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty
+ contrastive_score = tf.reshape(contrastive_score, shape=[-1, beam_width])
+ selected_idx = tf.argmax(contrastive_score, axis=1)
+ return selected_idx
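+
+
+# Illustrative sketch, not part of the upstream API: `_example_ranking_fast` below is a hypothetical
+# helper showing how `_ranking_fast` trades model confidence against the degeneration penalty for a
+# single batch row with top_k (beam_width) = 2.
+def _example_ranking_fast():
+ # both candidates see the same all-ones context, so their degeneration penalty (max cosine
+ # similarity with the context) is 1.0 and only model confidence differs
+ context_hidden = tf.ones((2, 3, 4)) # (batch_size * top_k, context_len, hidden_size)
+ next_hidden = tf.ones((2, 1, 4)) # (batch_size * top_k, 1, hidden_size)
+ next_top_k_probs = tf.constant([[0.9, 0.1]]) # (batch_size, top_k) model confidences
+ # with alpha=0.6 the contrastive score is 0.4 * prob - 0.6 * penalty, so the more confident
+ # candidate (index 0) is selected
+ return _ranking_fast(context_hidden, next_hidden, next_top_k_probs, alpha=0.6, beam_width=2)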
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation/utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b547edbc0fd03f51d0c65e42c67c8de765304ce7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation/utils.py
@@ -0,0 +1,4925 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
+# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import inspect
+import warnings
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.distributed as dist
+from torch import nn
+
+from ..cache_utils import Cache, DynamicCache, StaticCache
+from ..integrations.deepspeed import is_deepspeed_zero3_enabled
+from ..modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput
+from ..models.auto import (
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
+ MODEL_FOR_CAUSAL_LM_MAPPING,
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ MODEL_FOR_VISION_2_SEQ_MAPPING,
+)
+from ..utils import ModelOutput, is_accelerate_available, is_torchdynamo_compiling, logging
+from .beam_constraints import DisjunctiveConstraint, PhrasalConstraint
+from .beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
+from .candidate_generator import (
+ AssistedCandidateGenerator,
+ CandidateGenerator,
+ PromptLookupCandidateGenerator,
+ _crop_past_key_values,
+ _prepare_attention_mask,
+ _prepare_token_type_ids,
+)
+from .configuration_utils import GenerationConfig, GenerationMode
+from .logits_process import (
+ EncoderNoRepeatNGramLogitsProcessor,
+ EncoderRepetitionPenaltyLogitsProcessor,
+ EpsilonLogitsWarper,
+ EtaLogitsWarper,
+ ExponentialDecayLengthPenalty,
+ ForcedBOSTokenLogitsProcessor,
+ ForcedEOSTokenLogitsProcessor,
+ ForceTokensLogitsProcessor,
+ HammingDiversityLogitsProcessor,
+ InfNanRemoveLogitsProcessor,
+ LogitNormalization,
+ LogitsProcessorList,
+ MinLengthLogitsProcessor,
+ MinNewTokensLengthLogitsProcessor,
+ NoBadWordsLogitsProcessor,
+ NoRepeatNGramLogitsProcessor,
+ PrefixConstrainedLogitsProcessor,
+ RepetitionPenaltyLogitsProcessor,
+ SequenceBiasLogitsProcessor,
+ SuppressTokensAtBeginLogitsProcessor,
+ SuppressTokensLogitsProcessor,
+ TemperatureLogitsWarper,
+ TopKLogitsWarper,
+ TopPLogitsWarper,
+ TypicalLogitsWarper,
+ UnbatchedClassifierFreeGuidanceLogitsProcessor,
+)
+from .stopping_criteria import (
+ MaxLengthCriteria,
+ MaxTimeCriteria,
+ StoppingCriteria,
+ StoppingCriteriaList,
+ validate_stopping_criteria,
+)
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+ from .streamers import BaseStreamer
+
+logger = logging.get_logger(__name__)
+
+if is_accelerate_available():
+ from accelerate.hooks import AlignDevicesHook, add_hook_to_module
+
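+# maps `generation_config.cache_implementation` names to the cache classes that need to be set up
+# before generation starts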
+NEED_SETUP_CACHE_CLASSES_MAPPING = {
+ "static": StaticCache,
+}
+
+
+@dataclass
+class GenerateDecoderOnlyOutput(ModelOutput):
+ """
+ Outputs of decoder-only generation models, when using non-beam methods.
+
+ Args:
+ sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`):
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
+ past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ NOTE: some models have a different `past_key_values` format, confirm with the model's documentation.
+ Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value
+ tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+ """
+
+ sequences: torch.LongTensor = None
+ scores: Optional[Tuple[torch.FloatTensor]] = None
+ logits: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
+
+
+@dataclass
+class GenerateEncoderDecoderOutput(ModelOutput):
+ """
+ Outputs of encoder-decoder generation models, when using non-beam methods.
+
+ Args:
+ sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`):
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads,
+ sequence_length, sequence_length)`.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
+ past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ NOTE: some models have a different `past_key_values` format, confirm with the model's documentation.
+ Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value
+ tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+ """
+
+ sequences: torch.LongTensor = None
+ scores: Optional[Tuple[torch.FloatTensor]] = None
+ logits: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
+
+
+@dataclass
+class GenerateBeamDecoderOnlyOutput(ModelOutput):
+ """
+ Outputs of decoder-only generation models, when using beam methods.
+
+ Args:
+ sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Final beam scores of the generated `sequences`.
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.
+ Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`):
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`.
+ attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
+ hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ NOTE: some models have a different `past_key_values` format, confirm with the model's documentation.
+ Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value
+ tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and, if `config.is_encoder_decoder=True`,
+ 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+ """
+
+ sequences: torch.LongTensor = None
+ sequences_scores: Optional[torch.FloatTensor] = None
+ scores: Optional[Tuple[torch.FloatTensor]] = None
+ logits: Optional[Tuple[torch.FloatTensor]] = None
+ beam_indices: Optional[torch.LongTensor] = None
+ attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
+
+
+@dataclass
+class GenerateBeamEncoderDecoderOutput(ModelOutput):
+ """
+ Outputs of encoder-decoder generation models, when using beam methods.
+
+ Args:
+ sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
+ if all batches finished early due to the `eos_token_id`.
+ sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Final beam scores of the generated `sequences`.
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.
+ Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`):
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
+ beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
+ Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer of the encoder) of shape `(batch_size, num_heads,
+ sequence_length, sequence_length)`.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
+ decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
+ sequence_length)`.
+ cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
+ `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ NOTE: some models have a different `past_key_values` format, confirm with the model's documentation.
+ Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value
+ tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and, if `config.is_encoder_decoder=True`,
+ 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+ """
+
+ sequences: torch.LongTensor = None
+ sequences_scores: Optional[torch.FloatTensor] = None
+ scores: Optional[Tuple[torch.FloatTensor]] = None
+ logits: Optional[Tuple[torch.FloatTensor]] = None
+ beam_indices: Optional[torch.LongTensor] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
+
+
+# Equivalent classes (kept for retrocompatibility purposes)
+GreedySearchDecoderOnlyOutput = GenerateDecoderOnlyOutput
+ContrastiveSearchDecoderOnlyOutput = GenerateDecoderOnlyOutput
+SampleDecoderOnlyOutput = GenerateDecoderOnlyOutput
+
+ContrastiveSearchEncoderDecoderOutput = GenerateEncoderDecoderOutput
+GreedySearchEncoderDecoderOutput = GenerateEncoderDecoderOutput
+SampleEncoderDecoderOutput = GenerateEncoderDecoderOutput
+
+BeamSearchDecoderOnlyOutput = GenerateBeamDecoderOnlyOutput
+BeamSampleDecoderOnlyOutput = GenerateBeamDecoderOnlyOutput
+
+BeamSearchEncoderDecoderOutput = GenerateBeamEncoderDecoderOutput
+BeamSampleEncoderDecoderOutput = GenerateBeamEncoderDecoderOutput
+
+GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput]
+SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput]
+BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput]
+BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput]
+ContrastiveSearchOutput = Union[ContrastiveSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput]
+
+# Typing shortcuts
+GenerateNonBeamOutput = Union[GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput]
+GenerateBeamOutput = Union[GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput]
+GenerateOutput = Union[GenerateNonBeamOutput, GenerateBeamOutput]
+
+
+class GenerationMixin:
+ """
+ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`].
+
+ The class exposes [`~generation.GenerationMixin.generate`], which can be used for:
+ - *greedy decoding* by calling [`~generation.GenerationMixin._greedy_search`] if `num_beams=1` and
+ `do_sample=False`
+ - *contrastive search* by calling [`~generation.GenerationMixin._contrastive_search`] if `penalty_alpha>0` and
+ `top_k>1`
+ - *multinomial sampling* by calling [`~generation.GenerationMixin._sample`] if `num_beams=1` and
+ `do_sample=True`
+ - *beam-search decoding* by calling [`~generation.GenerationMixin._beam_search`] if `num_beams>1` and
+ `do_sample=False`
+ - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin._beam_sample`] if `num_beams>1`
+ and `do_sample=True`
+ - *diverse beam-search decoding* by calling [`~generation.GenerationMixin._group_beam_search`], if `num_beams>1`
+ and `num_beam_groups>1`
+ - *constrained beam-search decoding* by calling [`~generation.GenerationMixin._constrained_beam_search`], if
+ `constraints!=None` or `force_words_ids!=None`
+ - *assisted decoding* by calling [`~generation.GenerationMixin._assisted_decoding`], if
+ `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()`
+
+ You do not need to call any of the above methods directly. Pass custom parameter values to `generate` instead. To
+ learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
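+
+ As an illustration only (a minimal sketch; the checkpoint name, prompt, and generation lengths below are
+ arbitrary), the decoding strategy follows directly from the flags passed to `generate`:
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> inputs = tokenizer("Today is", return_tensors="pt")
+
+ >>> # greedy decoding (the defaults: `num_beams=1`, `do_sample=False`)
+ >>> greedy_ids = model.generate(**inputs, max_new_tokens=5)
+
+ >>> # multinomial sampling (`num_beams=1`, `do_sample=True`)
+ >>> sampled_ids = model.generate(**inputs, max_new_tokens=5, do_sample=True)
+
+ >>> # beam-search decoding (`num_beams>1`, `do_sample=False`)
+ >>> beam_ids = model.generate(**inputs, max_new_tokens=5, num_beams=4)
+ ```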
+ """
+
+ def prepare_inputs_for_generation(self, *args, **kwargs):
+ raise NotImplementedError(
+ "A model class needs to define a `prepare_inputs_for_generation` method in order to use `.generate()`."
+ )
+
+ def _prepare_model_inputs(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ bos_token_id: Optional[int] = None,
+ model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+ ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]:
+ """
+ This function extracts the model-specific `inputs` for generation.
+ """
+ # 1. retrieve all kwargs that are non-None or non-model input related.
+ # some encoder-decoder models have different names for model and encoder
+ if (
+ self.config.is_encoder_decoder
+ and hasattr(self, "encoder")
+ and self.encoder.main_input_name != self.main_input_name
+ ):
+ input_name = self.encoder.main_input_name
+ else:
+ input_name = self.main_input_name
+
+ model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
+
+ # 2. check whether model_input_name is passed as kwarg
+ # if yes and `inputs` is None use kwarg inputs
+ inputs_kwarg = model_kwargs.pop(input_name, None)
+ if inputs_kwarg is not None and inputs is not None:
+ raise ValueError(
+ f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. "
+ f"Make sure to either pass {inputs} or {input_name}=..."
+ )
+ elif inputs_kwarg is not None:
+ inputs = inputs_kwarg
+
+ # 3. In the presence of `inputs_embeds` for text models:
+ # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
+ # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
+ # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
+ # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
+ # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
+ if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
+ if not self.config.is_encoder_decoder:
+ has_inputs_embeds_forwarding = "inputs_embeds" in set(
+ inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
+ )
+ if not has_inputs_embeds_forwarding:
+ raise ValueError(
+ f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
+ "doesn't have its forwarding implemented. See the GPT2 implementation for an example "
+ "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
+ )
+ # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
+ # the attention mask) can rely on the actual model input.
+ model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
+ inputs, bos_token_id, model_kwargs=model_kwargs
+ )
+ else:
+ if inputs is not None:
+ raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
+ inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
+
+ # 4. if `inputs` is still None, try to create `input_ids` from BOS token
+ inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
+ return inputs, input_name, model_kwargs
+
+ def _maybe_initialize_input_ids_for_generation(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ bos_token_id: Optional[int] = None,
+ model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+ ) -> torch.LongTensor:
+ """Initializes input ids for generation, if necessary."""
+ if inputs is not None:
+ return inputs
+
+ encoder_outputs = model_kwargs.get("encoder_outputs")
+ if self.config.is_encoder_decoder and encoder_outputs is not None:
+ # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
+ shape = encoder_outputs.last_hidden_state.size()[:-1]
+ return torch.ones(shape, dtype=torch.long, device=self.device) * -100
+
+ if bos_token_id is None:
+ raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
+
+ # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
+ # soft-prompting or in multimodal implementations built on top of decoder-only language models.
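+ # e.g. an `inputs_embeds` tensor of shape (4, 10, hidden_size) in `model_kwargs` implies a batch size of 4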
+ batch_size = 1
+ for value in model_kwargs.values():
+ if isinstance(value, torch.Tensor):
+ batch_size = value.shape[0]
+ break
+
+ if "inputs_embeds" in model_kwargs:
+ return torch.ones((batch_size, 0), dtype=torch.long, device=self.device)
+ return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id
+
+ def _prepare_attention_mask_for_generation(
+ self,
+ inputs: torch.Tensor,
+ pad_token_id: Optional[int],
+ eos_token_id: Optional[Union[int, List[int]]],
+ ) -> torch.LongTensor:
+ is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long]
+ is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs)
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id not in eos_token_id)
+
+ # Check if input is input_ids and padded -> only then is attention_mask defined
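+ # e.g. input_ids [[11, 12, 0]] with pad_token_id=0 (and 0 not being an eos token) yield attention_mask [[1, 1, 0]]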
+ if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
+ return inputs.ne(pad_token_id).long()
+ else:
+ return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device)
+
+ def _prepare_encoder_decoder_kwargs_for_generation(
+ self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None
+ ) -> Dict[str, Any]:
+ # 1. get encoder
+ encoder = self.get_encoder()
+ # Compatibility with Accelerate big model inference: we need the encoder to output its results on the same device
+ # as the inputs.
+ if hasattr(self, "hf_device_map"):
+ if hasattr(encoder, "_hf_hook"):
+ encoder._hf_hook.io_same_device = True
+ else:
+ add_hook_to_module(encoder, AlignDevicesHook(io_same_device=True))
+
+ # 2. Prepare encoder args and encoder kwargs from model kwargs.
+ irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
+ encoder_kwargs = {
+ argument: value
+ for argument, value in model_kwargs.items()
+ if not any(argument.startswith(p) for p in irrelevant_prefix)
+ }
+ encoder_signature = set(inspect.signature(encoder.forward).parameters)
+ encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
+ if not encoder_accepts_wildcard:
+ encoder_kwargs = {
+ argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
+ }
+
+ # 3. make sure that encoder returns `ModelOutput`
+ model_input_name = model_input_name if model_input_name is not None else self.main_input_name
+ encoder_kwargs["return_dict"] = True
+ encoder_kwargs[model_input_name] = inputs_tensor
+ model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs)
+
+ return model_kwargs
+
+ def _prepare_decoder_input_ids_for_generation(
+ self,
+ batch_size: int,
+ model_input_name: str,
+ model_kwargs: Dict[str, torch.Tensor],
+ decoder_start_token_id: Union[int, List[int]] = None,
+ bos_token_id: int = None,
+ device: torch.device = None,
+ ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]:
+ """Prepares `decoder_input_ids` for generation with encoder-decoder models"""
+ # 1. Check whether the user has defined `decoder_input_ids` manually. For ease of input naming, we also allow
+ # the user to pass it under `input_ids`, if the encoder does not use it as the main input.
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
+ elif "input_ids" in model_kwargs and model_input_name != "input_ids":
+ decoder_input_ids = model_kwargs.pop("input_ids")
+ else:
+ decoder_input_ids = None
+
+ # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
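+ # e.g. user-provided decoder_input_ids [[22, 33]] with decoder_start_token_id=0 become [[0, 22, 33]] below,
+ # barring the model-specific exceptions handled further down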
+ decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
+ if device is None:
+ device = self.device
+ if isinstance(decoder_start_token_id, list):
+ if len(decoder_start_token_id) != batch_size:
+ raise ValueError(
+ f"`decoder_start_token_id` expcted to have length {batch_size} but got {len(decoder_start_token_id)}"
+ )
+ decoder_input_ids_start = torch.tensor(decoder_start_token_id, dtype=torch.long, device=device)
+ decoder_input_ids_start = decoder_input_ids_start.view(-1, 1)
+ else:
+ decoder_input_ids_start = (
+ torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id
+ )
+
+ # no user input -> use decoder_start_token_id as decoder_input_ids
+ if decoder_input_ids is None:
+ decoder_input_ids = decoder_input_ids_start
+ # exception: Donut checkpoints have task-specific decoder starts and don't expect a BOS token
+ elif self.config.model_type == "vision-encoder-decoder" and "donut" in self.name_or_path.lower():
+ pass
+ elif self.config.model_type in ["whisper"]:
+ pass
+ # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
+ # decoder_attention_mask if provided)
+ elif (
+ isinstance(decoder_start_token_id, int)
+ and (decoder_input_ids[:, 0] != decoder_start_token_id).all().item()
+ ) or (
+ isinstance(decoder_start_token_id, torch.Tensor)
+ and (decoder_input_ids[:, 0] != decoder_start_token_id[:, 0]).all().item()
+ ):
+ decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1)
+ if "decoder_attention_mask" in model_kwargs:
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
+ decoder_attention_mask = torch.cat(
+ (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
+ dim=-1,
+ )
+ model_kwargs["decoder_attention_mask"] = decoder_attention_mask
+
+ return decoder_input_ids, model_kwargs
+
+ def _get_decoder_start_token_id(
+ self, decoder_start_token_id: Union[int, List[int]] = None, bos_token_id: int = None
+ ) -> int:
+ decoder_start_token_id = (
+ decoder_start_token_id
+ if decoder_start_token_id is not None
+ else self.generation_config.decoder_start_token_id
+ )
+ bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
+
+ if decoder_start_token_id is not None:
+ return decoder_start_token_id
+ elif bos_token_id is not None:
+ return bos_token_id
+ raise ValueError(
+ "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
+ )
+
+ @staticmethod
+ def _expand_inputs_for_generation(
+ expand_size: int = 1,
+ is_encoder_decoder: bool = False,
+ input_ids: Optional[torch.LongTensor] = None,
+ **model_kwargs,
+ ) -> Tuple[torch.LongTensor, Dict[str, Any]]:
+ """Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...]"""
+
+ def _expand_dict_for_generation(dict_to_expand):
+ for key in dict_to_expand:
+ if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], torch.Tensor):
+ dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
+ return dict_to_expand
+
+ if input_ids is not None:
+ input_ids = input_ids.repeat_interleave(expand_size, dim=0)
+
+ model_kwargs = _expand_dict_for_generation(model_kwargs)
+
+ if is_encoder_decoder:
+ if model_kwargs.get("encoder_outputs") is None:
+ raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
+ model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
+
+ return input_ids, model_kwargs
+
+ def _extract_past_from_model_output(self, outputs: ModelOutput, standardize_cache_format: bool = False):
+ past_key_values = None
+ if "past_key_values" in outputs:
+ past_key_values = outputs.past_key_values
+ elif "mems" in outputs:
+ past_key_values = outputs.mems
+ elif "past_buckets_states" in outputs:
+ past_key_values = outputs.past_buckets_states
+
+ # Bloom fix: standardizes the cache format when requested
+ if standardize_cache_format and hasattr(self, "_convert_to_standard_cache"):
+ batch_size = outputs.logits.shape[0]
+ past_key_values = self._convert_to_standard_cache(past_key_values, batch_size=batch_size)
+ return past_key_values
+
+ def _update_model_kwargs_for_generation(
+ self,
+ outputs: ModelOutput,
+ model_kwargs: Dict[str, Any],
+ is_encoder_decoder: bool = False,
+ standardize_cache_format: bool = False,
+ ) -> Dict[str, Any]:
+ # update past_key_values
+ model_kwargs["past_key_values"] = self._extract_past_from_model_output(
+ outputs, standardize_cache_format=standardize_cache_format
+ )
+ if getattr(outputs, "state", None) is not None:
+ model_kwargs["state"] = outputs.state
+
+ # update token_type_ids with last value
+ if "token_type_ids" in model_kwargs:
+ token_type_ids = model_kwargs["token_type_ids"]
+ model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
+
+ if not is_encoder_decoder:
+ # update attention mask
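+ # (a column of ones is appended for the token generated at this step, e.g. [[1, 1, 1]] -> [[1, 1, 1, 1]])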
+ if "attention_mask" in model_kwargs:
+ attention_mask = model_kwargs["attention_mask"]
+ model_kwargs["attention_mask"] = torch.cat(
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
+ )
+ else:
+ # update decoder attention mask
+ if "decoder_attention_mask" in model_kwargs:
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
+ model_kwargs["decoder_attention_mask"] = torch.cat(
+ [decoder_attention_mask, decoder_attention_mask.new_ones((decoder_attention_mask.shape[0], 1))],
+ dim=-1,
+ )
+
+ if "cache_position" in model_kwargs and model_kwargs["cache_position"] is not None:
+ model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + 1
+
+ return model_kwargs
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ raise NotImplementedError(
+ f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to"
+ f" enable beam search for {self.__class__}"
+ )
+
+ def _get_candidate_generator(
+ self,
+ generation_config: GenerationConfig,
+ input_ids: torch.LongTensor,
+ inputs_tensor: torch.Tensor,
+ assistant_model: "PreTrainedModel",
+ logits_processor: LogitsProcessorList,
+ model_kwargs: Dict,
+ ) -> CandidateGenerator:
+ """
+ Returns the candidate generator to be used in `assisted_generation`
+ """
+ if generation_config.prompt_lookup_num_tokens is not None:
+ candidate_generator = PromptLookupCandidateGenerator(
+ num_output_tokens=generation_config.prompt_lookup_num_tokens,
+ max_matching_ngram_size=generation_config.max_matching_ngram_size,
+ )
+ else:
+ candidate_generator = AssistedCandidateGenerator(
+ input_ids=input_ids,
+ assistant_model=assistant_model,
+ generation_config=generation_config,
+ logits_processor=logits_processor,
+ model_kwargs=model_kwargs,
+ inputs_tensor=inputs_tensor,
+ )
+ return candidate_generator
+
+ def _get_logits_warper(
+ self,
+ generation_config: GenerationConfig,
+ ) -> LogitsProcessorList:
+ """
+ This method returns a [`LogitsProcessorList`] object that contains all relevant [`LogitsWarper`] instances
+ used for multinomial sampling.
+ """
+
+ # instantiate warpers list
+ warpers = LogitsProcessorList()
+
+ # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a
+ # better score (i.e. keep len(list(generation_config.eos_token_id)) + 1)
+ if generation_config.num_beams > 1:
+ if isinstance(generation_config.eos_token_id, list):
+ min_tokens_to_keep = len(generation_config.eos_token_id) + 1
+ else:
+ min_tokens_to_keep = 2
+ else:
+ min_tokens_to_keep = 1
+
+ # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
+ # all samplers can be found in `generation_utils_samplers.py`
+ if generation_config.temperature is not None and generation_config.temperature != 1.0:
+ warpers.append(TemperatureLogitsWarper(generation_config.temperature))
+ if generation_config.top_k is not None and generation_config.top_k != 0:
+ warpers.append(TopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep))
+ if generation_config.top_p is not None and generation_config.top_p < 1.0:
+ warpers.append(TopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep))
+ if generation_config.typical_p is not None and generation_config.typical_p < 1.0:
+ warpers.append(
+ TypicalLogitsWarper(mass=generation_config.typical_p, min_tokens_to_keep=min_tokens_to_keep)
+ )
+ if generation_config.epsilon_cutoff is not None and 0.0 < generation_config.epsilon_cutoff < 1.0:
+ warpers.append(
+ EpsilonLogitsWarper(epsilon=generation_config.epsilon_cutoff, min_tokens_to_keep=min_tokens_to_keep)
+ )
+ if generation_config.eta_cutoff is not None and 0.0 < generation_config.eta_cutoff < 1.0:
+ warpers.append(
+ EtaLogitsWarper(epsilon=generation_config.eta_cutoff, min_tokens_to_keep=min_tokens_to_keep)
+ )
+ # `LogitNormalization` should always be the last logit processor, when present
+ if generation_config.renormalize_logits is True:
+ warpers.append(LogitNormalization())
+ return warpers
+
+ def _get_logits_processor(
+ self,
+ generation_config: GenerationConfig,
+ input_ids_seq_length: int,
+ encoder_input_ids: torch.LongTensor,
+ prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]],
+ logits_processor: Optional[LogitsProcessorList],
+ model_kwargs: Optional[Dict[str, Any]] = None,
+ negative_prompt_ids: Optional[torch.Tensor] = None,
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
+ ) -> LogitsProcessorList:
+ """
+ This method returns a [`LogitsProcessorList`] object that contains all relevant [`LogitsProcessor`]
+ instances used to modify the scores of the language model head.
+ """
+ # instantiate processors list
+ processors = LogitsProcessorList()
+
+ if generation_config.guidance_scale is not None and generation_config.guidance_scale != 1:
+ processors.append(
+ UnbatchedClassifierFreeGuidanceLogitsProcessor(
+ generation_config.guidance_scale,
+ self,
+ unconditional_ids=negative_prompt_ids,
+ unconditional_attention_mask=negative_prompt_attention_mask,
+ use_cache=model_kwargs["use_cache"],
+ )
+ )
+ if generation_config.sequence_bias is not None:
+ processors.append(SequenceBiasLogitsProcessor(sequence_bias=generation_config.sequence_bias))
+
+ if generation_config.diversity_penalty is not None and generation_config.diversity_penalty > 0.0:
+ processors.append(
+ HammingDiversityLogitsProcessor(
+ diversity_penalty=generation_config.diversity_penalty,
+ num_beams=generation_config.num_beams,
+ num_beam_groups=generation_config.num_beam_groups,
+ )
+ )
+ if (
+ generation_config.encoder_repetition_penalty is not None
+ and generation_config.encoder_repetition_penalty != 1.0
+ ):
+ processors.append(
+ EncoderRepetitionPenaltyLogitsProcessor(
+ penalty=generation_config.encoder_repetition_penalty, encoder_input_ids=encoder_input_ids
+ )
+ )
+ if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
+ processors.append(RepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty))
+ if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
+ processors.append(NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
+ if (
+ generation_config.encoder_no_repeat_ngram_size is not None
+ and generation_config.encoder_no_repeat_ngram_size > 0
+ ):
+ processors.append(
+ EncoderNoRepeatNGramLogitsProcessor(generation_config.encoder_no_repeat_ngram_size, encoder_input_ids)
+ )
+ if generation_config.bad_words_ids is not None:
+ processors.append(
+ NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id)
+ )
+ if (
+ generation_config.min_length is not None
+ and generation_config.eos_token_id is not None
+ and generation_config.min_length > 0
+ ):
+ processors.append(MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id))
+ if (
+ generation_config.min_new_tokens is not None
+ and generation_config.eos_token_id is not None
+ and generation_config.min_new_tokens > 0
+ ):
+ processors.append(
+ MinNewTokensLengthLogitsProcessor(
+ input_ids_seq_length, generation_config.min_new_tokens, generation_config.eos_token_id
+ )
+ )
+ if prefix_allowed_tokens_fn is not None:
+ processors.append(
+ PrefixConstrainedLogitsProcessor(
+ prefix_allowed_tokens_fn, generation_config.num_beams // generation_config.num_beam_groups
+ )
+ )
+ if generation_config.forced_bos_token_id is not None:
+ processors.append(ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
+ if generation_config.forced_eos_token_id is not None:
+ processors.append(
+ ForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
+ )
+ if generation_config.remove_invalid_values is True:
+ processors.append(InfNanRemoveLogitsProcessor())
+ if generation_config.exponential_decay_length_penalty is not None:
+ processors.append(
+ ExponentialDecayLengthPenalty(
+ generation_config.exponential_decay_length_penalty,
+ generation_config.eos_token_id,
+ input_ids_seq_length,
+ )
+ )
+ if generation_config.suppress_tokens is not None:
+ processors.append(SuppressTokensLogitsProcessor(generation_config.suppress_tokens))
+ if generation_config.begin_suppress_tokens is not None:
+ begin_index = input_ids_seq_length
+ begin_index = (
+ begin_index
+ if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
+ else begin_index + 1
+ )
+ if generation_config.forced_decoder_ids is not None:
+ # generation starts after the last token that is forced
+ begin_index += generation_config.forced_decoder_ids[-1][0]
+ processors.append(
+ SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
+ )
+ if generation_config.forced_decoder_ids is not None:
+ # TODO(Sanchit): deprecate in v4.40 by removing this logic
+ warnings.warn(
+ "You have explicitly specified `forced_decoder_ids`. This functionality has been deprecated and will throw an error in v4.40. Please remove the `forced_decoder_ids` argument in favour of `input_ids` or `decoder_input_ids` respectively.",
+ FutureWarning,
+ )
+ processors.append(ForceTokensLogitsProcessor(generation_config.forced_decoder_ids, _has_warned=True))
+ processors = self._merge_criteria_processor_list(processors, logits_processor)
+ # `LogitNormalization` should always be the last logit processor, when present
+ if generation_config.renormalize_logits is True:
+ processors.append(LogitNormalization())
+ return processors
+
+ def _get_stopping_criteria(
+ self, generation_config: GenerationConfig, stopping_criteria: Optional[StoppingCriteriaList]
+ ) -> StoppingCriteriaList:
+ criteria = StoppingCriteriaList()
+ if generation_config.max_length is not None:
+ max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
+ criteria.append(
+ MaxLengthCriteria(
+ max_length=generation_config.max_length,
+ max_position_embeddings=max_position_embeddings,
+ )
+ )
+ if generation_config.max_time is not None:
+ criteria.append(MaxTimeCriteria(max_time=generation_config.max_time))
+ criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
+ return criteria
+
+ def _merge_criteria_processor_list(
+ self,
+ default_list: Union[LogitsProcessorList, StoppingCriteriaList],
+ custom_list: Union[LogitsProcessorList, StoppingCriteriaList],
+ ) -> Union[LogitsProcessorList, StoppingCriteriaList]:
+ if len(custom_list) == 0:
+ return default_list
+ for default in default_list:
+ for custom in custom_list:
+ if type(custom) is type(default):
+ object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor"
+ raise ValueError(
+ f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
+ f" `.generate()`, but it has already been created with the values {default}. {default} has been"
+ " created by passing the corresponding arguments to generate or by the model's config default"
+ f" values. If you just want to change the default values of {object_type} consider passing"
+ f" them as arguments to `.generate()` instead of using a custom {object_type}."
+ )
+ default_list.extend(custom_list)
+ return default_list
+
+ def compute_transition_scores(
+ self,
+ sequences: torch.Tensor,
+ scores: Tuple[torch.Tensor],
+ beam_indices: Optional[torch.Tensor] = None,
+ normalize_logits: bool = False,
+ ) -> torch.Tensor:
+ """
+ Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was
+ used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
+
+ Parameters:
+ sequences (`torch.LongTensor`):
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
+ shorter if all batches finished early due to the `eos_token_id`.
+ scores (`tuple(torch.FloatTensor)`):
+ Transition scores for each vocabulary token at each generation step. Beam transition scores consisting
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.
+ Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
+ beam_indices (`torch.LongTensor`, *optional*):
+ Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
+ `(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` was used at
+ generate-time.
+ normalize_logits (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
+
+ Return:
+ `torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
+ the transition scores (logits).
+
+ Examples:
+
+ ```python
+ >>> from transformers import GPT2Tokenizer, AutoModelForCausalLM
+ >>> import numpy as np
+
+ >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> tokenizer.pad_token_id = tokenizer.eos_token_id
+ >>> inputs = tokenizer(["Today is"], return_tensors="pt")
+
+ >>> # Example 1: Print the scores for each token generated with Greedy Search
+ >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
+ >>> transition_scores = model.compute_transition_scores(
+ ... outputs.sequences, outputs.scores, normalize_logits=True
+ ... )
+ >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
+ >>> # encoder-decoder models, like BART or T5.
+ >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
+ >>> generated_tokens = outputs.sequences[:, input_length:]
+ >>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
+ ... # | token | token string | log probability | probability
+ ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
+ | 262 | the | -1.414 | 24.33%
+ | 1110 | day | -2.609 | 7.36%
+ | 618 | when | -2.010 | 13.40%
+ | 356 | we | -1.859 | 15.58%
+ | 460 | can | -2.508 | 8.14%
+
+ >>> # Example 2: Reconstruct the sequence scores from Beam Search
+ >>> outputs = model.generate(
+ ... **inputs,
+ ... max_new_tokens=5,
+ ... num_beams=4,
+ ... num_return_sequences=4,
+ ... return_dict_in_generate=True,
+ ... output_scores=True,
+ ... )
+ >>> transition_scores = model.compute_transition_scores(
+ ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
+ ... )
+ >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
+ >>> # Tip 1: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
+ >>> # use case, you might want to recompute it with `normalize_logits=True`.
+ >>> # Tip 2: the output length does NOT include the input length
+ >>> output_length = np.sum(transition_scores.numpy() < 0, axis=1)
+ >>> length_penalty = model.generation_config.length_penalty
+ >>> reconstructed_scores = transition_scores.sum(axis=1) / (output_length**length_penalty)
+ >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
+ True
+ ```"""
+ # 1. In the absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
+ # to a beam search approach where the first (and only) beam is always selected
+ if beam_indices is None:
+ beam_indices = torch.arange(scores[0].shape[0]).view(-1, 1).to(sequences.device)
+ beam_indices = beam_indices.expand(-1, len(scores))
+
+ # 2. reshape scores as [batch_size*vocab_size, # generation steps] with # generation steps being
+ # seq_len - input_length
+ scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1)
+
+ # 3. Optionally normalize the logits (across the vocab dimension)
+ if normalize_logits:
+ scores = scores.reshape(-1, self.config.vocab_size, scores.shape[-1])
+ scores = torch.nn.functional.log_softmax(scores, dim=1)
+ scores = scores.reshape(-1, scores.shape[-1])
+
+ # 4. cut beam_indices to longest beam length
+ beam_indices_mask = beam_indices < 0
+ max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max()
+ beam_indices = beam_indices.clone()[:, :max_beam_length]
+ beam_indices_mask = beam_indices_mask[:, :max_beam_length]
+
+ # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
+ beam_indices[beam_indices_mask] = 0
+
+ # 6. multiply beam_indices with vocab size to gather correctly from scores
+ beam_sequence_indices = beam_indices * self.config.vocab_size
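+ # (e.g. beam index 2 with a vocab size of 50257 contributes a base offset of 2 * 50257 = 100514, to which the
+ # selected token id is added in step 7)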
+
+ # 7. Define which indices contributed to scores
+ cut_idx = sequences.shape[-1] - max_beam_length
+ indices = sequences[:, cut_idx:] + beam_sequence_indices
+
+ # 8. Compute scores
+ transition_scores = scores.gather(0, indices)
+
+ # 9. Mask out transition_scores of beams that stopped early
+ transition_scores[beam_indices_mask] = 0
+
+ return transition_scores
+
+ def _validate_model_class(self):
+ """
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
+ right class to use.
+ """
+ if not self.can_generate():
+ generate_compatible_mappings = [
+ MODEL_FOR_CAUSAL_LM_MAPPING,
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
+ MODEL_FOR_VISION_2_SEQ_MAPPING,
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+ ]
+ generate_compatible_classes = set()
+ for model_mapping in generate_compatible_mappings:
+ supported_models = model_mapping.get(type(self.config), default=None)
+ if supported_models is not None:
+ generate_compatible_classes.add(supported_models.__name__)
+ exception_message = (
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
+ "it doesn't have a language model head."
+ )
+ if generate_compatible_classes:
+ exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
+ raise TypeError(exception_message)
+
+ def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
+ # If a `Cache` instance is passed, checks whether the model is compatible with it
+ if isinstance(model_kwargs.get("past_key_values", None), Cache) and not self._supports_cache_class:
+ raise ValueError(
+ f"{self.__class__.__name__} does not support an instance of `Cache` as `past_key_values`. Please "
+ "check the model documentation for supported cache formats."
+ )
+
+ # Excludes arguments that are handled before calling any model function
+ if self.config.is_encoder_decoder:
+ for key in ["decoder_input_ids"]:
+ model_kwargs.pop(key, None)
+
+ unused_model_args = []
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
+ if "kwargs" in model_args or "model_kwargs" in model_args:
+ model_args |= set(inspect.signature(self.forward).parameters)
+
+ # Encoder-Decoder models may also need Encoder arguments from `model_kwargs`
+ if self.config.is_encoder_decoder:
+ base_model = getattr(self, self.base_model_prefix, None)
+
+ # allow encoder kwargs
+ encoder = getattr(self, "encoder", None)
+ # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.
+ # Also, it has `base_model_prefix = "encoder_decoder"` but there is no `self.encoder_decoder`
+ # TODO: A better way to handle this.
+ if encoder is None and base_model is not None:
+ encoder = getattr(base_model, "encoder", None)
+
+ if encoder is not None:
+ encoder_model_args = set(inspect.signature(encoder.forward).parameters)
+ model_args |= encoder_model_args
+
+ # allow decoder kwargs
+ decoder = getattr(self, "decoder", None)
+ if decoder is None and base_model is not None:
+ decoder = getattr(base_model, "decoder", None)
+
+ if decoder is not None:
+ decoder_model_args = set(inspect.signature(decoder.forward).parameters)
+ model_args |= {f"decoder_{x}" for x in decoder_model_args}
+
+ # allow assistant_encoder_outputs to be passed if we're doing assisted generating
+ if "assistant_encoder_outputs" in model_kwargs:
+ model_args |= {"assistant_encoder_outputs"}
+
+ for key, value in model_kwargs.items():
+ if value is not None and key not in model_args:
+ unused_model_args.append(key)
+
+ if unused_model_args:
+ raise ValueError(
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
+ " generate arguments will also show up in this list)"
+ )
+
+ def _validate_generated_length(self, generation_config, input_ids_length, has_default_max_length):
+ """Performs validation related to the resulting generated length"""
+
+ # 1. Max length warnings related to poor parameterization
+ if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
+ # 20 is the default max_length of the generation config
+ warnings.warn(
+ f"Using the model-agnostic default `max_length` (={generation_config.max_length}) to control the "
+ "generation length. We recommend setting `max_new_tokens` to control the maximum length of the "
+ "generation.",
+ UserWarning,
+ )
+ if input_ids_length >= generation_config.max_length:
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
+ raise ValueError(
+ f"Input length of {input_ids_string} is {input_ids_length}, but `max_length` is set to"
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+ " increasing `max_length` or, better yet, setting `max_new_tokens`."
+ )
+
+ # 2. Min length warnings due to unfeasible parameter combinations
+ min_length_error_suffix = (
+ " Generation will stop at the defined maximum length. You should decrease the minimum length and/or "
+ "increase the maximum length."
+ )
+ if has_default_max_length:
+ min_length_error_suffix += (
+ f" Note that `max_length` is set to {generation_config.max_length}, its default value."
+ )
+ if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
+ warnings.warn(
+ f"Unfeasible length constraints: `min_length` ({generation_config.min_length}) is larger than"
+ f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
+ UserWarning,
+ )
+ if generation_config.min_new_tokens is not None:
+ min_length = generation_config.min_new_tokens + input_ids_length
+ if min_length > generation_config.max_length:
+ warnings.warn(
+ f"Unfeasible length constraints: `min_new_tokens` ({generation_config.min_new_tokens}), when "
+ f"added to the prompt length ({input_ids_length}), is larger than"
+ f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
+ UserWarning,
+ )
+
+ def _prepare_generation_config(
+ self, generation_config: GenerationConfig, **kwargs: Dict
+ ) -> Tuple[GenerationConfig, Dict]:
+ """
+ Prepares the base generation config, then applies any generation configuration options from kwargs.
+ """
+ # TODO joao: when we can detect `fullgraph=True` in `torch.compile` (https://github.com/pytorch/pytorch/pull/120400)
+ # replace `is_torchdynamo_compiling` by the corresponding check. As it is, we are being too restrictive with
+ # the parameterization in `fullgraph=False` so as to enable `fullgraph=True`.
+
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
+ if generation_config is None:
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
+ # three conditions must be met
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
+ # 2) the generation config must have seen no modification since its creation (the hash is the same);
+ # 3) the user must have set generation parameters in the model config.
+ # NOTE: `torch.compile` can't compile `hash`, this legacy support is disabled with compilation.
+ if (
+ not is_torchdynamo_compiling()
+ and self.generation_config._from_model_config
+ and self.generation_config._original_object_hash == hash(self.generation_config)
+ and self.config._has_non_default_generation_parameters()
+ ):
+ new_generation_config = GenerationConfig.from_model_config(self.config)
+ if new_generation_config != self.generation_config:
+ warnings.warn(
+ "You have modified the pretrained model configuration to control generation. This is a"
+ " deprecated strategy to control generation and will be removed soon, in a future version."
+ " Please use and modify the model generation configuration (see"
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
+ )
+ self.generation_config = new_generation_config
+ generation_config = self.generation_config
+
+ # `torch.compile` can't compile `copy.deepcopy`, arguments in `kwargs` that are part of `generation_config`
+ # will mutate the object with `.update`. As such, passing these arguments through `kwargs` is disabled.
+ if is_torchdynamo_compiling():
+ model_kwargs = kwargs
+ generate_attributes_in_kwargs = [
+ key for key, value in kwargs.items() if getattr(generation_config, key, None) != value
+ ]
+ if len(generate_attributes_in_kwargs) > 0:
+ raise ValueError(
+ "`torch.compile` exception: all generation configuration attributes must be passed within a "
+ f"`generation_config` instance passed to `generate` (found: {generate_attributes_in_kwargs})."
+ )
+ else:
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs)
+
+ return generation_config, model_kwargs
+
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
+ synced_gpus: Optional[bool] = None,
+ assistant_model: Optional["PreTrainedModel"] = None,
+ streamer: Optional["BaseStreamer"] = None,
+ negative_prompt_ids: Optional[torch.Tensor] = None,
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> Union[GenerateOutput, torch.LongTensor]:
+ r"""
+
+ Generates sequences of token ids for models with a language modeling head.
+
+
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
+ The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
+ method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
+ should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
+ `input_ids`, `input_values`, `input_features`, or `pixel_values`.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logit processor is passed that is already created with the arguments or a
+ generation config an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complements the default stopping criteria built from arguments and a
+ generation config. If a stopping criteria is passed that is already created with the arguments or a
+ generation config an error is thrown. If your stopping criteria depends on the `scores` input, make
+ sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`. This feature is
+ intended for advanced users.
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
+ If provided, this function constrains the beam search to allowed tokens only at each step. If not
+ provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
+ `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
+ on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
+ for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
+ Retrieval](https://arxiv.org/abs/2010.00904).
+ synced_gpus (`bool`, *optional*):
+ Whether to continue running the while loop until max_length. Unless overridden this flag will be set to
+ `True` under DeepSpeed ZeRO Stage 3 multiple GPUs environment to avoid hanging if one GPU finished
+ generating before other GPUs. Otherwise it'll be set to `False`.
+ assistant_model (`PreTrainedModel`, *optional*):
+ An assistant model that can be used to accelerate generation. The assistant model must have the exact
+ same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistant model
+ is much faster than running generation with the model you're calling generate from. As such, the
+ assistant model should be much smaller.
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ negative_prompt_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The negative prompt needed for some processors such as CFG. The batch size must match the input batch
+ size. This is an experimental feature, subject to breaking API changes in future versions.
+ negative_prompt_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Attention_mask for `negative_prompt_ids`.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.
+
+ If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GenerateDecoderOnlyOutput`],
+ - [`~generation.GenerateBeamDecoderOnlyOutput`]
+
+ If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GenerateEncoderDecoderOutput`],
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
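+
+ Examples (a minimal sketch; the checkpoints, the prompt and the `allowed_tokens_fn` callback below are
+ illustrative choices, while the parameters themselves are documented above):
+
+ ```python
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> inputs = tokenizer("It might be possible to", return_tensors="pt")
+
+ >>> # greedy decoding, capped at 10 new tokens
+ >>> output_ids = model.generate(**inputs, max_new_tokens=10, do_sample=False)
+ >>> text = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+
+ >>> # constrained decoding: the callback returns the allowed token ids for the next step
+ >>> def allowed_tokens_fn(batch_id, input_ids):
+ ...     return list(range(tokenizer.vocab_size))  # hypothetical callback: allow every token
+ >>> output_ids = model.generate(**inputs, max_new_tokens=10, prefix_allowed_tokens_fn=allowed_tokens_fn)
+
+ >>> # assisted decoding with a smaller model that shares the same tokenizer
+ >>> assistant = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> output_ids = model.generate(**inputs, max_new_tokens=10, assistant_model=assistant)
+ ```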
+ """
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
+ self._validate_model_class()
+ generation_config, model_kwargs = self._prepare_generation_config(generation_config, **kwargs)
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ # 2. Set generation parameters if not already defined
+ if synced_gpus is None:
+ if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:
+ synced_gpus = True
+ else:
+ synced_gpus = False
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+
+ if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
+ if model_kwargs.get("attention_mask", None) is None:
+ logger.warning(
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
+ )
+ eos_token_id = generation_config.eos_token_id
+ if isinstance(eos_token_id, list):
+ eos_token_id = eos_token_id[0]
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-ended generation.")
+ generation_config.pad_token_id = eos_token_id
+
+ # 3. Define model inputs
+ # inputs_tensor has to be defined
+ # model_input_name is defined if model-specific keyword input is passed
+ # otherwise model_input_name is None
+ # all model-specific keyword inputs are removed from `model_kwargs`
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
+ inputs, generation_config.bos_token_id, model_kwargs
+ )
+ batch_size = inputs_tensor.shape[0]
+
+ # 4. Define other model kwargs
+ model_kwargs["output_attentions"] = generation_config.output_attentions
+ model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
+ # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
+ # generating the first new token or not, and we only want to use the embeddings for the first new token)
+ if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
+ model_kwargs["use_cache"] = True
+ else:
+ model_kwargs["use_cache"] = generation_config.use_cache
+
+ accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys())
+ requires_attention_mask = "encoder_outputs" not in model_kwargs
+
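+ # when missing, an attention mask is built from the inputs (roughly: positions equal to
+ # `pad_token_id` are masked out when the pad token appears in the inputs and differs from the eos
+ # token; otherwise an all-ones mask is used)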
+ if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask:
+ model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
+ inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
+ )
+
+ # decoder-only models should use left-padding for generation
+ if not self.config.is_encoder_decoder:
+ # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
+ # Note: If using `inputs_embeds`, this check does not work, because we want to be more hands-off.
+ if (
+ generation_config.pad_token_id is not None
+ and len(inputs_tensor.shape) == 2
+ and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0
+ ):
+ logger.warning(
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
+ )
+
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
+ # if the model is an encoder-decoder, `encoder_outputs` are created
+ # and added to `model_kwargs`
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
+ inputs_tensor, model_kwargs, model_input_name
+ )
+
+ # 5. Prepare `input_ids` which will be used for auto-regressive generation
+ if self.config.is_encoder_decoder:
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
+ batch_size=batch_size,
+ model_input_name=model_input_name,
+ model_kwargs=model_kwargs,
+ decoder_start_token_id=generation_config.decoder_start_token_id,
+ bos_token_id=generation_config.bos_token_id,
+ device=inputs_tensor.device,
+ )
+ else:
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
+
+ if streamer is not None:
+ streamer.put(input_ids.cpu())
+
+ # 6. Prepare `max_length` depending on other stopping criteria.
+ input_ids_length = input_ids.shape[-1]
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ if generation_config.max_new_tokens is not None:
+ if not has_default_max_length and generation_config.max_length is not None:
+ logger.warning(
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
+ "Please refer to the documentation for more information. "
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
+ )
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_length
+
+ # otherwise the total length [inputs-embeds-len + new-tokens-len] will go beyond the indicated `max_length`
+ elif (
+ model_input_name == "inputs_embeds"
+ and inputs_tensor.shape[:-1] != input_ids.shape
+ and not self.config.is_encoder_decoder
+ ):
+ generation_config.max_length -= inputs_tensor.shape[1]
+ generation_config.min_length = max(generation_config.min_length - inputs_tensor.shape[1], 0)
+
+ if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING:
+ if generation_config.cache_implementation == "static":
+ if model_kwargs.get("past_key_values", False) is not False:
+ raise ValueError(
+ "Using `past_key_values` argument with `generate()` when using a static KV cache is not supported. Please open an issue in Transformers GitHub repository."
+ )
+ cache_cls = NEED_SETUP_CACHE_CLASSES_MAPPING["static"]
+ if not callable(getattr(self, "_setup_cache", None)):
+ raise ValueError(
+ "The `generation_config` defines a `cache_implementation` that is not compatible with this model."
+ " Make sure it has a `_setup_cache` function."
+ )
+ self._setup_cache(cache_cls, max_batch_size=batch_size, max_cache_len=generation_config.max_length)
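+ # (the static cache pre-allocates key/value tensors sized by `max_batch_size` and `max_cache_len`,
+ # which keeps tensor shapes fixed and makes the forward pass compatible with `torch.compile`)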
+
+ self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)
+
+ # 7. determine generation mode
+ generation_mode = generation_config.get_generation_mode(assistant_model)
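+ # (the mode is derived from the generation config, e.g. `num_beams`, `do_sample`, `penalty_alpha`
+ # and `constraints`, and from whether an `assistant_model` was passed)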
+
+ if streamer is not None and (generation_config.num_beams > 1):
+ raise ValueError(
+ "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
+ )
+
+ if self.device.type != input_ids.device.type:
+ warnings.warn(
+ "You are calling .generate() with the `input_ids` being on a device type different"
+ f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
+ f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
+ " Please make sure that you have put `input_ids` to the"
+ f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
+ " running `.generate()`.",
+ UserWarning,
+ )
+
+ # 8. prepare distribution pre_processing samplers
+ prepared_logits_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=input_ids_length,
+ encoder_input_ids=inputs_tensor,
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+ logits_processor=logits_processor,
+ model_kwargs=model_kwargs,
+ negative_prompt_ids=negative_prompt_ids,
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
+ )
+
+ # 9. prepare stopping criteria
+ prepared_stopping_criteria = self._get_stopping_criteria(
+ generation_config=generation_config, stopping_criteria=stopping_criteria
+ )
+ # 10. go into different generation modes
+ if generation_mode == GenerationMode.ASSISTED_GENERATION:
+ if generation_config.num_return_sequences > 1:
+ raise ValueError(
+ "num_return_sequences has to be 1 when doing assisted generate, "
+ f"but is {generation_config.num_return_sequences}."
+ )
+ if batch_size > 1:
+ raise ValueError("assisted generate is only supported for batch_size = 1")
+ if not model_kwargs["use_cache"]:
+ raise ValueError("assisted generate requires `use_cache=True`")
+
+ # 11. Get the candidate generator, given the parameterization
+ candidate_generator = self._get_candidate_generator(
+ generation_config=generation_config,
+ input_ids=input_ids,
+ inputs_tensor=inputs_tensor,
+ assistant_model=assistant_model,
+ logits_processor=logits_processor,
+ model_kwargs=model_kwargs,
+ )
+
+ # 12. run assisted generate
+ result = self.assisted_decoding(
+ input_ids,
+ candidate_generator=candidate_generator,
+ do_sample=generation_config.do_sample,
+ logits_processor=prepared_logits_processor,
+ logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ streamer=streamer,
+ **model_kwargs,
+ )
+ elif generation_mode == GenerationMode.GREEDY_SEARCH:
+ # 11. run greedy search
+ result = self._greedy_search(
+ input_ids,
+ logits_processor=prepared_logits_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ streamer=streamer,
+ **model_kwargs,
+ )
+
+ elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH:
+ if not model_kwargs["use_cache"]:
+ raise ValueError("Contrastive search requires `use_cache=True`")
+
+ result = self._contrastive_search(
+ input_ids,
+ top_k=generation_config.top_k,
+ penalty_alpha=generation_config.penalty_alpha,
+ logits_processor=prepared_logits_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ streamer=streamer,
+ sequential=generation_config.low_memory,
+ **model_kwargs,
+ )
+
+ elif generation_mode == GenerationMode.SAMPLE:
+ # 11. prepare logits warper
+ logits_warper = self._get_logits_warper(generation_config)
+
+ # 12. expand input_ids with `num_return_sequences` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_return_sequences,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ **model_kwargs,
+ )
+
+ # 13. run sample
+ result = self._sample(
+ input_ids,
+ logits_processor=prepared_logits_processor,
+ logits_warper=logits_warper,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ streamer=streamer,
+ **model_kwargs,
+ )
+
+ elif generation_mode == GenerationMode.BEAM_SEARCH:
+ # 11. prepare beam search scorer
+ beam_scorer = BeamSearchScorer(
+ batch_size=batch_size,
+ num_beams=generation_config.num_beams,
+ device=inputs_tensor.device,
+ length_penalty=generation_config.length_penalty,
+ do_early_stopping=generation_config.early_stopping,
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
+ max_length=generation_config.max_length,
+ )
+ # 12. interleave input_ids with `num_beams` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ **model_kwargs,
+ )
+ # 13. run beam search
+ result = self._beam_search(
+ input_ids,
+ beam_scorer,
+ logits_processor=prepared_logits_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ sequential=generation_config.low_memory,
+ **model_kwargs,
+ )
+
+ elif generation_mode == GenerationMode.BEAM_SAMPLE:
+ # 11. prepare logits warper
+ logits_warper = self._get_logits_warper(generation_config)
+
+ # 12. prepare beam search scorer
+ beam_scorer = BeamSearchScorer(
+ batch_size=batch_size,
+ num_beams=generation_config.num_beams,
+ device=inputs_tensor.device,
+ length_penalty=generation_config.length_penalty,
+ do_early_stopping=generation_config.early_stopping,
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
+ max_length=generation_config.max_length,
+ )
+
+ # 13. interleave input_ids with `num_beams` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ **model_kwargs,
+ )
+
+ # 14. run beam sample
+ result = self._beam_sample(
+ input_ids,
+ beam_scorer,
+ logits_processor=prepared_logits_processor,
+ logits_warper=logits_warper,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ **model_kwargs,
+ )
+
+ elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH:
+ # 11. prepare beam search scorer
+ beam_scorer = BeamSearchScorer(
+ batch_size=batch_size,
+ num_beams=generation_config.num_beams,
+ device=inputs_tensor.device,
+ length_penalty=generation_config.length_penalty,
+ do_early_stopping=generation_config.early_stopping,
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
+ num_beam_groups=generation_config.num_beam_groups,
+ max_length=generation_config.max_length,
+ )
+ # 12. interleave input_ids with `num_beams` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ **model_kwargs,
+ )
+ # 13. run beam search
+ result = self._group_beam_search(
+ input_ids,
+ beam_scorer,
+ logits_processor=prepared_logits_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ **model_kwargs,
+ )
+
+ elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH:
+ final_constraints = []
+ if generation_config.constraints is not None:
+ final_constraints = generation_config.constraints
+
+ if generation_config.force_words_ids is not None:
+
+ def typeerror():
+ raise ValueError(
+ "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]` "
+ f"of positive integers, but is {generation_config.force_words_ids}."
+ )
+
+ if (
+ not isinstance(generation_config.force_words_ids, list)
+ or len(generation_config.force_words_ids) == 0
+ ):
+ typeerror()
+
+ for word_ids in generation_config.force_words_ids:
+ if isinstance(word_ids[0], list):
+ if not isinstance(word_ids, list) or len(word_ids) == 0:
+ typeerror()
+ if any(not isinstance(token_ids, list) for token_ids in word_ids):
+ typeerror()
+ if any(
+ any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
+ for token_ids in word_ids
+ ):
+ typeerror()
+
+ constraint = DisjunctiveConstraint(word_ids)
+ else:
+ if not isinstance(word_ids, list) or len(word_ids) == 0:
+ typeerror()
+ if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):
+ typeerror()
+
+ constraint = PhrasalConstraint(word_ids)
+ final_constraints.append(constraint)
+
+ # 11. prepare beam search scorer
+ constrained_beam_scorer = ConstrainedBeamSearchScorer(
+ constraints=final_constraints,
+ batch_size=batch_size,
+ num_beams=generation_config.num_beams,
+ device=inputs_tensor.device,
+ length_penalty=generation_config.length_penalty,
+ do_early_stopping=generation_config.early_stopping,
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
+ max_length=generation_config.max_length,
+ )
+ # 12. interleave input_ids with `num_beams` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_beams,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ **model_kwargs,
+ )
+ # 13. run beam search
+ result = self._constrained_beam_search(
+ input_ids,
+ constrained_beam_scorer=constrained_beam_scorer,
+ logits_processor=prepared_logits_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ output_scores=generation_config.output_scores,
+ output_logits=generation_config.output_logits,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ synced_gpus=synced_gpus,
+ **model_kwargs,
+ )
+
+ if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING:
+ if not callable(getattr(self, "_reset_cache", None)):
+ raise ValueError(
+ "A `static_cache` was used to generate but there was a failure when trying to release the cache. "
+ "Make sure this model implements a `_reset_cache` function."
+ )
+ self._reset_cache()
+
+ return result
+
+ def _has_unfinished_sequences(self, this_peer_finished: bool, synced_gpus: bool, device: torch.device) -> bool:
+ """
+ Returns whether there are still unfinished sequences on the current device. The existence of unfinished
+ sequences is fed through `this_peer_finished`. ZeRO stage 3-friendly.
+ """
+ if synced_gpus:
+ # Under synced_gpus the `forward` call must continue until all GPUs complete their sequence.
+ # The following logic allows an early break if all peers finished generating their sequence
+ this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(device)
+ # send 0.0 if we finished, 1.0 otherwise
+ dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
+ # did all peers finish? the reduced sum will be 0.0 then
+ if this_peer_finished_flag.item() == 0.0:
+ return False
+ elif this_peer_finished:
+ return False
+ return True
+
+ def contrastive_search(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `contrastive_search` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._contrastive_search(*args, **kwargs)
+
+ @torch.no_grad()
+ def _contrastive_search(
+ self,
+ input_ids: torch.LongTensor,
+ top_k: Optional[int] = 1,
+ penalty_alpha: Optional[float] = 0,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ logits_warper: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ streamer: Optional["BaseStreamer"] = None,
+ sequential: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **contrastive search** and can
+ be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._contrastive_search`] directly. Use
+ generate() instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ top_k (`int`, *optional*, defaults to 1):
+ The size of the candidate set that is used to re-rank for contrastive search.
+ penalty_alpha (`float`, *optional*, defaults to 0):
+ The degeneration penalty for contrastive search; contrastive search is only activated when it is larger than 0.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ logits_warper (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
+ to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors
+ for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ sequential (`bool`, *optional*):
+ Switches top-k hidden state computation from parallel to sequential to reduce memory if True.
+ model_kwargs:
+ Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
+ If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`]
+ or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForCausalLM,
+ ... StoppingCriteriaList,
+ ... MaxLengthCriteria,
+ ... )
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
+ >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
+ >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token
+ >>> model.config.pad_token_id = model.config.eos_token_id
+ >>> input_prompt = "DeepMind Company is"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="pt")
+ >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)])
+ >>> outputs = model._contrastive_search(
+ ... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria
+ ... )
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ sequential = sequential if sequential is not None else self.generation_config.low_memory
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ # init attention / hidden states / scores tuples
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ scores = () if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # keep track of which sequences are already finished
+ batch_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
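+ # (`cache_position` holds the absolute positions of the tokens in the current forward pass and is
+ # used by models that need to index into the key/value cache)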
+
+ this_peer_finished = False
+
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values;
+ # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step
+ if model_kwargs.get("past_key_values") is None:
+ # prepare inputs
+ model_kwargs["use_cache"] = True
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+
+ # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save
+ # the `encoder_outputs`
+ outputs = self(
+ **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
+ )
+
+ # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with
+ # previous tokens)
+ if self.config.is_encoder_decoder:
+ last_hidden_states = outputs.decoder_hidden_states[-1]
+ else:
+ last_hidden_states = outputs.hidden_states[-1]
+
+ # next logit for contrastive search to select top-k candidate tokens
+ logit_for_next_step = outputs.logits[:, -1, :]
+
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ standardize_cache_format=True,
+ )
+ if not sequential:
+ # Expands model inputs top_k times, for batched forward passes (akin to beam search).
+ _, model_kwargs = self._expand_inputs_for_generation(
+ expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
+ )
+
+ past_key_values = model_kwargs.get("past_key_values")
+ if past_key_values is None:
+ raise ValueError(
+ f"{self.__class__.__name__} does not support caching and therefore **can't** be used "
+ "for contrastive search."
+ )
+ elif (
+ not isinstance(past_key_values[0], (tuple, torch.Tensor))
+ or past_key_values[0][0].shape[0] != batch_size
+ ):
+ raise ValueError(
+ f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be "
+ "used for contrastive search without further modifications."
+ )
+
+ # contrastive_search main logic start:
+ # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by
+ # degeneration penalty
+ processed_logit_for_next_step = logits_processor(input_ids, logit_for_next_step)
+ processed_logit_for_next_step = logits_warper(input_ids, processed_logit_for_next_step)
+ next_probs = nn.functional.softmax(processed_logit_for_next_step, dim=-1)
+
+ top_k_probs, top_k_ids = torch.topk(next_probs, dim=-1, k=top_k)
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_logits:
+ raw_logits += (logit_for_next_step,)
+ if output_scores:
+ scores += (processed_logit_for_next_step,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ # Replicates the new past_key_values to match the `top_k` candidates
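+ # (each key/value tensor is repeated along the batch dimension so that the `top_k` candidates can be
+ # scored in a single forward pass; in the sequential branch the repeat is a no-op)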
+ new_key_values = []
+ for layer in model_kwargs["past_key_values"]:
+ items = []
+ # item is either the key or the value matrix
+ for item in layer:
+ if sequential:
+ items.append(item.repeat_interleave(1, dim=0))
+ else:
+ items.append(item.repeat_interleave(top_k, dim=0))
+ new_key_values.append(tuple(items))
+ model_kwargs["past_key_values"] = tuple(new_key_values)
+
+ if sequential:
+ all_outputs = []
+ for i in range(top_k):
+ # compute the candidate tokens by the language model and collect their hidden_states
+ next_model_inputs = self.prepare_inputs_for_generation(top_k_ids[:, i].view(-1, 1), **model_kwargs)
+
+ outputs = self(
+ **next_model_inputs,
+ return_dict=True,
+ output_hidden_states=True,
+ output_attentions=output_attentions,
+ )
+ all_outputs.append(outputs)
+ outputs = stack_model_outputs(all_outputs)
+
+ else:
+ # compute the candidate tokens by the language model and collect their hidden_states
+ # assembles top_k_ids into batch of size k
+ next_model_inputs = self.prepare_inputs_for_generation(top_k_ids.view(-1, 1), **model_kwargs)
+
+ outputs = self(
+ **next_model_inputs,
+ return_dict=True,
+ output_hidden_states=True,
+ output_attentions=output_attentions,
+ )
+ # name is different for encoder-decoder and decoder-only models
+ if self.config.is_encoder_decoder:
+ next_hidden = outputs.decoder_hidden_states[-1]
+ full_hidden_states = outputs.decoder_hidden_states
+ else:
+ next_hidden = outputs.hidden_states[-1]
+ full_hidden_states = outputs.hidden_states
+
+ logits = outputs.logits[:, -1, :]
+
+ context_hidden = last_hidden_states.repeat_interleave(top_k, dim=0)
+
+ # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the
+ # model confidence. Keeping `selected_idx` on CPU enables multi-device contrastive search and doesn't
+ # introduce (noticeable) slowdowns on single-device runs.
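+ # (in `_ranking_fast`, each candidate's score is roughly
+ # (1 - penalty_alpha) * model_confidence - penalty_alpha * max cosine similarity with the context)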
+ selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k)
+ selected_idx = selected_idx.to("cpu")
+
+ # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing
+ # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores
+ # (model confidence minus degeneration penalty); (6) decoder hidden_states
+ next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx]
+ next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k))
+ next_hidden = next_hidden[range(batch_size), selected_idx, :]
+ last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1)
+
+ next_decoder_hidden_states = ()
+ for layer in full_hidden_states:
+ layer = torch.stack(torch.split(layer, top_k))[range(batch_size), selected_idx, :]
+ next_decoder_hidden_states += (layer,)
+
+ # generate past_key_values cache of only the selected token
+ if sequential:
+ next_model_input = self.prepare_inputs_for_generation(
+ top_k_ids[:, selected_idx].view(-1, 1), **model_kwargs
+ )
+
+ selected_outputs = self(
+ **next_model_input,
+ return_dict=True,
+ output_hidden_states=False,
+ output_attentions=False,
+ )
+ next_past_key_values = selected_outputs["past_key_values"]
+
+ else:
+ next_past_key_values = self._extract_past_from_model_output(outputs, standardize_cache_format=True)
+ new_key_values = ()
+ for layer in next_past_key_values:
+ items = ()
+ # item is either the key or the value matrix
+ for item in layer:
+ item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz]
+ item = item[range(batch_size), selected_idx, ...] # [B, num_head, seq_len, esz]
+ items += (item,)
+ new_key_values += (items,)
+ next_past_key_values = new_key_values
+
+ logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(batch_size), selected_idx, :]
+
+ # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration
+ if self.config.is_encoder_decoder:
+ next_step_cross_attentions = ()
+ next_step_decoder_attentions = ()
+ if output_attentions:
+ for layer in outputs.cross_attentions:
+ layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
+ next_step_cross_attentions += (layer,)
+ for layer in outputs.decoder_attentions:
+ layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
+ next_step_decoder_attentions += (layer,)
+ outputs = Seq2SeqLMOutput(
+ past_key_values=next_past_key_values,
+ decoder_hidden_states=next_decoder_hidden_states,
+ decoder_attentions=next_step_decoder_attentions or None,
+ cross_attentions=next_step_cross_attentions or None,
+ )
+ else:
+ next_step_attentions = ()
+ if output_attentions:
+ for layer in outputs.attentions:
+ layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
+ next_step_attentions += (layer,)
+ outputs = CausalLMOutputWithPast(
+ past_key_values=next_past_key_values,
+ hidden_states=next_decoder_hidden_states,
+ attentions=next_step_attentions or None,
+ )
+ # contrastive_search main logic end
+
+ if synced_gpus and this_peer_finished:
+ continue # don't waste resources running the code we don't need
+
+ # finished sentences should have their next token be a padding token
+ if eos_token_id is not None:
+ if pad_token_id is None:
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
+
+ # update generated ids, model inputs, and length for next step
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+ if streamer is not None:
+ streamer.put(next_tokens.cpu())
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+
+ # if eos_token was found in one sentence, set sentence to finished
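+ # (the product over the candidate eos ids is zero exactly when the new token matches any of them)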
+ if eos_token_id_tensor is not None:
+ unfinished_sequences = unfinished_sequences.mul(
+ next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
+ )
+
+ # stop when each sentence is finished
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
+ this_peer_finished = unfinished_sequences.max() == 0
+
+ if streamer is not None:
+ streamer.end()
+
+ if return_dict_in_generate:
+ # Contrastive search works by looking ahead at the next token, so we need to exclude it from
+ # `past_key_values` to be consistent with the other decoding methods
+ if model_kwargs.get("past_key_values") is not None:
+ past_key_values = []
+ for layer in model_kwargs["past_key_values"]:
+ layer_past_key_values = []
+ for item in layer:
+ layer_past_key_values.append(item[..., :-1, :])
+ past_key_values.append(tuple(layer_past_key_values))
+ model_kwargs["past_key_values"] = tuple(past_key_values)
+
+ if self.config.is_encoder_decoder:
+ return GenerateEncoderDecoderOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateDecoderOnlyOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return input_ids
+
+ def greedy_search(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `greedy_search` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._greedy_search(*args, **kwargs)
+
+ def _greedy_search(
+ self,
+ input_ids: torch.LongTensor,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ streamer: Optional["BaseStreamer"] = None,
+ **model_kwargs,
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be
+ used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._greedy_search`] directly. Use generate()
+ instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ max_length (`int`, *optional*, defaults to 20):
+ **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+ tokens. The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors
+ for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ model_kwargs:
+ Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
+ If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForCausalLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... StoppingCriteriaList,
+ ... MaxLengthCriteria,
+ ... )
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
+
+ >>> input_prompt = "It might be possible to"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+ >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
+
+ >>> outputs = model._greedy_search(
+ ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria
+ ... )
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ["It might be possible to get a better understanding of the nature of the problem, but it's not"]
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ # init attention / hidden states / scores tuples
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ scores = () if (return_dict_in_generate and output_scores) else None
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # keep track of which sequences are already finished
+ batch_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ this_peer_finished = False
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ # prepare model inputs
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+
+ # forward pass to get next token
+ outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if synced_gpus and this_peer_finished:
+ continue # don't waste resources running the code we don't need
+
+ next_token_logits = outputs.logits[:, -1, :]
+
+ # pre-process distribution
+ next_tokens_scores = logits_processor(input_ids, next_token_logits)
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_scores:
+ scores += (next_tokens_scores,)
+ if output_logits:
+ raw_logits += (next_token_logits,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ # argmax
+ next_tokens = torch.argmax(next_tokens_scores, dim=-1)
+
+ # finished sentences should have their next token be a padding token
+ if eos_token_id is not None:
+ if pad_token_id is None:
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
+
+ # update generated ids, model inputs, and length for next step
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+ if streamer is not None:
+ streamer.put(next_tokens.cpu())
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+
+ # if eos_token was found in one sentence, set sentence to finished
+ if eos_token_id_tensor is not None:
+ unfinished_sequences = unfinished_sequences.mul(
+ next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
+ )
+
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
+ this_peer_finished = unfinished_sequences.max() == 0
+
+ if streamer is not None:
+ streamer.end()
+
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ return GenerateEncoderDecoderOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateDecoderOnlyOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return input_ids
+
+ def sample(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `sample` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._sample(*args, **kwargs)
+
+ def _sample(
+ self,
+ input_ids: torch.LongTensor,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ logits_warper: Optional[LogitsProcessorList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ streamer: Optional["BaseStreamer"] = None,
+ **model_kwargs,
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and
+ can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._sample`] directly. Use generate() instead.
+ For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ logits_warper (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
+ to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ max_length (`int`, *optional*, defaults to 20):
+ **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+ tokens. The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for
+ more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or `torch.LongTensor`:
+ A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForCausalLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... TopKLogitsWarper,
+ ... TemperatureLogitsWarper,
+ ... StoppingCriteriaList,
+ ... MaxLengthCriteria,
+ ... )
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.config.pad_token_id = model.config.eos_token_id
+ >>> model.generation_config.pad_token_id = model.config.eos_token_id
+
+ >>> input_prompt = "Today is a beautiful day, and"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+ >>> # instantiate logits warpers
+ >>> logits_warper = LogitsProcessorList(
+ ... [
+ ... TopKLogitsWarper(50),
+ ... TemperatureLogitsWarper(0.7),
+ ... ]
+ ... )
+
+ >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
+
+ >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
+ >>> outputs = model._sample(
+ ... input_ids,
+ ... logits_processor=logits_processor,
+ ... logits_warper=logits_warper,
+ ... stopping_criteria=stopping_criteria,
+ ... )
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Today is a beautiful day, and we must do everything possible to make it a day of celebration.']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # keep track of which sequences are already finished
+ batch_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ this_peer_finished = False
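+ # unfinished_sequences holds a 1 for every sequence that is still generating and a 0 once it has finished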
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ # prepare model inputs
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+
+ # forward pass to get next token
+ outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if synced_gpus and this_peer_finished:
+ continue # don't waste resources running the code we don't need
+
+ next_token_logits = outputs.logits[:, -1, :]
+
+ # pre-process distribution
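+ # processors (e.g. min length) are applied first, then warpers (e.g. temperature, top-k) reshape the
+ # distribution that is sampled from below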
+ next_token_scores = logits_processor(input_ids, next_token_logits)
+ next_token_scores = logits_warper(input_ids, next_token_scores)
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_scores:
+ scores += (next_token_scores,)
+ if output_logits:
+ raw_logits += (next_token_logits,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ # sample
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
+
+ # finished sentences should have their next token be a padding token
+ if eos_token_id is not None:
+ if pad_token_id is None:
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
+
+ # update generated ids, model inputs, and length for next step
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+ if streamer is not None:
+ streamer.put(next_tokens.cpu())
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+
+ # if eos_token was found in one sentence, set sentence to finished
+ if eos_token_id_tensor is not None:
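+ # compare the newly sampled tokens against every EOS id; prod(...) drops to 0 as soon as any EOS
+ # matches, flipping that sequence to finished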
+ unfinished_sequences = unfinished_sequences.mul(
+ next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
+ )
+
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
+ this_peer_finished = unfinished_sequences.max() == 0
+
+ if streamer is not None:
+ streamer.end()
+
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ return GenerateEncoderDecoderOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateDecoderOnlyOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return input_ids
+
+ def _temporary_reorder_cache(self, past_key_values, beam_idx):
+ """
+ Temporary function to handle the different types of cache reordering processes while we roll out `Cache`.
+
+ TODO: standardize cache formats and make all models compatible with `Cache`. It would remove the need
+ for this function, with `Cache.reorder_cache` being the sole remaining code path
+ """
+ model_class = self.__class__.__name__.lower()
+ # Exception 1: code path for models using the legacy cache format
+ if isinstance(past_key_values, (tuple, list)):
+ past_key_values = self._reorder_cache(past_key_values, beam_idx)
+ # Exception 2: models with different cache formats. These are limited to `DynamicCache` until their
+ # cache format is standardized, to avoid adding complexity to the codebase.
+ elif "bloom" in model_class or "gptbigcode" in model_class:
+ if not isinstance(past_key_values, DynamicCache):
+ raise ValueError(
+ f"Using an unsupported cache format with {model_class}. Currently, it only supports the "
+ "legacy tuple format or `DynamicCache`"
+ )
+ past_key_values = self._reorder_cache(past_key_values, beam_idx)
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ # Standard code path: use the `Cache.reorder_cache`
+ else:
+ past_key_values.reorder_cache(beam_idx)
+ return past_key_values
+
+ def beam_search(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `beam_search` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._beam_search(*args, **kwargs)
+
+ def _beam_search(
+ self,
+ input_ids: torch.LongTensor,
+ beam_scorer: BeamScorer,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ sequential: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[GenerateBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **beam search decoding** and
+ can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._beam_search`] directly. Use generate()
+ instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ beam_scorer (`BeamScorer`):
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
+ sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ max_length (`int`, *optional*, defaults to 20):
+ **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+ tokens. The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for
+ more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length is reached (needed for ZeRO stage 3).
+ sequential (`bool`, defaults to `False`):
+ By default, beam search has `batch_size * num_beams` as effective batch size (see `beam_search()` for
+ more details). This flag will avoid parallelizing the beam search and will instead run beam search
+ sequentially.
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForSeq2SeqLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... BeamSearchScorer,
+ ... )
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
+
+ >>> encoder_input_str = "translate English to German: How old are you?"
+ >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
+
+
+ >>> # let's run beam search using 3 beams
+ >>> num_beams = 3
+ >>> # define decoder start token ids
+ >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
+ >>> input_ids = input_ids * model.config.decoder_start_token_id
+
+ >>> # add encoder_outputs to model keyword arguments
+ >>> model_kwargs = {
+ ... "encoder_outputs": model.get_encoder()(
+ ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
+ ... )
+ ... }
+
+ >>> # instantiate beam scorer
+ >>> beam_scorer = BeamSearchScorer(
+ ... batch_size=1,
+ ... num_beams=num_beams,
+ ... device=model.device,
+ ... )
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
+ ... ]
+ ... )
+
+ >>> outputs = model._beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Wie alt bist du?']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ sequential = sequential if sequential is not None else self.generation_config.low_memory
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ if len(stopping_criteria) == 0:
+ warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ batch_size = len(beam_scorer._beam_hyps)
+ num_beams = beam_scorer.num_beams
+
+ batch_beam_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ if num_beams * batch_size != batch_beam_size:
+ raise ValueError(
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+ )
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ beam_indices = (
+ tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
+ )
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
+ # of the first beam are considered to avoid sampling the exact same tokens across all beams.
+ beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
+ beam_scores[:, 1:] = -1e9
+ beam_scores = beam_scores.view((batch_size * num_beams,))
+
+ this_peer_finished = False
+
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
+
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+
+ # if sequential is True, split the input to batches of batch_size and run sequentially
+ if sequential:
+ if any(
+ model_name in self.__class__.__name__.lower()
+ for model_name in [
+ "fsmt",
+ "reformer",
+ "bloom",
+ "ctrl",
+ "gpt_bigcode",
+ "transo_xl",
+ "xlnet",
+ "cpm",
+ ]
+ ):
+ raise RuntimeError(
+ f"Currently generation for {self.__class__.__name__} is not supported "
+ f"for `low_memory beam_search`. Please open an issue on GitHub if you need this feature."
+ )
+
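+ # low-memory path: split the (batch_size * num_beams) rows into num_beams sub-batches of size
+ # batch_size, run the forward pass on each sub-batch sequentially, then stack the outputs back together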
+ inputs_per_sub_batches = _split_model_inputs(
+ model_inputs, split_size=batch_size, full_batch_size=batch_beam_size
+ )
+ outputs_per_sub_batch = [
+ self(
+ **inputs_per_sub_batch,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+ for inputs_per_sub_batch in inputs_per_sub_batches
+ ]
+
+ outputs = stack_model_outputs(outputs_per_sub_batch)
+
+ else: # Unchanged original behavior
+ outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if synced_gpus and this_peer_finished:
+ cur_len = cur_len + 1
+ continue # don't waste resources running the code we don't need
+
+ next_token_logits = outputs.logits[:, -1, :]
+ next_token_scores = nn.functional.log_softmax(
+ next_token_logits, dim=-1
+ ) # (batch_size * num_beams, vocab_size)
+
+ next_token_scores_processed = logits_processor(input_ids, next_token_scores)
+ next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(
+ next_token_scores_processed
+ )
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_scores:
+ scores += (next_token_scores_processed,)
+ if output_logits:
+ raw_logits += (next_token_logits,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ # reshape for beam search
+ vocab_size = next_token_scores.shape[-1]
+ next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
+
+ # Take the top max(2, 1 + len(eos_token_id)) * num_beams candidates so that at least num_beams of them are non-EOS continuations.
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0
+ next_token_scores, next_tokens = torch.topk(
+ next_token_scores, max(2, 1 + n_eos_tokens) * num_beams, dim=1, largest=True, sorted=True
+ )
+
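+ # next_tokens indexes the flattened (num_beams * vocab_size) scores: the integer division recovers
+ # the beam each candidate came from, the modulo recovers the token id within the vocabulary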
+ next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
+ next_tokens = next_tokens % vocab_size
+
+ # stateless
+ beam_outputs = beam_scorer.process(
+ input_ids,
+ next_token_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+
+ beam_scores = beam_outputs["next_beam_scores"]
+ beam_next_tokens = beam_outputs["next_beam_tokens"]
+ beam_idx = beam_outputs["next_beam_indices"]
+
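+ # reorder the running sequences to the surviving beams and append the newly selected tokens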
+ input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
+
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+ if model_kwargs.get("past_key_values", None) is not None:
+ model_kwargs["past_key_values"] = self._temporary_reorder_cache(
+ model_kwargs["past_key_values"], beam_idx
+ )
+
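+ # beam_indices tracks, for each surviving beam, the chain of beam indices it was built from; it is
+ # returned in the generation output when `output_scores=True`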
+ if return_dict_in_generate and output_scores:
+ beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
+
+ # increase cur_len
+ cur_len = cur_len + 1
+
+ if beam_scorer.is_done or all(stopping_criteria(input_ids, scores)):
+ this_peer_finished = True
+
+ sequence_outputs = beam_scorer.finalize(
+ input_ids,
+ beam_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ max_length=stopping_criteria.max_length,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+
+ if return_dict_in_generate:
+ if not output_scores:
+ sequence_outputs["sequence_scores"] = None
+
+ if self.config.is_encoder_decoder:
+ return GenerateBeamEncoderDecoderOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateBeamDecoderOnlyOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return sequence_outputs["sequences"]
+
+ def beam_sample(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `beam_sample` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._beam_sample(*args, **kwargs)
+
+ def _beam_sample(
+ self,
+ input_ids: torch.LongTensor,
+ beam_scorer: BeamScorer,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ logits_warper: Optional[LogitsProcessorList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ **model_kwargs,
+ ) -> Union[GenerateBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **beam search multinomial
+ sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._beam_sample`] directly. Use generate()
+ instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ beam_scorer (`BeamScorer`):
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
+ sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ logits_warper (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
+ to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ max_length (`int`, *optional*, defaults to 20):
+ **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+ tokens. The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for
+ more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length is reached (needed for ZeRO stage 3).
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForSeq2SeqLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... TopKLogitsWarper,
+ ... TemperatureLogitsWarper,
+ ... BeamSearchScorer,
+ ... )
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
+
+ >>> encoder_input_str = "translate English to German: How old are you?"
+ >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
+
+ >>> # let's run beam search using 3 beams
+ >>> num_beams = 3
+ >>> # define decoder start token ids
+ >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
+ >>> input_ids = input_ids * model.config.decoder_start_token_id
+
+ >>> # add encoder_outputs to model keyword arguments
+ >>> model_kwargs = {
+ ... "encoder_outputs": model.get_encoder()(
+ ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
+ ... )
+ ... }
+
+ >>> # instantiate beam scorer
+ >>> beam_scorer = BeamSearchScorer(
+ ... batch_size=1,
+ ... max_length=model.config.max_length,
+ ... num_beams=num_beams,
+ ... device=model.device,
+ ... )
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)]
+ ... )
+ >>> # instantiate logits warpers
+ >>> logits_warper = LogitsProcessorList(
+ ... [
+ ... TopKLogitsWarper(50),
+ ... TemperatureLogitsWarper(0.7),
+ ... ]
+ ... )
+
+ >>> outputs = model._beam_sample(
+ ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs
+ ... )
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Wie alt bist du?']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ batch_size = len(beam_scorer._beam_hyps)
+ num_beams = beam_scorer.num_beams
+
+ batch_beam_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ beam_indices = (
+ tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
+ )
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
+ beam_scores = beam_scores.view((batch_size * num_beams,))
+
+ this_peer_finished = False
+
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+
+ outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if synced_gpus and this_peer_finished:
+ cur_len = cur_len + 1
+ continue # don't waste resources running the code we don't need
+
+ next_token_logits = outputs.logits[:, -1, :]
+
+ next_token_scores = nn.functional.log_softmax(
+ next_token_logits, dim=-1
+ ) # (batch_size * num_beams, vocab_size)
+
+ next_token_scores_processed = logits_processor(input_ids, next_token_scores)
+ next_token_scores_processed = logits_warper(input_ids, next_token_scores_processed)
+ next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(
+ next_token_scores_processed
+ )
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_scores:
+ scores += (next_token_scores_processed,)
+ if output_logits:
+ raw_logits += (next_token_logits,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ # reshape for beam search
+ vocab_size = next_token_scores.shape[-1]
+ next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
+
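+ # draw 2 * num_beams candidate tokens per batch item from the warped distribution over all
+ # (beam, token) pairs, then sort them by score so the beam scorer sees the best candidates first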
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
+
+ next_tokens = torch.multinomial(probs, num_samples=2 * num_beams)
+ next_token_scores = torch.gather(next_token_scores, -1, next_tokens)
+
+ next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1)
+ next_tokens = torch.gather(next_tokens, -1, _indices)
+
+ next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
+ next_tokens = next_tokens % vocab_size
+
+ # stateless
+ beam_outputs = beam_scorer.process(
+ input_ids,
+ next_token_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+ beam_scores = beam_outputs["next_beam_scores"]
+ beam_next_tokens = beam_outputs["next_beam_tokens"]
+ beam_idx = beam_outputs["next_beam_indices"]
+
+ input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
+
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+ if model_kwargs.get("past_key_values", None) is not None:
+ model_kwargs["past_key_values"] = self._temporary_reorder_cache(
+ model_kwargs["past_key_values"], beam_idx
+ )
+
+ if return_dict_in_generate and output_scores:
+ beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
+
+ # increase cur_len
+ cur_len = cur_len + 1
+
+ if beam_scorer.is_done or all(stopping_criteria(input_ids, scores)):
+ this_peer_finished = True
+
+ sequence_outputs = beam_scorer.finalize(
+ input_ids,
+ beam_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ max_length=stopping_criteria.max_length,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+
+ if return_dict_in_generate:
+ if not output_scores:
+ sequence_outputs["sequence_scores"] = None
+
+ if self.config.is_encoder_decoder:
+ return GenerateBeamEncoderDecoderOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateBeamDecoderOnlyOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return sequence_outputs["sequences"]
+
+ def group_beam_search(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `group_beam_search` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._group_beam_search(*args, **kwargs)
+
+ def _group_beam_search(
+ self,
+ input_ids: torch.LongTensor,
+ beam_scorer: BeamScorer,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ **model_kwargs,
+ ):
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **diverse beam search
+ decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._group_beam_search`] directly. Use
+ generate() instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ beam_scorer (`BeamScorer`):
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
+ sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ max_length (`int`, *optional*, defaults to 20):
+ **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+ tokens. The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for
+ more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length is reached (needed for ZeRO stage 3).
+ model_kwargs:
+ Additional model specific kwargs that will be forwarded to the `forward` function of the model. If
+ model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForSeq2SeqLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... HammingDiversityLogitsProcessor,
+ ... BeamSearchScorer,
+ ... )
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
+
+ >>> encoder_input_str = "translate English to German: How old are you?"
+ >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
+
+
+ >>> # let's run diverse beam search using 6 beams
+ >>> num_beams = 6
+ >>> # define decoder start token ids
+ >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
+ >>> input_ids = input_ids * model.config.decoder_start_token_id
+
+ >>> # add encoder_outputs to model keyword arguments
+ >>> model_kwargs = {
+ ... "encoder_outputs": model.get_encoder()(
+ ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
+ ... )
+ ... }
+
+ >>> # instantiate beam scorer
+ >>> beam_scorer = BeamSearchScorer(
+ ... batch_size=1,
+ ... max_length=model.config.max_length,
+ ... num_beams=num_beams,
+ ... device=model.device,
+ ... num_beam_groups=3,
+ ... )
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3),
+ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
+ ... ]
+ ... )
+
+ >>> outputs = model._group_beam_search(
+ ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs
+ ... )
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Wie alt bist du?']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ num_beams = beam_scorer.num_beams
+ num_beam_groups = beam_scorer.num_beam_groups
+ num_sub_beams = num_beams // num_beam_groups
+ batch_size = len(beam_scorer._beam_hyps) // num_beam_groups
+ device = input_ids.device
+
+ batch_beam_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ if return_dict_in_generate and output_scores:
+ beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)]
+ else:
+ beam_indices = None
+
+ if num_beams * batch_size != batch_beam_size:
+ raise ValueError(
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+ )
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in
+ # the same group don't produce the same tokens every time.
+ beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
+ beam_scores[:, ::num_sub_beams] = 0
+ beam_scores = beam_scores.view((batch_size * num_beams,))
+
+ this_peer_finished = False
+
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ # predicted tokens in cur_len step
+ current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
+
+ # indices which will form the beams in the next time step
+ reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
+
+ # do one decoder step on all beams of all sentences in batch
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+ outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if synced_gpus and this_peer_finished:
+ cur_len = cur_len + 1
+ continue # don't waste resources running the code we don't need
+
+ if output_scores:
+ processed_score = torch.zeros_like(outputs.logits[:, -1, :])
+ if output_logits:
+ raw_logit_score = outputs.logits[:, -1, :]
+
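+ # advance each group of num_sub_beams beams separately; the logits processors receive
+ # current_tokens and beam_group_idx so diversity-promoting processors can penalize tokens already
+ # chosen by earlier groups in this step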
+ for beam_group_idx in range(num_beam_groups):
+ group_start_idx = beam_group_idx * num_sub_beams
+ group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
+ group_size = group_end_idx - group_start_idx
+
+ # indices of beams of current group among all sentences in batch
+ batch_group_indices = []
+
+ for batch_idx in range(batch_size):
+ batch_group_indices.extend(
+ [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
+ )
+ group_input_ids = input_ids[batch_group_indices]
+
+ # select outputs of beams of current group only
+ next_token_logits = outputs.logits[batch_group_indices, -1, :]
+
+ next_token_scores = nn.functional.log_softmax(
+ next_token_logits, dim=-1
+ ) # (batch_size * group_size, vocab_size)
+ vocab_size = next_token_scores.shape[-1]
+
+ next_token_scores_processed = logits_processor(
+ group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx
+ )
+ next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)
+ next_token_scores = next_token_scores.expand_as(next_token_scores_processed)
+
+ if output_scores:
+ processed_score[batch_group_indices] = next_token_scores_processed
+
+ # reshape for beam search
+ next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)
+
+ # Take the top max(2, 1 + len(eos_token_id)) * group_size candidates so that at least group_size of them are non-EOS continuations.
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0
+ next_token_scores, next_tokens = torch.topk(
+ next_token_scores, max(2, 1 + n_eos_tokens) * group_size, dim=1, largest=True, sorted=True
+ )
+
+ next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
+ next_tokens = next_tokens % vocab_size
+
+ # stateless
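+ # sum(beam_indices, ()) concatenates the per-group tuples of beam-index histories into one flat tuple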
+ process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
+ beam_outputs = beam_scorer.process(
+ group_input_ids,
+ next_token_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ beam_indices=process_beam_indices,
+ group_index=beam_group_idx,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+ beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
+ beam_next_tokens = beam_outputs["next_beam_tokens"]
+ beam_idx = beam_outputs["next_beam_indices"]
+
+ if return_dict_in_generate and output_scores:
+ beam_indices[beam_group_idx] = tuple(
+ beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0]))
+ )
+
+ input_ids[batch_group_indices] = group_input_ids[beam_idx]
+ group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
+ current_tokens[batch_group_indices] = group_input_ids[:, -1]
+
+ # (beam_idx // group_size) -> batch_idx
+ # (beam_idx % group_size) -> offset of idx inside the group
+ reordering_indices[batch_group_indices] = (
+ num_beams * torch.div(beam_idx, group_size, rounding_mode="floor")
+ + group_start_idx
+ + (beam_idx % group_size)
+ )
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_scores:
+ scores += (processed_score,)
+ if output_logits:
+ raw_logits += (raw_logit_score,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)
+
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+ if model_kwargs.get("past_key_values", None) is not None:
+ model_kwargs["past_key_values"] = self._temporary_reorder_cache(
+ model_kwargs["past_key_values"], reordering_indices
+ )
+
+ # increase cur_len
+ cur_len = cur_len + 1
+
+ if beam_scorer.is_done or all(stopping_criteria(input_ids, scores)):
+ this_peer_finished = True
+
+ final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
+ sequence_outputs = beam_scorer.finalize(
+ input_ids,
+ beam_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ max_length=stopping_criteria.max_length,
+ beam_indices=final_beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+
+ if return_dict_in_generate:
+ if not output_scores:
+ sequence_outputs["sequence_scores"] = None
+
+ if self.config.is_encoder_decoder:
+ return GenerateBeamEncoderDecoderOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateBeamDecoderOnlyOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return sequence_outputs["sequences"]
+
+ def constrained_beam_search(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `constrained_beam_search` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._constrained_beam_search(*args, **kwargs)
+
+ def _constrained_beam_search(
+ self,
+ input_ids: torch.LongTensor,
+ constrained_beam_scorer: ConstrainedBeamSearchScorer,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ max_length: Optional[int] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: Optional[bool] = None,
+ **model_kwargs,
+ ) -> Union[GenerateBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **constrained beam search
+ decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
+
+
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._constrained_beam_search`] directly. Use
+ generate() instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ constrained_beam_scorer (`ConstrainedBeamSearchScorer`):
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
+ sorted during generation, while satisfying a list of positive constraints. For more information, the
+ documentation of [`ConstrainedBeamSearchScorer`] should be read.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ max_length (`int`, *optional*, defaults to 20):
+ **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
+ tokens. The maximum length of the sequence to be generated.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for
+ more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length is reached (needed for ZeRO stage 3).
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForSeq2SeqLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... ConstrainedBeamSearchScorer,
+ ... PhrasalConstraint,
+ ... )
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
+
+ >>> encoder_input_str = "translate English to German: How old are you?"
+ >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
+
+
+        >>> # let's run constrained beam search using 3 beams
+ >>> num_beams = 3
+ >>> # define decoder start token ids
+ >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
+ >>> input_ids = input_ids * model.config.decoder_start_token_id
+
+ >>> # add encoder_outputs to model keyword arguments
+ >>> model_kwargs = {
+ ... "encoder_outputs": model.get_encoder()(
+ ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
+ ... )
+ ... }
+
+ >>> constraint_str = "Sie"
+ >>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token
+ >>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)]
+
+
+ >>> # instantiate beam scorer
+ >>> beam_scorer = ConstrainedBeamSearchScorer(
+ ... batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints
+ ... )
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
+ ... ]
+ ... )
+
+ >>> outputs = model._constrained_beam_search(
+ ... input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs
+ ... )
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Wie alt sind Sie?']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ if len(stopping_criteria) == 0:
+ warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ batch_size = len(constrained_beam_scorer._beam_hyps)
+ num_beams = constrained_beam_scorer.num_beams
+
+ batch_beam_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ if num_beams * batch_size != batch_beam_size:
+ raise ValueError(
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+ )
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ beam_indices = (
+ tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
+ )
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
+ # of the first beam are considered to avoid sampling the exact same tokens across all beams.
+ beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
+ beam_scores[:, 1:] = -1e9
+ beam_scores = beam_scores.view((batch_size * num_beams,))
+
+ this_peer_finished = False
+
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+
+ outputs = self(
+ **model_inputs,
+ return_dict=True,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if synced_gpus and this_peer_finished:
+ cur_len = cur_len + 1
+ continue # don't waste resources running the code we don't need
+
+ next_token_logits = outputs.logits[:, -1, :]
+ next_token_scores = nn.functional.log_softmax(
+ next_token_logits, dim=-1
+ ) # (batch_size * num_beams, vocab_size)
+
+ next_token_scores_processed = logits_processor(input_ids, next_token_scores)
+
+ next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(
+ next_token_scores_processed
+ )
+
+ scores_for_all_vocab = next_token_scores.clone()
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_scores:
+ scores += (next_token_scores,)
+ if output_logits:
+ raw_logits += (next_token_logits,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ # reshape for beam search
+ vocab_size = next_token_scores.shape[-1]
+ next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
+
+            # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non-EOS token per beam.
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0
+ next_token_scores, next_tokens = torch.topk(
+ next_token_scores, max(2, 1 + n_eos_tokens) * num_beams, dim=1, largest=True, sorted=True
+ )
+
+ next_indices = (next_tokens / vocab_size).long()
+ next_tokens = next_tokens % vocab_size
+
+ # stateless
+ beam_outputs = constrained_beam_scorer.process(
+ input_ids,
+ next_token_scores,
+ next_tokens,
+ next_indices,
+ scores_for_all_vocab,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+ beam_scores = beam_outputs["next_beam_scores"]
+ beam_next_tokens = beam_outputs["next_beam_tokens"]
+ beam_idx = beam_outputs["next_beam_indices"]
+
+ input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+ if model_kwargs.get("past_key_values", None) is not None:
+ model_kwargs["past_key_values"] = self._temporary_reorder_cache(
+ model_kwargs["past_key_values"], beam_idx
+ )
+
+ if return_dict_in_generate and output_scores:
+ beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
+
+ # increase cur_len
+ cur_len = cur_len + 1
+
+ if constrained_beam_scorer.is_done or all(stopping_criteria(input_ids, scores)):
+ this_peer_finished = True
+
+ sequence_outputs = constrained_beam_scorer.finalize(
+ input_ids,
+ beam_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ max_length=stopping_criteria.max_length,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+
+ if return_dict_in_generate:
+ if not output_scores:
+ sequence_outputs["sequence_scores"] = None
+ if self.config.is_encoder_decoder:
+ return GenerateBeamEncoderDecoderOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateBeamDecoderOnlyOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return sequence_outputs["sequences"]
+
+ def assisted_decoding(self, *args, **kwargs):
+ logger.warning_once(
+ "Calling `_assisted_decoding` directly is deprecated and will be removed in v4.41. Use `generate` or a "
+ "custom generation loop instead.",
+ )
+ return self._assisted_decoding(*args, **kwargs)
+
+ def _assisted_decoding(
+ self,
+ input_ids: torch.LongTensor,
+ candidate_generator: Optional["CandidateGenerator"] = None,
+ do_sample: bool = False,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ logits_warper: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ pad_token_id: Optional[int] = None,
+ eos_token_id: Optional[Union[int, List[int]]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_scores: Optional[bool] = None,
+ output_logits: Optional[bool] = None,
+ return_dict_in_generate: Optional[bool] = None,
+ synced_gpus: bool = False,
+ streamer: Optional["BaseStreamer"] = None,
+ **model_kwargs,
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **greedy decoding** or
+ **sample** (depending on `do_sample`), assisted by candidate sequences. Assisted generation is an example of a
+ candidate decoding strategy. Can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text
+ models.
+
+ In most cases, you do not need to call [`~generation.GenerationMixin._assisted_decoding`] directly. Use
+ generate() instead. For an overview of generation strategies and code examples, check the [following
+ guide](../generation_strategies).
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ candidate_generator (`CandidateGenerator`, *optional*):
+ A derived instance of [`CandidateGenerator`] that defines how candidate sequences are generated. For
+ more information, the documentation of [`CandidateGenerator`] should be read.
+ do_sample (`bool`, *optional*, defaults to `False`):
+                Whether or not to use sampling; use greedy decoding otherwise.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ logits_warper (`LogitsProcessorList`, *optional*):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
+ to warp the prediction score distribution of the language modeling head applied before multinomial
+ sampling at each generation step.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more details.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more details.
+ output_scores (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
+ output_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for
+ more details.
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+                Whether to continue running the while loop until max_length is reached (needed for ZeRO stage 3).
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ model_kwargs:
+ Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
+ If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import (
+ ... AutoTokenizer,
+ ... AutoModelForCausalLM,
+ ... LogitsProcessorList,
+ ... MinLengthLogitsProcessor,
+ ... StoppingCriteriaList,
+ ... MaxLengthCriteria,
+ ... )
+ >>> from transformers.generation import AssistedCandidateGenerator
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ >>> assistant_model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
+ >>> input_prompt = "It might be possible to"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+ >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
+ >>> candidate_generator = AssistedCandidateGenerator(
+ ... input_ids=input_ids,
+ ... assistant_model=assistant_model,
+ ... generation_config=model.generation_config,
+ ... logits_processor=logits_processor,
+ ... model_kwargs={},
+ ... )
+ >>> outputs = model._assisted_decoding(
+ ... input_ids,
+ ... candidate_generator=candidate_generator,
+ ... logits_processor=logits_processor,
+ ... stopping_criteria=stopping_criteria,
+ ... )
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ["It might be possible to get a better understanding of the nature of the problem, but it's not"]
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
+ if eos_token_id is not None and pad_token_id is None:
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # keep track of which sequences are already finished
+ batch_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ # other auxiliary variables
+ max_len = stopping_criteria[0].max_length
+
+ this_peer_finished = False
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ cur_len = input_ids.shape[-1]
+
+ # 1. Fetch candidate sequences from a `CandidateGenerator`
+ candidate_input_ids, candidate_logits = candidate_generator.get_candidates(input_ids)
+ candidate_input_ids = candidate_input_ids.to(self.device)
+ if candidate_logits is not None:
+ candidate_logits = candidate_logits.to(self.device)
+
+ candidate_length = candidate_input_ids.shape[1] - input_ids.shape[1]
+ last_assistant_token_is_eos = (
+ ~candidate_input_ids[:, -1]
+ .tile(eos_token_id_tensor.shape[0], 1)
+ .ne(eos_token_id_tensor.unsqueeze(1))
+ .prod(dim=0)
+ .bool()
+ )
+
+ # 2. Use the original model to obtain the next token logits given the candidate sequence. We obtain
+ # `candidate_length + 1` relevant logits from this process: in the event that all candidates are correct,
+ # we use this forward pass to also pick the subsequent logits in the original model.
+
+ # 2.1. Prepare the model inputs
+ candidate_kwargs = copy.copy(model_kwargs)
+ candidate_kwargs = _prepare_attention_mask(
+ candidate_kwargs, candidate_input_ids.shape[1], self.config.is_encoder_decoder
+ )
+ candidate_kwargs = _prepare_token_type_ids(candidate_kwargs, candidate_input_ids.shape[1])
+ if "cache_position" in candidate_kwargs:
+ candidate_kwargs["cache_position"] = torch.cat(
+ (
+ candidate_kwargs["cache_position"],
+ torch.arange(cur_len, cur_len + candidate_length, device=input_ids.device, dtype=torch.long),
+ ),
+ dim=0,
+ )
+
+ model_inputs = self.prepare_inputs_for_generation(candidate_input_ids, **candidate_kwargs)
+
+ # 2.2. Run a forward pass on the candidate sequence
+ outputs = self(
+ **model_inputs,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ # 2.3. Process the new logits
+ new_logits = outputs.logits[:, -candidate_length - 1 :] # excludes the input prompt if present
+ next_token_logits = new_logits.clone()
+ if len(logits_processor) > 0:
+ for i in range(candidate_length + 1):
+ new_logits[:, i, :] = logits_processor(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :])
+ if len(logits_warper) > 0:
+ for i in range(candidate_length + 1):
+ new_logits[:, i, :] = logits_warper(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :])
+
+ # 3. Select the accepted tokens. There are two possible cases:
+ # Case 1: `do_sample=True` and we have logits for the candidates (originally from speculative decoding)
+ # 👉 Apply algorithm 1 from the speculative decoding paper (https://arxiv.org/pdf/2211.17192.pdf).
+ max_matches = max_len - cur_len - 1
+ if do_sample and candidate_logits is not None:
+ valid_tokens, n_matches = _speculative_sampling(
+ candidate_input_ids,
+ candidate_logits,
+ candidate_length,
+ new_logits,
+ last_assistant_token_is_eos,
+ max_matches,
+ )
+
+ # Case 2: all other cases (originally from assisted generation) 👉 Compare the tokens selected from the
+ # original model logits with the candidate tokens. We can keep the candidate tokens until the first
+ # mismatch, or until the max length is reached.
+ else:
+ if do_sample:
+ probs = new_logits.softmax(dim=-1)
+ selected_tokens = torch.multinomial(probs[0, :, :], num_samples=1).squeeze(1)[None, :]
+ else:
+ selected_tokens = new_logits.argmax(dim=-1)
+
+ candidate_new_tokens = candidate_input_ids[:, cur_len:]
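+                # The cumulative sum of mismatches below stays at 0 until the first position where the candidate
+                # and the model's own pick disagree, so the sum counts the leading agreed-upon tokens
+                # (e.g. matches [1, 1, 0, 1] -> n_matches = 2).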
+ n_matches = ((~(candidate_new_tokens == selected_tokens[:, :-1])).cumsum(dim=-1) < 1).sum()
+
+ # Ensure we don't generate beyond max_len or an EOS token
+ if last_assistant_token_is_eos and n_matches == candidate_length:
+ n_matches -= 1
+ n_matches = min(n_matches, max_matches)
+ valid_tokens = selected_tokens[:, : n_matches + 1]
+
+ # 4. Update variables according to the number of matching assistant tokens. Remember: the token generated
+ # by the model after the last candidate match is also valid, as it is generated from a correct sequence.
+ # Because of this last token, assisted generation search reduces to a normal greedy search/sample if there
+ # is no match.
+
+ # 4.1. Get the valid continuation, after the matching tokens
+ input_ids = torch.cat((input_ids, valid_tokens), dim=-1)
+ if streamer is not None:
+ streamer.put(valid_tokens.cpu())
+ new_cur_len = input_ids.shape[-1]
+
+ # 4.2. Discard past key values relative to unused assistant tokens
+ new_cache_size = new_cur_len - 1
+ outputs.past_key_values = _crop_past_key_values(self, outputs.past_key_values, new_cache_size)
+
+ # 5. Update the candidate generation strategy if needed
+ candidate_generator.update_candidate_strategy(input_ids, new_logits, n_matches)
+
+ if synced_gpus and this_peer_finished:
+ continue # don't waste resources running the code we don't need
+
+ # Store scores, attentions and hidden_states when required
+ # Assistant: modified to append one tuple element per token, as in the other generation methods.
+ if return_dict_in_generate:
+ if output_scores:
+ scores += tuple(new_logits[:, i, :] for i in range(n_matches + 1))
+ if output_logits:
+ raw_logits += (next_token_logits,)
+
+ if "past_key_values" not in model_kwargs:
+ added_len = new_cur_len
+ else:
+ added_len = n_matches + 1
+
+ if output_attentions:
+ if self.config.is_encoder_decoder:
+ cross_attentions = _split_model_outputs(
+ cross_attentions, outputs.cross_attentions, cur_len, added_len
+ )
+ decoder_attentions = _split_model_outputs(
+ decoder_attentions,
+ outputs.decoder_attentions,
+ cur_len,
+ added_len,
+ is_decoder_attention=True,
+ )
+ else:
+ decoder_attentions = _split_model_outputs(
+ decoder_attentions,
+ outputs.attentions,
+ cur_len,
+ added_len,
+ is_decoder_attention=True,
+ )
+ if output_hidden_states:
+ if self.config.is_encoder_decoder:
+ decoder_hidden_states = _split_model_outputs(
+ decoder_hidden_states, outputs.decoder_hidden_states, cur_len, added_len
+ )
+ else:
+ decoder_hidden_states = _split_model_outputs(
+ decoder_hidden_states, outputs.hidden_states, cur_len, added_len
+ )
+
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+
+ # if eos_token was found in one sentence, set sentence to finished
+ if eos_token_id_tensor is not None:
+ unfinished_sequences = unfinished_sequences.mul(
+ input_ids[:, -1]
+ .tile(eos_token_id_tensor.shape[0], 1)
+ .ne(eos_token_id_tensor.unsqueeze(1))
+ .prod(dim=0)
+ )
+
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
+ this_peer_finished = unfinished_sequences.max() == 0
+
+ if streamer is not None:
+ streamer.end()
+
+ if (
+ hasattr(candidate_generator, "assistant_model")
+ and candidate_generator.assistant_model.generation_config.num_assistant_tokens_schedule == "heuristic"
+ ):
+ candidate_generator.assistant_model.generation_config.num_assistant_tokens = (
+ candidate_generator.num_assistant_tokens
+ )
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ return GenerateEncoderDecoderOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateDecoderOnlyOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return input_ids
+
+
+def _speculative_sampling(
+ candidate_input_ids,
+ candidate_logits,
+ candidate_length,
+ new_logits,
+ last_assistant_token_is_eos,
+ max_matches,
+):
+ """
+ Applies sampling as in the speculative decoding paper (https://arxiv.org/pdf/2211.17192.pdf, algorithm 1). Returns
+ the selected tokens, as well as the number of candidate matches.
+
+ NOTE: Unless otherwise stated, the variable names match those in the paper.
+ """
+ new_candidate_input_ids = candidate_input_ids[:, -candidate_length:]
+ # Gets the probabilities from the logits. q_i and p_i denote the assistant and model probabilities of the tokens
+ # selected by the assistant, respectively.
+ q = candidate_logits.softmax(dim=-1)
+ q_i = q[:, torch.arange(candidate_length), new_candidate_input_ids].squeeze(0, 1)
+ p = new_logits.softmax(dim=-1)
+ p_i = p[:, torch.arange(candidate_length), new_candidate_input_ids].squeeze(0, 1)
+ probability_ratio = p_i / q_i
+
+ # When probability_ratio > 1 (i.e. q_i(x) < p_i(x), or "assistant probability of the candidate token is smaller
+ # than the model probability for the same token"), keep the token. Otherwise reject with p = 1 - probability_ratio
+ # (= keep with p = probability_ratio). Keep all the tokens until the first rejection
+ r_i = torch.rand_like(probability_ratio)
+ is_accepted = r_i <= probability_ratio
+ n_matches = ((~is_accepted).cumsum(dim=-1) < 1).sum() # this is `n` in algorithm 1
+
+ # Ensure we don't generate beyond max_len or an EOS token (not in algorithm 1, but needed for correct behavior)
+ if last_assistant_token_is_eos and n_matches == candidate_length:
+ # Output length is assumed to be `n_matches + 1`. Since we won't generate another token with the target model
+ # due to acceptance on EOS we fix `n_matches`
+ n_matches -= 1
+ valid_tokens = new_candidate_input_ids[:, : n_matches + 1]
+ else:
+ n_matches = min(n_matches, max_matches)
+
+ # Next token selection: if there is a rejection, adjust the distribution from the main model before sampling.
+ gamma = min(candidate_logits.shape[1], max_matches)
+ p_n_plus_1 = p[:, n_matches, :]
+ if n_matches < gamma:
+ q_n_plus_1 = q[:, n_matches, :]
+ p_prime = torch.clamp((p_n_plus_1 - q_n_plus_1), min=0)
+ p_prime.div_(p_prime.sum())
+ else:
+ p_prime = p_n_plus_1
+ t = torch.multinomial(p_prime, num_samples=1).squeeze(1)[None, :]
+
+ # The selected tokens include the matches (if any) plus the next sampled tokens
+ if n_matches > 0:
+ valid_tokens = torch.cat((new_candidate_input_ids[:, :n_matches], t), dim=-1)
+ else:
+ valid_tokens = t
+
+ return valid_tokens, n_matches
+
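+
+# NOTE (editor's sketch): the helper below is purely illustrative and is never called by the library. It is a minimal
+# sketch of how `_speculative_sampling` can be exercised in isolation with toy tensors (tiny vocabulary, random
+# logits); every name and value in it is made up for the demonstration.
+def _speculative_sampling_sketch():
+    vocab_size, candidate_length = 8, 3
+    # a prompt of length 2 followed by 3 assistant-proposed tokens
+    candidate_input_ids = torch.tensor([[1, 2, 3, 4, 5]])
+    candidate_logits = torch.randn(1, candidate_length, vocab_size)  # assistant logits for the 3 candidate tokens
+    new_logits = torch.randn(1, candidate_length + 1, vocab_size)  # main-model logits, one extra position at the end
+    last_assistant_token_is_eos = torch.tensor([False])
+    valid_tokens, n_matches = _speculative_sampling(
+        candidate_input_ids, candidate_logits, candidate_length, new_logits, last_assistant_token_is_eos, 10
+    )
+    # `valid_tokens` holds the accepted candidate tokens (if any) plus one freshly sampled token
+    return valid_tokens, n_matches
+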
+
+def _split_model_outputs(outputs, new_outputs, cur_len, added_len, is_decoder_attention=False):
+ """
+ Given the (decoder/cross attentions)/(decoder hidden states) for multiple generated tokens, splits it into a tuple
+ where each member corresponds to a single generated token.
+ """
+ # Retrocompatibility: in our generation functions, the first iteration includes the attention/hidden states for the
+ # prompt.
+ if len(outputs) == 0:
+ new_tuple = ()
+ for layer in new_outputs:
+ last_dim_size = cur_len if is_decoder_attention else layer.shape[-1]
+ new_tuple += (layer[..., :cur_len, :last_dim_size],)
+ outputs += (new_tuple,)
+ # The first iteration contains the prompt + 1 generated token, let's update the length variables accordingly
+ cur_len += 1
+ added_len -= cur_len
+
+ for i in range(added_len):
+ new_tuple = ()
+ for layer in new_outputs:
+ last_dim_size = cur_len + i if is_decoder_attention else layer.shape[-1]
+ new_tuple += (layer[..., i : i + 1, :last_dim_size],)
+ outputs += (new_tuple,)
+ return outputs
+
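+
+# NOTE (editor's sketch): illustrative only, never called by the library. It mimics the cached assisted-generation
+# case, where `new_outputs` covers only the freshly accepted positions, to show that `_split_model_outputs` regroups
+# the per-layer tensors into one tuple entry per generated token. Shapes and names are assumptions for the demo.
+def _split_model_outputs_sketch():
+    prior = ((torch.zeros(1, 4, 16),),)  # entries accumulated on earlier steps (one layer, hidden size 16)
+    new_hidden = (torch.zeros(1, 2, 16),)  # the model returned two newly accepted positions in a single tensor
+    regrouped = _split_model_outputs(prior, new_hidden, cur_len=4, added_len=2)
+    return len(regrouped)  # 3: the prior entry plus one new entry per accepted token
+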
+
+def _ranking_fast(
+ context_hidden: torch.FloatTensor,
+ next_hidden: torch.FloatTensor,
+ next_top_k_probs: torch.FloatTensor,
+ alpha: float,
+ beam_width: int,
+) -> torch.FloatTensor:
+ """
+ Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
+ in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
+ row in the batch.
+ """
+ norm_context_hidden = context_hidden / context_hidden.norm(dim=2, keepdim=True)
+ norm_next_hidden = next_hidden / next_hidden.norm(dim=2, keepdim=True)
+ cosine_matrix = torch.matmul(norm_context_hidden, norm_next_hidden.transpose(1, 2)).squeeze(-1) # [B*K, S]
+ degeneration_penalty, _ = torch.max(cosine_matrix, dim=-1) # [B*K]
+ next_top_k_probs = next_top_k_probs.view(-1) # [B*K]
+ contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty
+ contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K]
+ _, selected_idx = contrastive_score.max(dim=-1) # [B]
+ return selected_idx
+
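+
+# NOTE (editor's sketch): illustrative only, never called by the library. It shows the shapes `_ranking_fast` expects
+# for contrastive search: flattened (batch * beam_width) candidates with their context hidden states and top-k
+# probabilities, returning one winning candidate index per batch row. All sizes are made up for the demo.
+def _ranking_fast_sketch():
+    batch, beam_width, seq_len, hidden = 2, 4, 5, 8
+    context_hidden = torch.randn(batch * beam_width, seq_len, hidden)  # context hidden states, repeated per candidate
+    next_hidden = torch.randn(batch * beam_width, 1, hidden)  # hidden state of each top-k candidate token
+    next_top_k_probs = torch.rand(batch, beam_width)  # model probability of each candidate
+    selected = _ranking_fast(context_hidden, next_hidden, next_top_k_probs, alpha=0.6, beam_width=beam_width)
+    return selected  # shape (batch,): the best candidate index for each row
+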
+
+def _split(data, full_batch_size: int, split_size: int = None):
+ """
+ Takes care of three cases:
+ 1. data is a tensor: e.g. last_hidden_state, pooler_output etc. split them on the batch_size dim
+ 2. data is a tuple: e.g. hidden_states, attentions etc. Keep the tuple as it is and split each tensor in it and
+ return a list of tuples
+ 3. data is a tuple of tuples, e.g. past_key_values. Keep the tuple as it is and split each tuple in it and
+ return a list of tuples of tuples
+ (see documentation of ModelOutput)
+ """
+ if data is None:
+ return [None] * (full_batch_size // split_size)
+ if isinstance(data, torch.Tensor):
+ return [data[i : i + split_size] for i in range(0, full_batch_size, split_size)]
+ elif isinstance(data, tuple):
+ # If the elements of the tuple are also tuples (e.g., past_key_values in our earlier example)
+ if isinstance(data[0], tuple):
+ return [
+ tuple(tuple(tensor[i : i + split_size] for tensor in inner_tuple) for inner_tuple in data)
+ for i in range(0, full_batch_size, split_size)
+ ]
+
+ else:
+ return [
+ tuple(sub_tensor[i : i + split_size] for sub_tensor in data)
+ for i in range(0, full_batch_size, split_size)
+ ]
+ else:
+ raise ValueError(f"Unexpected attribute type: {type(data)}")
+
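+
+# NOTE (editor's sketch): illustrative only, never called by the library. It walks through the three cases the
+# `_split` docstring lists, with toy tensors; all shapes are assumptions for the demonstration.
+def _split_sketch():
+    full_batch_size, split_size = 4, 2
+    # case 1: a plain tensor is cut along the batch dimension
+    tensor_chunks = _split(torch.arange(8).reshape(4, 2), full_batch_size, split_size)  # two tensors of shape (2, 2)
+    # case 2: a tuple of tensors (e.g. hidden_states) keeps its structure; each member is cut
+    tuple_chunks = _split((torch.zeros(4, 3), torch.zeros(4, 3)), full_batch_size, split_size)
+    # case 3: a tuple of tuples (e.g. past_key_values) is handled the same way, one level deeper
+    kv_chunks = _split(((torch.zeros(4, 1, 2, 2), torch.zeros(4, 1, 2, 2)),), full_batch_size, split_size)
+    return len(tensor_chunks), len(tuple_chunks), len(kv_chunks)  # (2, 2, 2)
+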
+
+def _split_model_inputs(
+ model_input: Union[ModelOutput, Dict], split_size: int, full_batch_size: int
+) -> List[Union[ModelOutput, Dict]]:
+ """
+ Split a ModelOutput object (or its subclasses) or Dict into a list of same-class objects based on a specified split
+ size. The input object is dict when it was prepared for forward pass and ModelOutput when it was returned from
+ previous forward pass.
+ """
+ # Edge case: if model_input is None, return a list of Nones
+ # this happens with Whisper where encoder_outputs is None
+ if model_input is None:
+ return [model_input] * (full_batch_size // split_size)
+ # Infer the class from the object
+ model_output_cls = type(model_input)
+ if (full_batch_size % split_size) != 0:
+ raise ValueError("`full_batch_size` must be divisible by `split_size`")
+
+ if split_size > full_batch_size:
+ raise ValueError("`split_size` must be smaller or equal to `full_batch_size`")
+
+ # Helper function to split tensors or tuples of tensors
+
+ # Find all the dataclass fields (e.g., last_hidden_state, pooler_output etc.) and split them
+ keys = (
+ model_input.__dataclass_fields__.keys() if hasattr(model_input, "__dataclass_fields__") else model_input.keys()
+ )
+ # We only keep keys that are in the model_input
+ keys = [k for k in keys if k in model_input]
+ # Here we can have four types of values: tensors, tuples of tensors and booleans, and encoder_outputs which is a
+ # ModelOutput object.
+ # bool should not be split but replicated for each split
+ bool_keys = [k for k in keys if isinstance(model_input[k], bool) or k == "cache_position"]
+ keys_to_ignore = ["cache_position", "encoder_outputs"]
+ non_bool_keys = [k for k in keys if not isinstance(model_input[k], bool) and k not in keys_to_ignore]
+
+ # we split the tensors and tuples of tensors
+ data_split_list = [
+ {k: _split(model_input[k], full_batch_size, split_size)[i] for k in non_bool_keys}
+ for i in range(full_batch_size // split_size)
+ ]
+ # bool values are the same and replicated for each split
+ bool_data = {k: model_input[k] for k in bool_keys}
+ # encoder_outputs is a ModelOutput object and should be split by its own
+ if "encoder_outputs" in model_input:
+ encoder_outputs_split = _split_model_inputs(model_input["encoder_outputs"], split_size, full_batch_size)
+ data_split_list = [
+ {**data_split, "encoder_outputs": encoder_outputs_split[i]} for i, data_split in enumerate(data_split_list)
+ ]
+
+ # Convert each dictionary in the list to an object of the inferred class
+ split_model_inputs: List[Union[ModelOutput, Dict]] = [
+ model_output_cls(**data_split, **bool_data) for data_split in data_split_list
+ ]
+
+ return split_model_inputs
+
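+
+# NOTE (editor's sketch): illustrative only, never called by the library. It splits a plain `dict` of prepared model
+# inputs (the case described in the docstring above) into two micro-batches; keys and shapes are assumptions.
+def _split_model_inputs_sketch():
+    model_input = {
+        "input_ids": torch.ones(4, 7, dtype=torch.long),
+        "attention_mask": torch.ones(4, 7, dtype=torch.long),
+        "use_cache": True,  # booleans are replicated into every split rather than cut
+    }
+    parts = _split_model_inputs(model_input, split_size=2, full_batch_size=4)
+    return [part["input_ids"].shape for part in parts]  # two chunks of shape (2, 7), each carrying "use_cache": True
+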
+
+def stack_model_outputs(model_outputs: List[ModelOutput]) -> ModelOutput:
+ """
+ Stack a list of ModelOutput objects (or its subclasses) along the batch_size dimension. The function infers the
+ specific ModelOutput subclass from the list provided.
+ """
+ if not model_outputs:
+ raise ValueError("Input list is empty.")
+
+ # Infer the class from the first object in the list
+ model_output_cls = type(model_outputs[0])
+
+ # Ensure all objects are of the same type
+ if not all(isinstance(obj, model_output_cls) for obj in model_outputs):
+ raise ValueError("All elements in the list should be of the same type.")
+
+ # Helper function to concat tensors or tuples of tensors
+ def _concat(data):
+ """
+ Reverse of `_split` function above.
+ """
+        if any(entry is None for entry in data):
+ return None
+ if isinstance(data[0], torch.Tensor):
+ return torch.cat(data, dim=0)
+ elif isinstance(data[0], tuple):
+ # If the elements of the tuple are also tuples (e.g., past_key_values in our earlier example)
+ if isinstance(data[0][0], tuple):
+ return tuple(
+ tuple(torch.cat([attr[i][j] for attr in data], dim=0) for j in range(len(data[0][0])))
+ for i in range(len(data[0]))
+ )
+ else:
+ return tuple(torch.cat([attr[i] for attr in data], dim=0) for i in range(len(data[0])))
+ elif isinstance(data[0], (int, float)):
+ # If the elements are integers or floats, return a tensor
+ return torch.tensor(data)
+ else:
+ raise ValueError(f"Unexpected attribute type: {type(data[0])}")
+
+ # Use a dictionary comprehension to gather attributes from all objects and concatenate them
+ concatenated_data = {
+ k: _concat([getattr(model_output, k) for model_output in model_outputs])
+ for k in model_output_cls.__dataclass_fields__.keys()
+ }
+
+ # Return a new object of the inferred class with the concatenated attributes
+ return model_output_cls(**concatenated_data)
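+
+
+# NOTE (editor's sketch): illustrative only, never called by the library. It stacks two toy outputs back into a single
+# batch; `CausalLMOutputWithPast` is just a convenient `ModelOutput` subclass picked for the demo.
+def _stack_model_outputs_sketch():
+    from ..modeling_outputs import CausalLMOutputWithPast
+
+    pieces = [
+        CausalLMOutputWithPast(logits=torch.zeros(2, 5, 10)),
+        CausalLMOutputWithPast(logits=torch.ones(2, 5, 10)),
+    ]
+    merged = stack_model_outputs(pieces)
+    return merged.logits.shape  # (4, 5, 10); fields left as `None` (loss, past_key_values, ...) stay `None`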
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3416a4dda579ced80dd884b6aadfc31d38acd93
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/automatic_speech_recognition.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/automatic_speech_recognition.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f35b1736c71d3decdc32561b0fd7a77976613581
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/automatic_speech_recognition.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/depth_estimation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/depth_estimation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..740a978d05676c0e9645142179d4283c9bfd4451
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/depth_estimation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/feature_extraction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/feature_extraction.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..39102613da18830356e8ea2f3b571dd88e0233e7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/feature_extraction.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_feature_extraction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_feature_extraction.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33d0f66c5d285bcc4e4bce91ede81a37cc95e754
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_feature_extraction.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_image.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..243c42935c4fc58d136d384f678e000ef6551fe0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_image.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2b0293f4870a5d03c35e9fd897737373615b9aa
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_generation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_to_audio.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_to_audio.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d63cac9c4dcaf5abbd4bec6df3dee23fbed3a589
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_to_audio.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3deb6947904ac94c512f4df22f9ba061dd59e0a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_object_detection.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_object_detection.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac84693da2e8a369285d88549074fc35482ce304
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_object_detection.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/audio_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/audio_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dd95d83059ae4ebaf47f45e84579301961bea85
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/audio_utils.py
@@ -0,0 +1,248 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+import datetime
+import platform
+import subprocess
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+
+def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
+ """
+ Helper function to read an audio file through ffmpeg.
+ """
+ ar = f"{sampling_rate}"
+ ac = "1"
+ format_for_conversion = "f32le"
+ ffmpeg_command = [
+ "ffmpeg",
+ "-i",
+ "pipe:0",
+ "-ac",
+ ac,
+ "-ar",
+ ar,
+ "-f",
+ format_for_conversion,
+ "-hide_banner",
+ "-loglevel",
+ "quiet",
+ "pipe:1",
+ ]
+
+ try:
+ with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
+ output_stream = ffmpeg_process.communicate(bpayload)
+ except FileNotFoundError as error:
+ raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
+ out_bytes = output_stream[0]
+ audio = np.frombuffer(out_bytes, np.float32)
+ if audio.shape[0] == 0:
+ raise ValueError(
+ "Soundfile is either not in the correct format or is malformed. Ensure that the soundfile has "
+ "a valid audio file extension (e.g. wav, flac or mp3) and is not corrupted. If reading from a remote "
+ "URL, ensure that the URL is the full address to **download** the audio file."
+ )
+ return audio
+
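+
+# NOTE (editor's sketch): illustrative only, never called by the library. It decodes a local audio file into a mono
+# 16 kHz float32 array; "sample.flac" is a placeholder path and ffmpeg must be installed on the machine.
+def _ffmpeg_read_sketch():
+    with open("sample.flac", "rb") as f:  # hypothetical file
+        payload = f.read()
+    audio = ffmpeg_read(payload, sampling_rate=16_000)
+    return audio.shape, audio.dtype  # (num_samples,), float32
+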
+
+def ffmpeg_microphone(
+ sampling_rate: int,
+ chunk_length_s: float,
+ format_for_conversion: str = "f32le",
+):
+ """
+ Helper function to read raw microphone data.
+ """
+ ar = f"{sampling_rate}"
+ ac = "1"
+ if format_for_conversion == "s16le":
+ size_of_sample = 2
+ elif format_for_conversion == "f32le":
+ size_of_sample = 4
+ else:
+ raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
+
+ system = platform.system()
+ if system == "Linux":
+ format_ = "alsa"
+ input_ = "default"
+ elif system == "Darwin":
+ format_ = "avfoundation"
+ input_ = ":0"
+ elif system == "Windows":
+ format_ = "dshow"
+ input_ = _get_microphone_name()
+
+ ffmpeg_command = [
+ "ffmpeg",
+ "-f",
+ format_,
+ "-i",
+ input_,
+ "-ac",
+ ac,
+ "-ar",
+ ar,
+ "-f",
+ format_for_conversion,
+ "-fflags",
+ "nobuffer",
+ "-hide_banner",
+ "-loglevel",
+ "quiet",
+ "pipe:1",
+ ]
+ chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
+ iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
+ for item in iterator:
+ yield item
+
+
+def ffmpeg_microphone_live(
+ sampling_rate: int,
+ chunk_length_s: float,
+ stream_chunk_s: Optional[int] = None,
+ stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
+ format_for_conversion: str = "f32le",
+):
+ """
+    Helper function to read audio from the microphone through ffmpeg. This will output `partial` overlapping
+ chunks starting from `stream_chunk_s` (if it is defined) until `chunk_length_s` is reached. It will make use of
+ striding to avoid errors on the "sides" of the various chunks.
+
+ Arguments:
+ sampling_rate (`int`):
+ The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to
+ avoid resampling later.
+ chunk_length_s (`float` or `int`):
+            The maximum length (in seconds) of the audio chunks to be returned. This includes any striding.
+        stream_chunk_s (`float` or `int`, *optional*):
+            The minimum length (in seconds) of the partial audio chunks to be returned while streaming.
+        stride_length_s (`float` or `int` or `(float, float)`, *optional*, defaults to `None`):
+            The length of the striding to be used. Stride is used to provide context to a model on the (left, right) of
+            an audio sample but without using that part to actually make the prediction. Setting this does not change
+            the length of the chunk.
+        format_for_conversion (`str`, defaults to `f32le`):
+            The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`; `s16le` can
+            also be used.
+    Return:
+        A generator yielding dictionaries of the following form:
+
+        `{"sampling_rate": int, "raw": np.array(), "partial": bool}`, with an optional `"stride": (int, int)` key if
+        `stride_length_s` is defined.
+
+        `stride` and `raw` are expressed in `samples`, and `partial` is a boolean indicating whether the current yield
+        item is a whole chunk or a partial temporary result that will later be replaced by a larger chunk.
+
+
+ """
+ if stream_chunk_s is not None:
+ chunk_s = stream_chunk_s
+ else:
+ chunk_s = chunk_length_s
+
+ microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
+ if format_for_conversion == "s16le":
+ dtype = np.int16
+ size_of_sample = 2
+ elif format_for_conversion == "f32le":
+ dtype = np.float32
+ size_of_sample = 4
+ else:
+ raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
+
+ if stride_length_s is None:
+ stride_length_s = chunk_length_s / 6
+ chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
+ if isinstance(stride_length_s, (int, float)):
+ stride_length_s = [stride_length_s, stride_length_s]
+
+ stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
+ stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
+ audio_time = datetime.datetime.now()
+ delta = datetime.timedelta(seconds=chunk_s)
+ for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
+ # Put everything back in numpy scale
+ item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
+ item["stride"] = (
+ item["stride"][0] // size_of_sample,
+ item["stride"][1] // size_of_sample,
+ )
+ item["sampling_rate"] = sampling_rate
+ audio_time += delta
+ if datetime.datetime.now() > audio_time + 10 * delta:
+ # We're late !! SKIP
+ continue
+ yield item
+
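+
+# NOTE (editor's sketch): illustrative only, never called by the library. It streams 1-second partial updates that
+# grow into 5-second chunks from the default microphone; it requires ffmpeg and a working audio input device.
+def _ffmpeg_microphone_live_sketch():
+    stream = ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=5.0, stream_chunk_s=1.0)
+    for item in stream:
+        print(item["sampling_rate"], item["raw"].shape, item["partial"], item["stride"])
+        if not item["partial"]:
+            break  # stop after the first complete chunk
+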
+
+def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
+ """
+    Reads raw bytes from an iterator and cuts them into chunks of length `chunk_len`. Optionally adds `stride` to each
+    chunk to get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available.
+ """
+ acc = b""
+ stride_left, stride_right = stride
+ if stride_left + stride_right >= chunk_len:
+ raise ValueError(
+ f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
+ )
+ _stride_left = 0
+ for raw in iterator:
+ acc += raw
+ if stream and len(acc) < chunk_len:
+ stride = (_stride_left, 0)
+ yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
+ else:
+ while len(acc) >= chunk_len:
+ # We are flushing the accumulator
+ stride = (_stride_left, stride_right)
+ item = {"raw": acc[:chunk_len], "stride": stride}
+ if stream:
+ item["partial"] = False
+ yield item
+ _stride_left = stride_left
+ acc = acc[chunk_len - stride_left - stride_right :]
+ # Last chunk
+ if len(acc) > stride_left:
+ item = {"raw": acc, "stride": (_stride_left, 0)}
+ if stream:
+ item["partial"] = False
+ yield item
+
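+
+# NOTE (editor's sketch): illustrative only, never called by the library. Ten bytes arriving in two pieces are cut
+# into overlapping 6-byte chunks with a (2, 2) stride, yielding b"012345", b"234567", b"456789" and the tail b"6789"
+# with strides (0, 2), (2, 2), (2, 2) and (2, 0) respectively.
+def _chunk_bytes_iter_sketch():
+    source = iter([b"01234", b"56789"])
+    return list(chunk_bytes_iter(source, chunk_len=6, stride=(2, 2)))
+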
+
+def _ffmpeg_stream(ffmpeg_command, buflen: int):
+ """
+    Internal function to create the generator of data through ffmpeg.
+ """
+    bufsize = 2**24  # 16 MiB
+ try:
+ with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
+ while True:
+ raw = ffmpeg_process.stdout.read(buflen)
+ if raw == b"":
+ break
+ yield raw
+ except FileNotFoundError as error:
+ raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
+
+
+def _get_microphone_name():
+ """
+    Retrieve the microphone name on Windows.
+ """
+ command = ["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", ""]
+
+ try:
+ ffmpeg_devices = subprocess.run(command, text=True, stderr=subprocess.PIPE, encoding="utf-8")
+ microphone_lines = [line for line in ffmpeg_devices.stderr.splitlines() if "(audio)" in line]
+
+ if microphone_lines:
+ microphone_name = microphone_lines[0].split('"')[1]
+ print(f"Using microphone: {microphone_name}")
+ return f"audio={microphone_name}"
+ except FileNotFoundError:
+ print("ffmpeg was not found. Please install it or make sure it is in your system PATH.")
+
+ return "default"
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/depth_estimation.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/depth_estimation.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6431a499717a43f5e4974bfd1104e31ef9498ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/depth_estimation.py
@@ -0,0 +1,111 @@
+from typing import List, Union
+
+import numpy as np
+
+from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
+from .base import Pipeline, build_pipeline_init_args
+
+
+if is_vision_available():
+ from PIL import Image
+
+ from ..image_utils import load_image
+
+if is_torch_available():
+ import torch
+
+ from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES
+
+logger = logging.get_logger(__name__)
+
+
+@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
+class DepthEstimationPipeline(Pipeline):
+ """
+ Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
+ >>> output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
+ >>> # This is a tensor with the values being the depth expressed in meters for each pixel
+ >>> output["predicted_depth"].shape
+ torch.Size([1, 384, 384])
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+
+ This depth estimation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"depth-estimation"`.
+
+ See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=depth-estimation).
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ requires_backends(self, "vision")
+ self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)
+
+ def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
+ """
+ Predict the depth(s) of the image(s) passed as inputs.
+
+ Args:
+ images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
+ The pipeline handles three types of images:
+
+ - A string containing a http link pointing to an image
+ - A string containing a local path to an image
+ - An image loaded in PIL directly
+
+                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
+                same format: all as HTTP links, all as local paths, or all as PIL images.
+ timeout (`float`, *optional*, defaults to None):
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
+ the call may block forever.
+
+ Return:
+            A dictionary or a list of dictionaries containing the results. If the input is a single image, a
+            dictionary is returned; if the input is a list of several images, a list of dictionaries corresponding to
+            the images is returned.
+
+ The dictionaries contain the following keys:
+
+ - **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`.
+ - **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`.
+ """
+ return super().__call__(images, **kwargs)
+
+ def _sanitize_parameters(self, timeout=None, **kwargs):
+ preprocess_params = {}
+ if timeout is not None:
+ preprocess_params["timeout"] = timeout
+ return preprocess_params, {}, {}
+
+ def preprocess(self, image, timeout=None):
+ image = load_image(image, timeout)
+ self.image_size = image.size
+ model_inputs = self.image_processor(images=image, return_tensors=self.framework)
+ return model_inputs
+
+ def _forward(self, model_inputs):
+ model_outputs = self.model(**model_inputs)
+ return model_outputs
+
+ def postprocess(self, model_outputs):
+ predicted_depth = model_outputs.predicted_depth
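+        # `self.image_size` is PIL's (width, height); `interpolate` expects (height, width), hence the `[::-1]` below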
+ prediction = torch.nn.functional.interpolate(
+ predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
+ )
+ output = prediction.squeeze().cpu().numpy()
+ formatted = (output * 255 / np.max(output)).astype("uint8")
+ depth = Image.fromarray(formatted)
+ output_dict = {}
+ output_dict["predicted_depth"] = predicted_depth
+ output_dict["depth"] = depth
+ return output_dict
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6f240822322f75012ffb592e89f0e3f59189008
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py
@@ -0,0 +1,273 @@
+from typing import Dict
+
+import numpy as np
+
+from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
+from .base import GenericTensor, Pipeline, PipelineException, build_pipeline_init_args
+
+
+if is_tf_available():
+ import tensorflow as tf
+
+ from ..tf_utils import stable_softmax
+
+
+if is_torch_available():
+ import torch
+
+
+logger = logging.get_logger(__name__)
+
+
+@add_end_docstrings(
+ build_pipeline_init_args(has_tokenizer=True),
+ r"""
+ top_k (`int`, defaults to 5):
+ The number of predictions to return.
+ targets (`str` or `List[str]`, *optional*):
+ When passed, the model will limit the scores to the passed targets instead of looking up in the whole
+ vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
+ token will be used (with a warning, and that might be slower).
+ tokenizer_kwargs (`dict`, *optional*):
+ Additional dictionary of keyword arguments passed along to the tokenizer.""",
+)
+class FillMaskPipeline(Pipeline):
+ """
+ Masked language modeling prediction pipeline using any `ModelWithLMHead`. See the [masked language modeling
+ examples](../task_summary#masked-language-modeling) for more information.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> fill_masker = pipeline(model="google-bert/bert-base-uncased")
+ >>> fill_masker("This is a simple [MASK].")
+ [{'score': 0.042, 'token': 3291, 'token_str': 'problem', 'sequence': 'this is a simple problem.'}, {'score': 0.031, 'token': 3160, 'token_str': 'question', 'sequence': 'this is a simple question.'}, {'score': 0.03, 'token': 8522, 'token_str': 'equation', 'sequence': 'this is a simple equation.'}, {'score': 0.027, 'token': 2028, 'token_str': 'one', 'sequence': 'this is a simple one.'}, {'score': 0.024, 'token': 3627, 'token_str': 'rule', 'sequence': 'this is a simple rule.'}]
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ This mask filling pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"fill-mask"`.
+
+ The models that this pipeline can use are models that have been trained with a masked language modeling objective,
+ which includes the bi-directional models in the library. See the up-to-date list of available models on
+ [huggingface.co/models](https://huggingface.co/models?filter=fill-mask).
+
+ This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple
+ masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect
+ joint probabilities (See [discussion](https://github.com/huggingface/transformers/pull/10222)).
+
+ This pipeline now supports tokenizer_kwargs. For example, try:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> fill_masker = pipeline(model="google-bert/bert-base-uncased")
+ >>> tokenizer_kwargs = {"truncation": True}
+ >>> fill_masker(
+ ... "This is a simple [MASK]. " + "...with a large amount of repeated text appended. " * 100,
+ ... tokenizer_kwargs=tokenizer_kwargs,
+ ... )
+ ```
+
+ """
+
+ def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
+ if self.framework == "tf":
+ masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
+ elif self.framework == "pt":
+ masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
+ else:
+ raise ValueError("Unsupported framework")
+ return masked_index
+
+ def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
+ masked_index = self.get_masked_index(input_ids)
+ numel = np.prod(masked_index.shape)
+ if numel < 1:
+ raise PipelineException(
+ "fill-mask",
+ self.model.base_model_prefix,
+ f"No mask_token ({self.tokenizer.mask_token}) found on the input",
+ )
+
+ def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
+ if isinstance(model_inputs, list):
+ for model_input in model_inputs:
+ self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
+ else:
+ for input_ids in model_inputs["input_ids"]:
+ self._ensure_exactly_one_mask_token(input_ids)
+
+ def preprocess(
+ self, inputs, return_tensors=None, tokenizer_kwargs=None, **preprocess_parameters
+ ) -> Dict[str, GenericTensor]:
+ if return_tensors is None:
+ return_tensors = self.framework
+ if tokenizer_kwargs is None:
+ tokenizer_kwargs = {}
+
+ model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
+ self.ensure_exactly_one_mask_token(model_inputs)
+ return model_inputs
+
+ def _forward(self, model_inputs):
+ model_outputs = self.model(**model_inputs)
+ model_outputs["input_ids"] = model_inputs["input_ids"]
+ return model_outputs
+
+ def postprocess(self, model_outputs, top_k=5, target_ids=None):
+ # Cap top_k if there are targets
+ if target_ids is not None and target_ids.shape[0] < top_k:
+ top_k = target_ids.shape[0]
+ input_ids = model_outputs["input_ids"][0]
+ outputs = model_outputs["logits"]
+
+ if self.framework == "tf":
+ masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
+
+ outputs = outputs.numpy()
+
+ logits = outputs[0, masked_index, :]
+ probs = stable_softmax(logits, axis=-1)
+ if target_ids is not None:
+ probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
+ probs = tf.expand_dims(probs, 0)
+
+ topk = tf.math.top_k(probs, k=top_k)
+ values, predictions = topk.values.numpy(), topk.indices.numpy()
+ else:
+ masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
+ # Fill mask pipeline supports only one ${mask_token} per sample
+
+ logits = outputs[0, masked_index, :]
+ probs = logits.softmax(dim=-1)
+ if target_ids is not None:
+ probs = probs[..., target_ids]
+
+ values, predictions = probs.topk(top_k)
+
+ result = []
+ single_mask = values.shape[0] == 1
+ for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
+ row = []
+ for v, p in zip(_values, _predictions):
+ # Copy is important since we're going to modify this array in place
+ tokens = input_ids.numpy().copy()
+ if target_ids is not None:
+ p = target_ids[p].tolist()
+
+ tokens[masked_index[i]] = p
+ # Filter padding out:
+ tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
+ # Originally we skip special tokens to give readable output.
+ # For multi-mask inputs though, the other [MASK] tokens would otherwise be removed,
+ # making the output look odd, so we add them back.
+ sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
+ proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
+ row.append(proposition)
+ result.append(row)
+ if single_mask:
+ return result[0]
+ return result
+
+ def get_target_ids(self, targets, top_k=None):
+ if isinstance(targets, str):
+ targets = [targets]
+ try:
+ vocab = self.tokenizer.get_vocab()
+ except Exception:
+ vocab = {}
+ target_ids = []
+ for target in targets:
+ id_ = vocab.get(target, None)
+ if id_ is None:
+ input_ids = self.tokenizer(
+ target,
+ add_special_tokens=False,
+ return_attention_mask=False,
+ return_token_type_ids=False,
+ max_length=1,
+ truncation=True,
+ )["input_ids"]
+ if len(input_ids) == 0:
+ logger.warning(
+ f"The specified target token `{target}` does not exist in the model vocabulary. "
+ "We cannot replace it with anything meaningful, ignoring it"
+ )
+ continue
+ id_ = input_ids[0]
+ # XXX: If users hit this path, lookups become pretty slow,
+ # so the warning below lets them fix their targets
+ # and get faster performance.
+ logger.warning(
+ f"The specified target token `{target}` does not exist in the model vocabulary. "
+ f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
+ )
+ target_ids.append(id_)
+ target_ids = list(set(target_ids))
+ if len(target_ids) == 0:
+ raise ValueError("At least one target must be provided when passed.")
+ target_ids = np.array(target_ids)
+ return target_ids
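+ # Note: a Python set is used above to deduplicate targets, so the order of `target_ids` is not guaranteed
+ # to match the order in which the targets were passed.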
+
+ def _sanitize_parameters(self, top_k=None, targets=None, tokenizer_kwargs=None):
+ preprocess_params = {}
+
+ if tokenizer_kwargs is not None:
+ preprocess_params["tokenizer_kwargs"] = tokenizer_kwargs
+
+ postprocess_params = {}
+
+ if targets is not None:
+ target_ids = self.get_target_ids(targets, top_k)
+ postprocess_params["target_ids"] = target_ids
+
+ if top_k is not None:
+ postprocess_params["top_k"] = top_k
+
+ if self.tokenizer.mask_token_id is None:
+ raise PipelineException(
+ "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
+ )
+ return preprocess_params, {}, postprocess_params
+
+ def __call__(self, inputs, *args, **kwargs):
+ """
+ Fill the masked token in the text(s) given as inputs.
+
+ Args:
+ args (`str` or `List[str]`):
+ One or several texts (or one list of prompts) with masked tokens.
+ targets (`str` or `List[str]`, *optional*):
+ When passed, the model will limit the scores to the passed targets instead of looking up in the whole
+ vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
+ resulting token will be used (with a warning, and that might be slower).
+ top_k (`int`, *optional*):
+ When passed, overrides the number of predictions to return.
+
+ Return:
+ A list or a list of lists of `dict`: Each result comes as a list of dictionaries with the following keys:
+
+ - **sequence** (`str`) -- The corresponding input with the mask token prediction.
+ - **score** (`float`) -- The corresponding probability.
+ - **token** (`int`) -- The predicted token id (to replace the masked one).
+ - **token_str** (`str`) -- The predicted token (to replace the masked one).
+ """
+ outputs = super().__call__(inputs, **kwargs)
+ if isinstance(inputs, list) and len(inputs) == 1:
+ return outputs[0]
+ return outputs
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_feature_extraction.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_feature_extraction.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a361deabd797d98f87293acd236a05801ea0458
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_feature_extraction.py
@@ -0,0 +1,110 @@
+from typing import Dict
+
+from ..utils import add_end_docstrings, is_vision_available
+from .base import GenericTensor, Pipeline, build_pipeline_init_args
+
+
+if is_vision_available():
+ from ..image_utils import load_image
+
+
+@add_end_docstrings(
+ build_pipeline_init_args(has_image_processor=True),
+ """
+ image_processor_kwargs (`dict`, *optional*):
+ Additional dictionary of keyword arguments passed along to the image processor, e.g.
+ `{"size": {"height": 100, "width": 100}}`.
+ pool (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the pooled output. If `False`, the model will return the raw hidden states.
+ """,
+)
+class ImageFeatureExtractionPipeline(Pipeline):
+ """
+ Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
+ transformer, which can be used as features in downstream tasks.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
+ >>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True)
+ >>> result.shape  # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image.
+ torch.Size([1, 197, 768])
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
+ `"image-feature-extraction"`.
+
+ All vision models may be used for this pipeline. See a list of all models, including community-contributed
+ models, on [huggingface.co/models](https://huggingface.co/models).
+ """
+
+ def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, pool=None, **kwargs):
+ preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs
+
+ postprocess_params = {}
+ if pool is not None:
+ postprocess_params["pool"] = pool
+ if return_tensors is not None:
+ postprocess_params["return_tensors"] = return_tensors
+
+ if "timeout" in kwargs:
+ preprocess_params["timeout"] = kwargs["timeout"]
+
+ return preprocess_params, {}, postprocess_params
+
+ def preprocess(self, image, timeout=None, **image_processor_kwargs) -> Dict[str, GenericTensor]:
+ image = load_image(image, timeout=timeout)
+ model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs)
+ return model_inputs
+
+ def _forward(self, model_inputs):
+ model_outputs = self.model(**model_inputs)
+ return model_outputs
+
+ def postprocess(self, model_outputs, pool=None, return_tensors=False):
+ pool = pool if pool is not None else False
+
+ if pool:
+ if "pooler_output" not in model_outputs:
+ raise ValueError(
+ "No pooled output was returned. Make sure the model has a `pooler` layer when using the `pool` option."
+ )
+ outputs = model_outputs["pooler_output"]
+ else:
+ # [0] is the first available tensor, logits or last_hidden_state.
+ outputs = model_outputs[0]
+
+ if return_tensors:
+ return outputs
+ if self.framework == "pt":
+ return outputs.tolist()
+ elif self.framework == "tf":
+ return outputs.numpy().tolist()
+
+ def __call__(self, *args, **kwargs):
+ """
+ Extract the features of the input(s).
+
+ Args:
+ images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
+ The pipeline handles three types of images:
+
+ - A string containing an HTTP link pointing to an image
+ - A string containing a local path to an image
+ - An image loaded in PIL directly
+
+ The pipeline accepts either a single image or a batch of images, which must then be passed as a list.
+ Images in a batch must all be in the same format: all as HTTP links, all as local paths, or all as PIL
+ images.
+ timeout (`float`, *optional*, defaults to None):
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
+ the call may block forever.
+ Return:
+ A nested list of `float`: The features computed by the model.
+ """
+ return super().__call__(*args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/question_answering.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ac5d252b1139e9ea4c2047587e6499a4c6b8082
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/question_answering.py
@@ -0,0 +1,671 @@
+import inspect
+import types
+import warnings
+from collections.abc import Iterable
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ..data import SquadExample, SquadFeatures, squad_convert_examples_to_features
+from ..modelcard import ModelCard
+from ..tokenization_utils import PreTrainedTokenizer
+from ..utils import (
+ PaddingStrategy,
+ add_end_docstrings,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+ logging,
+)
+from .base import ArgumentHandler, ChunkPipeline, build_pipeline_init_args
+
+
+logger = logging.get_logger(__name__)
+
+if TYPE_CHECKING:
+ from ..modeling_tf_utils import TFPreTrainedModel
+ from ..modeling_utils import PreTrainedModel
+
+ if is_tokenizers_available():
+ import tokenizers
+
+if is_tf_available():
+ import tensorflow as tf
+
+ from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
+
+ Dataset = None
+
+if is_torch_available():
+ import torch
+ from torch.utils.data import Dataset
+
+ from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
+
+
+def decode_spans(
+ start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray
+) -> Tuple:
+ """
+ Take the output of any `ModelForQuestionAnswering` and generate probabilities for each span to be the actual
+ answer.
+
+ In addition, it filters out some unwanted/impossible cases like the answer length being greater than
+ max_answer_len or the answer end position being before the starting position. The method supports outputting the
+ k-best answers through the topk argument.
+
+ Args:
+ start (`np.ndarray`): Individual start probabilities for each token.
+ end (`np.ndarray`): Individual end probabilities for each token.
+ topk (`int`): Indicates how many possible answer span(s) to extract from the model output.
+ max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
+ undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer
+ """
+ # Ensure we have batch axis
+ if start.ndim == 1:
+ start = start[None]
+
+ if end.ndim == 1:
+ end = end[None]
+
+ # Compute the score of each tuple(start, end) to be the real answer
+ outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
+
+ # Remove candidate with end < start and end - start > max_answer_len
+ candidates = np.tril(np.triu(outer), max_answer_len - 1)
+
+ # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
+ scores_flat = candidates.flatten()
+ if topk == 1:
+ idx_sort = [np.argmax(scores_flat)]
+ elif len(scores_flat) < topk:
+ idx_sort = np.argsort(-scores_flat)
+ else:
+ idx = np.argpartition(-scores_flat, topk)[0:topk]
+ idx_sort = idx[np.argsort(-scores_flat[idx])]
+
+ starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:]
+ desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero())
+ starts = starts[desired_spans]
+ ends = ends[desired_spans]
+ scores = candidates[0, starts, ends]
+
+ return starts, ends, scores
+
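+ # Minimal sketch of the helper above (values are illustrative):
+ #
+ #     start = np.array([0.1, 0.6, 0.3])
+ #     end = np.array([0.2, 0.2, 0.6])
+ #     decode_spans(start, end, topk=1, max_answer_len=2, undesired_tokens=np.ones(3))
+ #     # -> starts=[1], ends=[2], scores≈[0.36]: the best span runs from token 1 to token 2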
+
+def select_starts_ends(
+ start,
+ end,
+ p_mask,
+ attention_mask,
+ min_null_score=1000000,
+ top_k=1,
+ handle_impossible_answer=False,
+ max_answer_len=15,
+):
+ """
+ Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses
+ `decode_spans()` to generate probabilities for each span to be the actual answer.
+
+ Args:
+ start (`np.ndarray`): Individual start logits for each token.
+ end (`np.ndarray`): Individual end logits for each token.
+ p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer
+ attention_mask (`np.ndarray`): The attention mask generated by the tokenizer
+ min_null_score (`float`): The minimum null (empty) answer score seen so far.
+ top_k (`int`): Indicates how many possible answer span(s) to extract from the model output.
+ handle_impossible_answer (`bool`): Whether to allow null (empty) answers.
+ max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
+ """
+ # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
+ undesired_tokens = np.abs(np.array(p_mask) - 1)
+
+ if attention_mask is not None:
+ undesired_tokens = undesired_tokens & attention_mask
+
+ # Generate mask
+ undesired_tokens_mask = undesired_tokens == 0.0
+
+ # Make sure non-context indexes in the tensor cannot contribute to the softmax
+ start = np.where(undesired_tokens_mask, -10000.0, start)
+ end = np.where(undesired_tokens_mask, -10000.0, end)
+
+ # Normalize logits and spans to retrieve the answer
+ start = np.exp(start - start.max(axis=-1, keepdims=True))
+ start = start / start.sum()
+
+ end = np.exp(end - end.max(axis=-1, keepdims=True))
+ end = end / end.sum()
+
+ if handle_impossible_answer:
+ min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item())
+
+ # Mask CLS
+ start[0, 0] = end[0, 0] = 0.0
+
+ starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens)
+ return starts, ends, scores, min_null_score
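+ # Note: this helper is what `QuestionAnsweringPipeline.postprocess` below calls for every chunk; it normalizes the
+ # raw start/end logits into probabilities, masks out question/special/padding tokens via `p_mask`/`attention_mask`,
+ # and delegates the actual span search to `decode_spans`.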
+
+
+class QuestionAnsweringArgumentHandler(ArgumentHandler):
+ """
+ QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to
+ internal [`SquadExample`].
+
+ QuestionAnsweringArgumentHandler manages all the possible ways to create a [`SquadExample`] from the
+ supplied arguments.
+ """
+
+ def normalize(self, item):
+ if isinstance(item, SquadExample):
+ return item
+ elif isinstance(item, dict):
+ for k in ["question", "context"]:
+ if k not in item:
+ raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
+ elif item[k] is None:
+ raise ValueError(f"`{k}` cannot be None")
+ elif isinstance(item[k], str) and len(item[k]) == 0:
+ raise ValueError(f"`{k}` cannot be empty")
+
+ return QuestionAnsweringPipeline.create_sample(**item)
+ raise ValueError(f"{item} argument needs to be of type (SquadExample, dict)")
+
+ def __call__(self, *args, **kwargs):
+ # Detect where the actual inputs are
+ if args is not None and len(args) > 0:
+ if len(args) == 1:
+ inputs = args[0]
+ elif len(args) == 2 and {type(el) for el in args} == {str}:
+ inputs = [{"question": args[0], "context": args[1]}]
+ else:
+ inputs = list(args)
+ # Generic compatibility with sklearn and Keras
+ # Batched data
+ elif "X" in kwargs:
+ inputs = kwargs["X"]
+ elif "data" in kwargs:
+ inputs = kwargs["data"]
+ elif "question" in kwargs and "context" in kwargs:
+ if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str):
+ inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]]
+ elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list):
+ if len(kwargs["question"]) != len(kwargs["context"]):
+ raise ValueError("Questions and contexts don't have the same lengths")
+
+ inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])]
+ elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str):
+ inputs = [{"question": kwargs["question"], "context": kwargs["context"]}]
+ else:
+ raise ValueError("Arguments can't be understood")
+ else:
+ raise ValueError(f"Unknown arguments {kwargs}")
+
+ # When user is sending a generator we need to trust it's a valid example
+ generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,)
+ if isinstance(inputs, generator_types):
+ return inputs
+
+ # Normalize inputs
+ if isinstance(inputs, dict):
+ inputs = [inputs]
+ elif isinstance(inputs, Iterable):
+ # Copy to avoid overriding arguments
+ inputs = list(inputs)
+ else:
+ raise ValueError(f"Invalid arguments {kwargs}")
+
+ for i, item in enumerate(inputs):
+ inputs[i] = self.normalize(item)
+
+ return inputs
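+ # Accepted call shapes handled above (sketch):
+ #     handler(SquadExample(...))                       # a prebuilt SquadExample
+ #     handler({"question": "...", "context": "..."})   # a single dict (or a list of such dicts)
+ #     handler(question="...", context="...")           # keyword form, str or lists of str
+ #     handler(X=[...]) / handler(data=[...])           # sklearn/Keras-style batched inputs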
+
+
+@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
+class QuestionAnsweringPipeline(ChunkPipeline):
+ """
+ Question Answering pipeline using any `ModelForQuestionAnswering`. See the [question answering
+ examples](../task_summary#question-answering) for more information.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> oracle = pipeline(model="deepset/roberta-base-squad2")
+ >>> oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin")
+ {'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'}
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ This question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"question-answering"`.
+
+ The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the
+ up-to-date list of available models on
+ [huggingface.co/models](https://huggingface.co/models?filter=question-answering).
+ """
+
+ default_input_names = "question,context"
+ handle_impossible_answer = False
+
+ def __init__(
+ self,
+ model: Union["PreTrainedModel", "TFPreTrainedModel"],
+ tokenizer: PreTrainedTokenizer,
+ modelcard: Optional[ModelCard] = None,
+ framework: Optional[str] = None,
+ task: str = "",
+ **kwargs,
+ ):
+ super().__init__(
+ model=model,
+ tokenizer=tokenizer,
+ modelcard=modelcard,
+ framework=framework,
+ task=task,
+ **kwargs,
+ )
+
+ self._args_parser = QuestionAnsweringArgumentHandler()
+ self.check_model_type(
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
+ if self.framework == "tf"
+ else MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
+ )
+
+ @staticmethod
+ def create_sample(
+ question: Union[str, List[str]], context: Union[str, List[str]]
+ ) -> Union[SquadExample, List[SquadExample]]:
+ """
+ QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulates all the
+ logic for converting question(s) and context(s) to [`SquadExample`].
+
+ We currently support extractive question answering.
+
+ Arguments:
+ question (`str` or `List[str]`): The question(s) asked.
+ context (`str` or `List[str]`): The context(s) in which we will look for the answer.
+
+ Returns:
+ One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context.
+ """
+ if isinstance(question, list):
+ return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
+ else:
+ return SquadExample(None, question, context, None, None, None)
+
+ def _sanitize_parameters(
+ self,
+ padding=None,
+ topk=None,
+ top_k=None,
+ doc_stride=None,
+ max_answer_len=None,
+ max_seq_len=None,
+ max_question_len=None,
+ handle_impossible_answer=None,
+ align_to_words=None,
+ **kwargs,
+ ):
+ # Set defaults values
+ preprocess_params = {}
+ if padding is not None:
+ preprocess_params["padding"] = padding
+ if doc_stride is not None:
+ preprocess_params["doc_stride"] = doc_stride
+ if max_question_len is not None:
+ preprocess_params["max_question_len"] = max_question_len
+ if max_seq_len is not None:
+ preprocess_params["max_seq_len"] = max_seq_len
+
+ postprocess_params = {}
+ if topk is not None and top_k is None:
+ warnings.warn("topk parameter is deprecated, use top_k instead", UserWarning)
+ top_k = topk
+ if top_k is not None:
+ if top_k < 1:
+ raise ValueError(f"top_k parameter should be >= 1 (got {top_k})")
+ postprocess_params["top_k"] = top_k
+ if max_answer_len is not None:
+ if max_answer_len < 1:
+ raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}")
+ if max_answer_len is not None:
+ postprocess_params["max_answer_len"] = max_answer_len
+ if handle_impossible_answer is not None:
+ postprocess_params["handle_impossible_answer"] = handle_impossible_answer
+ if align_to_words is not None:
+ postprocess_params["align_to_words"] = align_to_words
+ return preprocess_params, {}, postprocess_params
+
+ def __call__(self, *args, **kwargs):
+ """
+ Answer the question(s) given as inputs by using the context(s).
+
+ Args:
+ args ([`SquadExample`] or a list of [`SquadExample`]):
+ One or several [`SquadExample`] containing the question and context.
+ X ([`SquadExample`] or a list of [`SquadExample`], *optional*):
+ One or several [`SquadExample`] containing the question and context (will be treated the same way as if
+ passed as the first positional argument).
+ data ([`SquadExample`] or a list of [`SquadExample`], *optional*):
+ One or several [`SquadExample`] containing the question and context (will be treated the same way as if
+ passed as the first positional argument).
+ question (`str` or `List[str]`):
+ One or several question(s) (must be used in conjunction with the `context` argument).
+ context (`str` or `List[str]`):
+ One or several context(s) associated with the question(s) (must be used in conjunction with the
+ `question` argument).
+ top_k (`int`, *optional*, defaults to 1):
+ The number of answers to return (will be chosen by order of likelihood). Note that we return fewer than
+ top_k answers if there are not enough options available within the context.
+ doc_stride (`int`, *optional*, defaults to 128):
+ If the context is too long to fit with the question for the model, it will be split in several chunks
+ with some overlap. This argument controls the size of that overlap.
+ max_answer_len (`int`, *optional*, defaults to 15):
+ The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
+ max_seq_len (`int`, *optional*, defaults to 384):
+ The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
+ model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
+ max_question_len (`int`, *optional*, defaults to 64):
+ The maximum length of the question after tokenization. It will be truncated if needed.
+ handle_impossible_answer (`bool`, *optional*, defaults to `False`):
+ Whether or not we accept impossible as an answer.
+ align_to_words (`bool`, *optional*, defaults to `True`):
+ Attempts to align the answer to real words. Improves quality on space-separated languages. Might hurt on
+ non-space-separated languages (like Japanese or Chinese).
+
+ Return:
+ A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
+
+ - **score** (`float`) -- The probability associated to the answer.
+ - **start** (`int`) -- The character start index of the answer (in the tokenized version of the input).
+ - **end** (`int`) -- The character end index of the answer (in the tokenized version of the input).
+ - **answer** (`str`) -- The answer to the question.
+ """
+
+ # Convert inputs to features
+
+ examples = self._args_parser(*args, **kwargs)
+ if isinstance(examples, (list, tuple)) and len(examples) == 1:
+ return super().__call__(examples[0], **kwargs)
+ return super().__call__(examples, **kwargs)
+
+ def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None):
+ # XXX: This is special, args_parser will not handle anything generator or dataset like.
+ # For those we expect the user to send a simple valid example either directly as a SquadExample or a simple dict.
+ # So we still need a little sanitization here.
+ if isinstance(example, dict):
+ example = SquadExample(None, example["question"], example["context"], None, None, None)
+
+ if max_seq_len is None:
+ max_seq_len = min(self.tokenizer.model_max_length, 384)
+ if doc_stride is None:
+ doc_stride = min(max_seq_len // 2, 128)
+
+ if doc_stride > max_seq_len:
+ raise ValueError(f"`doc_stride` ({doc_stride}) is larger than `max_seq_len` ({max_seq_len})")
+
+ if not self.tokenizer.is_fast:
+ features = squad_convert_examples_to_features(
+ examples=[example],
+ tokenizer=self.tokenizer,
+ max_seq_length=max_seq_len,
+ doc_stride=doc_stride,
+ max_query_length=max_question_len,
+ padding_strategy=PaddingStrategy.MAX_LENGTH,
+ is_training=False,
+ tqdm_enabled=False,
+ )
+ else:
+ # Define the side we want to truncate / pad and the text/pair sorting
+ question_first = self.tokenizer.padding_side == "right"
+
+ encoded_inputs = self.tokenizer(
+ text=example.question_text if question_first else example.context_text,
+ text_pair=example.context_text if question_first else example.question_text,
+ padding=padding,
+ truncation="only_second" if question_first else "only_first",
+ max_length=max_seq_len,
+ stride=doc_stride,
+ return_token_type_ids=True,
+ return_overflowing_tokens=True,
+ return_offsets_mapping=True,
+ return_special_tokens_mask=True,
+ )
+ # When the input is too long, it's converted into a batch of inputs with overflowing tokens
+ # and a stride of overlap between the inputs. If a batch of inputs is given, a special output
+ # "overflow_to_sample_mapping" indicates which member of the encoded batch belongs to which original batch sample.
+ # Here we tokenize examples one-by-one so we don't need to use "overflow_to_sample_mapping".
+ # "num_spans" is the number of output samples generated from the overflowing tokens.
+ num_spans = len(encoded_inputs["input_ids"])
+
+ # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
+ # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
+ p_mask = [
+ [tok != 1 if question_first else 0 for tok in encoded_inputs.sequence_ids(span_id)]
+ for span_id in range(num_spans)
+ ]
+
+ features = []
+ for span_idx in range(num_spans):
+ input_ids_span_idx = encoded_inputs["input_ids"][span_idx]
+ attention_mask_span_idx = (
+ encoded_inputs["attention_mask"][span_idx] if "attention_mask" in encoded_inputs else None
+ )
+ token_type_ids_span_idx = (
+ encoded_inputs["token_type_ids"][span_idx] if "token_type_ids" in encoded_inputs else None
+ )
+ # keep the cls_token unmasked (some models use it to indicate unanswerable questions)
+ if self.tokenizer.cls_token_id is not None:
+ cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0]
+ for cls_index in cls_indices:
+ p_mask[span_idx][cls_index] = 0
+ submask = p_mask[span_idx]
+ features.append(
+ SquadFeatures(
+ input_ids=input_ids_span_idx,
+ attention_mask=attention_mask_span_idx,
+ token_type_ids=token_type_ids_span_idx,
+ p_mask=submask,
+ encoding=encoded_inputs[span_idx],
+ # We don't use the rest of the values - and actually
+ # for Fast tokenizer we could totally avoid using SquadFeatures and SquadExample
+ cls_index=None,
+ token_to_orig_map={},
+ example_index=0,
+ unique_id=0,
+ paragraph_len=0,
+ token_is_max_context=0,
+ tokens=[],
+ start_position=0,
+ end_position=0,
+ is_impossible=False,
+ qas_id=None,
+ )
+ )
+
+ for i, feature in enumerate(features):
+ fw_args = {}
+ others = {}
+ model_input_names = self.tokenizer.model_input_names + ["p_mask", "token_type_ids"]
+
+ for k, v in feature.__dict__.items():
+ if k in model_input_names:
+ if self.framework == "tf":
+ tensor = tf.constant(v)
+ if tensor.dtype == tf.int64:
+ tensor = tf.cast(tensor, tf.int32)
+ fw_args[k] = tf.expand_dims(tensor, 0)
+ elif self.framework == "pt":
+ tensor = torch.tensor(v)
+ if tensor.dtype == torch.int32:
+ tensor = tensor.long()
+ fw_args[k] = tensor.unsqueeze(0)
+ else:
+ others[k] = v
+
+ is_last = i == len(features) - 1
+ yield {"example": example, "is_last": is_last, **fw_args, **others}
+
+ def _forward(self, inputs):
+ example = inputs["example"]
+ model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
+ # `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported
+ model_forward = self.model.forward if self.framework == "pt" else self.model.call
+ if "use_cache" in inspect.signature(model_forward).parameters.keys():
+ model_inputs["use_cache"] = False
+ output = self.model(**model_inputs)
+ if isinstance(output, dict):
+ return {"start": output["start_logits"], "end": output["end_logits"], "example": example, **inputs}
+ else:
+ start, end = output[:2]
+ return {"start": start, "end": end, "example": example, **inputs}
+
+ def postprocess(
+ self,
+ model_outputs,
+ top_k=1,
+ handle_impossible_answer=False,
+ max_answer_len=15,
+ align_to_words=True,
+ ):
+ min_null_score = 1000000 # large and positive
+ answers = []
+ for output in model_outputs:
+ start_ = output["start"]
+ end_ = output["end"]
+ example = output["example"]
+ p_mask = output["p_mask"]
+ attention_mask = (
+ output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None
+ )
+
+ starts, ends, scores, min_null_score = select_starts_ends(
+ start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len
+ )
+
+ if not self.tokenizer.is_fast:
+ char_to_word = np.array(example.char_to_word_offset)
+
+ # Convert the answer (tokens) back to the original text
+ # Score: score from the model
+ # Start: Index of the first character of the answer in the context string
+ # End: Index of the character following the last character of the answer in the context string
+ # Answer: Plain text of the answer
+ for s, e, score in zip(starts, ends, scores):
+ token_to_orig_map = output["token_to_orig_map"]
+ answers.append(
+ {
+ "score": score.item(),
+ "start": np.where(char_to_word == token_to_orig_map[s])[0][0].item(),
+ "end": np.where(char_to_word == token_to_orig_map[e])[0][-1].item(),
+ "answer": " ".join(example.doc_tokens[token_to_orig_map[s] : token_to_orig_map[e] + 1]),
+ }
+ )
+ else:
+ # Convert the answer (tokens) back to the original text
+ # Score: score from the model
+ # Start: Index of the first character of the answer in the context string
+ # End: Index of the character following the last character of the answer in the context string
+ # Answer: Plain text of the answer
+ question_first = bool(self.tokenizer.padding_side == "right")
+ enc = output["encoding"]
+
+ # Encoding was *not* padded, input_ids *might* be.
+ # It doesn't make a difference unless we're padding on
+ # the left hand side, since now we have different offsets
+ # everywhere.
+ if self.tokenizer.padding_side == "left":
+ offset = (output["input_ids"] == self.tokenizer.pad_token_id).numpy().sum()
+ else:
+ offset = 0
+
+ # Sometimes the max probability token is in the middle of a word so:
+ # - we start by finding the right word containing the token with `token_to_word`
+ # - then we convert this word in a character span with `word_to_chars`
+ sequence_index = 1 if question_first else 0
+ for s, e, score in zip(starts, ends, scores):
+ s = s - offset
+ e = e - offset
+
+ start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words)
+
+ answers.append(
+ {
+ "score": score.item(),
+ "start": start_index,
+ "end": end_index,
+ "answer": example.context_text[start_index:end_index],
+ }
+ )
+
+ if handle_impossible_answer:
+ answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
+ answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k]
+ if len(answers) == 1:
+ return answers[0]
+ return answers
+
+ def get_indices(
+ self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool
+ ) -> Tuple[int, int]:
+ if align_to_words:
+ try:
+ start_word = enc.token_to_word(s)
+ end_word = enc.token_to_word(e)
+ start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0]
+ end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1]
+ except Exception:
+ # Some tokenizers don't really handle words. Keep to offsets then.
+ start_index = enc.offsets[s][0]
+ end_index = enc.offsets[e][1]
+ else:
+ start_index = enc.offsets[s][0]
+ end_index = enc.offsets[e][1]
+ return start_index, end_index
+
+ def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
+ """
+ When decoding from token probabilities, this method maps token indexes to the actual words in the initial context.
+
+ Args:
+ text (`str`): The actual context to extract the answer from.
+ start (`int`): The answer starting token index.
+ end (`int`): The answer end token index.
+
+ Returns:
+ Dictionary like `{'answer': str, 'start': int, 'end': int}`
+ """
+ words = []
+ token_idx = char_start_idx = char_end_idx = chars_idx = 0
+
+ for i, word in enumerate(text.split(" ")):
+ token = self.tokenizer.tokenize(word)
+
+ # Append words if they are in the span
+ if start <= token_idx <= end:
+ if token_idx == start:
+ char_start_idx = chars_idx
+
+ if token_idx == end:
+ char_end_idx = chars_idx + len(word)
+
+ words += [word]
+
+ # Stop if we went over the end of the answer
+ if token_idx > end:
+ break
+
+ # Append the subtokenization length to the running index
+ token_idx += len(token)
+ chars_idx += len(word) + 1
+
+ # Join text with spaces
+ return {
+ "answer": " ".join(words),
+ "start": max(0, char_start_idx),
+ "end": min(len(text), char_end_idx),
+ }
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb8abdfcf7f500d314fc7d73a031e6b563a39b43
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py
@@ -0,0 +1,371 @@
+import enum
+import warnings
+
+from ..tokenization_utils import TruncationStrategy
+from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
+from .base import Pipeline, build_pipeline_init_args
+
+
+if is_tf_available():
+ import tensorflow as tf
+
+ from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+
+if is_torch_available():
+ from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+
+logger = logging.get_logger(__name__)
+
+
+class ReturnType(enum.Enum):
+ TENSORS = 0
+ TEXT = 1
+
+
+@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
+class Text2TextGenerationPipeline(Pipeline):
+ """
+ Pipeline for text to text generation using seq2seq models.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
+ >>> generator(
+ ... "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
+ ... )
+ [{'generated_text': 'question: Who created the RuPERTa-base?'}]
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
+ generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
+ text generation parameters in [Text generation strategies](../generation_strategies) and [Text
+ generation](text_generation).
+
+ This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
+ identifier: `"text2text-generation"`.
+
+ The models that this pipeline can use are models that have been fine-tuned on a sequence-to-sequence task. See the
+ up-to-date list of available models on
+ [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
+ parameters, see the [following
+ documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
+
+ Usage:
+
+ ```python
+ text2text_generator = pipeline("text2text-generation")
+ text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
+ ```"""
+
+ # Used in the return key of the pipeline.
+ return_name = "generated"
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self.check_model_type(
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+ if self.framework == "tf"
+ else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+ )
+
+ def _sanitize_parameters(
+ self,
+ return_tensors=None,
+ return_text=None,
+ return_type=None,
+ clean_up_tokenization_spaces=None,
+ truncation=None,
+ stop_sequence=None,
+ **generate_kwargs,
+ ):
+ preprocess_params = {}
+ if truncation is not None:
+ preprocess_params["truncation"] = truncation
+
+ forward_params = generate_kwargs
+
+ postprocess_params = {}
+ if return_tensors is not None and return_type is None:
+ return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
+ if return_type is not None:
+ postprocess_params["return_type"] = return_type
+
+ if clean_up_tokenization_spaces is not None:
+ postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
+
+ if stop_sequence is not None:
+ stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
+ if len(stop_sequence_ids) > 1:
+ warnings.warn(
+ "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
+ " the stop sequence will be used as the stop sequence string in the interim."
+ )
+ generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
+
+ return preprocess_params, forward_params, postprocess_params
+
+ def check_inputs(self, input_length: int, min_length: int, max_length: int):
+ """
+ Checks whether there might be something wrong with the given input with regard to the model.
+ """
+ return True
+
+ def _parse_and_tokenize(self, *args, truncation):
+ prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
+ if isinstance(args[0], list):
+ if self.tokenizer.pad_token_id is None:
+ raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
+ args = ([prefix + arg for arg in args[0]],)
+ padding = True
+
+ elif isinstance(args[0], str):
+ args = (prefix + args[0],)
+ padding = False
+ else:
+ raise ValueError(
+ f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
+ )
+ inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
+ # This is produced by tokenizers but is an invalid generate kwarg
+ if "token_type_ids" in inputs:
+ del inputs["token_type_ids"]
+ return inputs
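+ # Note: when the model config defines a `prefix` (some seq2seq checkpoints do), it is prepended to every
+ # input string above before tokenization.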
+
+ def __call__(self, *args, **kwargs):
+ r"""
+ Generate the output text(s) using text(s) given as inputs.
+
+ Args:
+ args (`str` or `List[str]`):
+ Input text for the encoder.
+ return_tensors (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the tensors of predictions (as token indices) in the outputs.
+ return_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to include the decoded texts in the outputs.
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+ Whether or not to clean up the potential extra spaces in the text output.
+ truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
+ The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
+ (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
+ max_length instead of throwing an error down the line.
+ generate_kwargs:
+ Additional keyword arguments to pass along to the generate method of the model (see the generate method
+ corresponding to your framework [here](./model#generative-models)).
+
+ Return:
+ A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:
+
+ - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
+ - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
+ ids of the generated text.
+ """
+
+ result = super().__call__(*args, **kwargs)
+ if (
+ isinstance(args[0], list)
+ and all(isinstance(el, str) for el in args[0])
+ and all(len(res) == 1 for res in result)
+ ):
+ return [res[0] for res in result]
+ return result
+
+ def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
+ inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
+ return inputs
+
+ def _forward(self, model_inputs, **generate_kwargs):
+ if self.framework == "pt":
+ in_b, input_length = model_inputs["input_ids"].shape
+ elif self.framework == "tf":
+ in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
+
+ self.check_inputs(
+ input_length,
+ generate_kwargs.get("min_length", self.model.config.min_length),
+ generate_kwargs.get("max_length", self.model.config.max_length),
+ )
+ output_ids = self.model.generate(**model_inputs, **generate_kwargs)
+ out_b = output_ids.shape[0]
+ if self.framework == "pt":
+ output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
+ elif self.framework == "tf":
+ output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
+ return {"output_ids": output_ids}
+
+ def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
+ records = []
+ for output_ids in model_outputs["output_ids"][0]:
+ if return_type == ReturnType.TENSORS:
+ record = {f"{self.return_name}_token_ids": output_ids}
+ elif return_type == ReturnType.TEXT:
+ record = {
+ f"{self.return_name}_text": self.tokenizer.decode(
+ output_ids,
+ skip_special_tokens=True,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ )
+ }
+ records.append(record)
+ return records
+
+
+@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
+class SummarizationPipeline(Text2TextGenerationPipeline):
+ """
+ Summarize news articles and other documents.
+
+ This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"summarization"`.
+
+ The models that this pipeline can use are models that have been fine-tuned on a summarization task, which
+ currently includes '*bart-large-cnn*', '*google-t5/t5-small*', '*google-t5/t5-base*', '*google-t5/t5-large*', '*google-t5/t5-3b*' and '*google-t5/t5-11b*'. See the up-to-date
+ list of available models on [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list
+ of available parameters, see the [following
+ documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
+
+ Usage:
+
+ ```python
+ # use bart in pytorch
+ summarizer = pipeline("summarization")
+ summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
+
+ # use t5 in tf
+ summarizer = pipeline("summarization", model="google-t5/t5-base", tokenizer="google-t5/t5-base", framework="tf")
+ summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
+ ```"""
+
+ # Used in the return key of the pipeline.
+ return_name = "summary"
+
+ def __call__(self, *args, **kwargs):
+ r"""
+ Summarize the text(s) given as inputs.
+
+ Args:
+ documents (`str` or `List[str]`):
+ One or several articles (or one list of articles) to summarize.
+ return_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to include the decoded texts in the outputs.
+ return_tensors (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the tensors of predictions (as token indices) in the outputs.
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+ Whether or not to clean up the potential extra spaces in the text output.
+ generate_kwargs:
+ Additional keyword arguments to pass along to the generate method of the model (see the generate method
+ corresponding to your framework [here](./model#generative-models)).
+
+ Return:
+ A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:
+
+ - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input.
+ - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
+ ids of the summary.
+ """
+ return super().__call__(*args, **kwargs)
+
+ def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
+ """
+ Checks whether there might be something wrong with the given input with regard to the model.
+ """
+ if max_length < min_length:
+ logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")
+
+ if input_length < max_length:
+ logger.warning(
+ f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
+ "a summarization task, where outputs shorter than the input are typically wanted, you might "
+ f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
+ )
+
+
+@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
+class TranslationPipeline(Text2TextGenerationPipeline):
+ """
+ Translates from one language to another.
+
+ This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"translation_xx_to_yy"`.
+
+ The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
+ up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation).
+ For a list of available parameters, see the [following
+ documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
+
+ Usage:
+
+ ```python
+ en_fr_translator = pipeline("translation_en_to_fr")
+ en_fr_translator("How old are you?")
+ ```"""
+
+ # Used in the return key of the pipeline.
+ return_name = "translation"
+
+ def check_inputs(self, input_length: int, min_length: int, max_length: int):
+ if input_length > 0.9 * max_length:
+ logger.warning(
+ f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
+ "increasing your max_length manually, e.g. translator('...', max_length=400)"
+ )
+ return True
+
+ def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
+ if getattr(self.tokenizer, "_build_translation_inputs", None):
+ return self.tokenizer._build_translation_inputs(
+ *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
+ )
+ else:
+ return super()._parse_and_tokenize(*args, truncation=truncation)
+
+ def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
+ preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
+ if src_lang is not None:
+ preprocess_params["src_lang"] = src_lang
+ if tgt_lang is not None:
+ preprocess_params["tgt_lang"] = tgt_lang
+ if src_lang is None and tgt_lang is None:
+ # Backward compatibility, direct argument use is preferred.
+ task = kwargs.get("task", self.task)
+ items = task.split("_")
+ if task and len(items) == 4:
+ # translation, XX, to YY
+ preprocess_params["src_lang"] = items[1]
+ preprocess_params["tgt_lang"] = items[3]
+ return preprocess_params, forward_params, postprocess_params
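+ # e.g. a task name of "translation_en_to_fr" is split into ["translation", "en", "to", "fr"], yielding
+ # src_lang="en" and tgt_lang="fr" when neither is passed explicitly.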
+
+ def __call__(self, *args, **kwargs):
+ r"""
+ Translate the text(s) given as inputs.
+
+ Args:
+ args (`str` or `List[str]`):
+ Texts to be translated.
+ return_tensors (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the tensors of predictions (as token indices) in the outputs.
+ return_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to include the decoded texts in the outputs.
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+ Whether or not to clean up the potential extra spaces in the text output.
+ src_lang (`str`, *optional*):
+ The language of the input. Might be required for multilingual models. Will not have any effect for
+ single pair translation models
+ tgt_lang (`str`, *optional*):
+ The language of the desired output. Might be required for multilingual models. Will not have any effect
+ for single pair translation models
+ generate_kwargs:
+ Additional keyword arguments to pass along to the generate method of the model (see the generate method
+ corresponding to your framework [here](./model#generative-models)).
+
+ Return:
+ A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:
+
+ - **translation_text** (`str`, present when `return_text=True`) -- The translation.
+ - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The
+ token ids of the translation.
+ """
+ return super().__call__(*args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_generation.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b358291717ee0281c28c009bfbb9a0144d68457
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_generation.py
@@ -0,0 +1,374 @@
+import enum
+import warnings
+from typing import Dict
+
+from ..utils import add_end_docstrings, is_tf_available, is_torch_available
+from .base import Pipeline, build_pipeline_init_args
+
+
+if is_torch_available():
+ from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
+
+if is_tf_available():
+ import tensorflow as tf
+
+ from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
+
+
+class ReturnType(enum.Enum):
+ TENSORS = 0
+ NEW_TEXT = 1
+ FULL_TEXT = 2
+
+
+class Chat:
+ """This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats
+ to this format because the rest of the pipeline code tends to assume that lists of messages are
+ actually a batch of samples rather than messages in the same conversation."""
+
+ def __init__(self, messages: Dict):
+ for message in messages:
+ if not ("role" in message and "content" in message):
+ raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
+ self.messages = messages
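+ # Expected shape (sketch): a single conversation is a list of dicts such as
+ #     [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help?"}]
+ # Any message missing either key raises the ValueError above.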
+
+
+@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
+class TextGenerationPipeline(Pipeline):
+ """
+ Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
+ specified text prompt. It can also accept one or more chats. Each chat takes the form of a list of dicts,
+ where each dict contains "role" and "content" keys.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> generator = pipeline(model="openai-community/gpt2")
+ >>> generator("I can't believe you did such a ", do_sample=False)
+ [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]
+
+ >>> # These parameters will return several suggestions, and only the newly generated text, making it easier to use for prompting.
+ >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
+ generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
+ text generation parameters in [Text generation strategies](../generation_strategies) and [Text
+ generation](text_generation).
+
+ This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"text-generation"`.
+
+ The models that this pipeline can use are models that have been trained with an autoregressive language modeling
+ objective, which includes the uni-directional models in the library (e.g. openai-community/gpt2). See the list of available models
+ on [huggingface.co/models](https://huggingface.co/models?filter=text-generation).
+ """
+
+ # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
+ # in https://github.com/rusiaaman/XLNet-gen#methodology
+ # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
+
+ XL_PREFIX = """
+ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
+ voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
+ Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
+ and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
+ accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
+ the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
+ begging for his blessing.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.check_model_type(
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
+ )
+ if "prefix" not in self._preprocess_params:
+ # This is very specific. The logic is quite complex and needs to be done
+ # as a "default".
+ # It also defines both some preprocess_kwargs and generate_kwargs
+ # which is why we cannot put them in their respective methods.
+ prefix = None
+ if self.model.config.prefix is not None:
+ prefix = self.model.config.prefix
+ if prefix is None and self.model.__class__.__name__ in [
+ "XLNetLMHeadModel",
+ "TransfoXLLMHeadModel",
+ "TFXLNetLMHeadModel",
+ "TFTransfoXLLMHeadModel",
+ ]:
+ # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
+ prefix = self.XL_PREFIX
+ if prefix is not None:
+ # Recalculate some generate_kwargs linked to prefix.
+ preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
+ self._preprocess_params = {**self._preprocess_params, **preprocess_params}
+ self._forward_params = {**self._forward_params, **forward_params}
+
+ def _sanitize_parameters(
+ self,
+ return_full_text=None,
+ return_tensors=None,
+ return_text=None,
+ return_type=None,
+ clean_up_tokenization_spaces=None,
+ prefix=None,
+ handle_long_generation=None,
+ stop_sequence=None,
+ add_special_tokens=False,
+ truncation=None,
+ padding=False,
+ max_length=None,
+ **generate_kwargs,
+ ):
+ preprocess_params = {
+ "add_special_tokens": add_special_tokens,
+ "truncation": truncation,
+ "padding": padding,
+ "max_length": max_length,
+ }
+ if max_length is not None:
+ generate_kwargs["max_length"] = max_length
+
+ if prefix is not None:
+ preprocess_params["prefix"] = prefix
+ if prefix:
+ prefix_inputs = self.tokenizer(
+ prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
+ )
+ generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
+
+ if handle_long_generation is not None:
+ if handle_long_generation not in {"hole"}:
+ raise ValueError(
+ f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
+ " [None, 'hole']"
+ )
+ preprocess_params["handle_long_generation"] = handle_long_generation
+
+ preprocess_params.update(generate_kwargs)
+ forward_params = generate_kwargs
+
+ postprocess_params = {}
+ if return_full_text is not None and return_type is None:
+ if return_text is not None:
+ raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
+ if return_tensors is not None:
+ raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
+ return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
+ if return_tensors is not None and return_type is None:
+ if return_text is not None:
+ raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
+ return_type = ReturnType.TENSORS
+ if return_type is not None:
+ postprocess_params["return_type"] = return_type
+ if clean_up_tokenization_spaces is not None:
+ postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
+
+ if stop_sequence is not None:
+ stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
+ if len(stop_sequence_ids) > 1:
+ warnings.warn(
+ "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
+ " the stop sequence will be used as the stop sequence string in the interim."
+ )
+ generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
+
+ return preprocess_params, forward_params, postprocess_params
+
+ # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
+ def _parse_and_tokenize(self, *args, **kwargs):
+ """
+ Parse arguments and tokenize
+ """
+ # Parse arguments
+ if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
+ kwargs.update({"add_space_before_punct_symbol": True})
+
+ return super()._parse_and_tokenize(*args, **kwargs)
+
+ def __call__(self, text_inputs, **kwargs):
+ """
+ Complete the prompt(s) given as inputs.
+
+ Args:
+ text_inputs (`str` or `List[str]`):
+ One or several prompts (or one list of prompts) to complete.
+ return_tensors (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the tensors of predictions (as token indices) in the outputs. If set to
+ `True`, the decoded text is not returned.
+ return_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to return the decoded texts in the outputs.
+ return_full_text (`bool`, *optional*, defaults to `True`):
+ If set to `False`, only the newly generated text is returned; otherwise the full text (prompt plus
+ generation) is returned. Only meaningful if *return_text* is set to `True`.
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean up the potential extra spaces in the text output.
+ prefix (`str`, *optional*):
+ Prefix added to prompt.
+ handle_long_generation (`str`, *optional*):
+ By default, this pipeline does not handle long generation (generation that, in one form or another,
+ exceeds the model's maximum length). There is no perfect way to address this (more info:
+ https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
+ strategies to work around the problem depending on your use case.
+
+ - `None` : default strategy where nothing in particular happens
+ - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
+ truncate a lot of the prompt, and is not suitable when the generation itself exceeds the model's capacity)
+ generate_kwargs (`dict`, *optional*):
+ Additional keyword arguments to pass along to the generate method of the model (see the generate method
+ corresponding to your framework [here](./model#generative-models)).
+
+ Return:
+ A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a combination
+ of both `generated_text` and `generated_token_ids`):
+
+ - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
+ - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
+ ids of the generated text.
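+
+ Example of chat-mode input (an illustrative sketch -- the checkpoint name is an assumption; any model
+ with a chat template works the same way):
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta")  # doctest: +SKIP
+ >>> messages = [{"role": "user", "content": "Tell me a joke about tokenizers."}]
+ >>> generator(messages, max_new_tokens=32, return_full_text=False)  # doctest: +SKIP
+ >>> # Returns [{"generated_text": "..."}] containing only the assistant's reply.
+ ```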
+ """
+ if isinstance(text_inputs, (list, tuple)) and isinstance(text_inputs[0], (list, tuple, dict)):
+ # We have one or more prompts in list-of-dicts format, so this is chat mode
+ if isinstance(text_inputs[0], dict):
+ return super().__call__(Chat(text_inputs), **kwargs)
+ else:
+ chats = [Chat(chat) for chat in text_inputs] # 🐈 🐈 🐈
+ return super().__call__(chats, **kwargs)
+ else:
+ return super().__call__(text_inputs, **kwargs)
+
+ def preprocess(
+ self,
+ prompt_text,
+ prefix="",
+ handle_long_generation=None,
+ add_special_tokens=False,
+ truncation=None,
+ padding=False,
+ max_length=None,
+ **generate_kwargs,
+ ):
+ if isinstance(prompt_text, Chat):
+ inputs = self.tokenizer.apply_chat_template(
+ prompt_text.messages,
+ truncation=truncation,
+ padding=padding,
+ max_length=max_length,
+ add_generation_prompt=True,
+ return_dict=True,
+ return_tensors=self.framework,
+ )
+ else:
+ inputs = self.tokenizer(
+ prefix + prompt_text,
+ truncation=truncation,
+ padding=padding,
+ max_length=max_length,
+ add_special_tokens=add_special_tokens,
+ return_tensors=self.framework,
+ )
+ inputs["prompt_text"] = prompt_text
+
+ if handle_long_generation == "hole":
+ cur_len = inputs["input_ids"].shape[-1]
+ if "max_new_tokens" in generate_kwargs:
+ new_tokens = generate_kwargs["max_new_tokens"]
+ else:
+ new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
+ if new_tokens < 0:
+ raise ValueError("We cannot infer how many new tokens are expected")
+ if cur_len + new_tokens > self.tokenizer.model_max_length:
+ keep_length = self.tokenizer.model_max_length - new_tokens
+ if keep_length <= 0:
+ raise ValueError(
+ "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
+ " models max length"
+ )
+
+ inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
+ if "attention_mask" in inputs:
+ inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
+
+ return inputs
+
+ def _forward(self, model_inputs, **generate_kwargs):
+ input_ids = model_inputs["input_ids"]
+ attention_mask = model_inputs.get("attention_mask", None)
+ # Allow empty prompts
+ if input_ids.shape[1] == 0:
+ input_ids = None
+ attention_mask = None
+ in_b = 1
+ else:
+ in_b = input_ids.shape[0]
+ prompt_text = model_inputs.pop("prompt_text")
+
+ # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
+ # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
+ prefix_length = generate_kwargs.pop("prefix_length", 0)
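+ # Illustrative example: if the XL prefix adds 20 tokens and the user asked for max_length=50, the
+ # effective max_length becomes 70 so that the prefix does not eat into the generation budget.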
+ if prefix_length > 0:
+ has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
+ "generation_config" in generate_kwargs
+ and generate_kwargs["generation_config"].max_new_tokens is not None
+ )
+ if not has_max_new_tokens:
+ generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
+ generate_kwargs["max_length"] += prefix_length
+ has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
+ "generation_config" in generate_kwargs
+ and generate_kwargs["generation_config"].min_new_tokens is not None
+ )
+ if not has_min_new_tokens and "min_length" in generate_kwargs:
+ generate_kwargs["min_length"] += prefix_length
+
+ # The call below returns sequences of shape (batch_size * num_return_sequences, sequence_length);
+ # they are reshaped afterwards so that each input keeps its own group of returned sequences.
+ generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
+ out_b = generated_sequence.shape[0]
+ if self.framework == "pt":
+ generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
+ elif self.framework == "tf":
+ generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
+ return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
+
+ def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
+ generated_sequence = model_outputs["generated_sequence"][0]
+ input_ids = model_outputs["input_ids"]
+ prompt_text = model_outputs["prompt_text"]
+ generated_sequence = generated_sequence.numpy().tolist()
+ records = []
+ for sequence in generated_sequence:
+ if return_type == ReturnType.TENSORS:
+ record = {"generated_token_ids": sequence}
+ elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
+ # Decode text
+ text = self.tokenizer.decode(
+ sequence,
+ skip_special_tokens=True,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ )
+
+ # Remove the decoded prompt (and the PADDING prefix used by XLNet / Transfo-XL) from the decoded
+ # sequence; only the newly generated part is kept as `all_text` below.
+ if input_ids is None:
+ prompt_length = 0
+ else:
+ prompt_length = len(
+ self.tokenizer.decode(
+ input_ids[0],
+ skip_special_tokens=True,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ )
+ )
+
+ all_text = text[prompt_length:]
+ if return_type == ReturnType.FULL_TEXT:
+ if isinstance(prompt_text, str):
+ all_text = prompt_text + all_text
+ elif isinstance(prompt_text, Chat):
+ all_text = prompt_text.messages + [{"role": "assistant", "content": all_text}]
+
+ record = {"generated_text": all_text}
+ records.append(record)
+
+ return records
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/token_classification.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/token_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1d763eafa8b71ee8953faee949102e7cb4dbac8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/token_classification.py
@@ -0,0 +1,570 @@
+import types
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+
+from ..models.bert.tokenization_bert import BasicTokenizer
+from ..utils import (
+ ExplicitEnum,
+ add_end_docstrings,
+ is_tf_available,
+ is_torch_available,
+)
+from .base import ArgumentHandler, ChunkPipeline, Dataset, build_pipeline_init_args
+
+
+if is_tf_available():
+ import tensorflow as tf
+
+ from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
+if is_torch_available():
+ from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
+
+
+class TokenClassificationArgumentHandler(ArgumentHandler):
+ """
+ Handles arguments for token classification.
+ """
+
+ def __call__(self, inputs: Union[str, List[str]], **kwargs):
+ if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0:
+ inputs = list(inputs)
+ batch_size = len(inputs)
+ elif isinstance(inputs, str):
+ inputs = [inputs]
+ batch_size = 1
+ elif (Dataset is not None and isinstance(inputs, Dataset)) or isinstance(inputs, types.GeneratorType):
+ return inputs, None
+ else:
+ raise ValueError("At least one input is required.")
+
+ offset_mapping = kwargs.get("offset_mapping")
+ if offset_mapping:
+ if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
+ offset_mapping = [offset_mapping]
+ if len(offset_mapping) != batch_size:
+ raise ValueError("offset_mapping should have the same batch size as the input")
+ return inputs, offset_mapping
+
+
+class AggregationStrategy(ExplicitEnum):
+ """All the valid aggregation strategies for TokenClassificationPipeline"""
+
+ NONE = "none"
+ SIMPLE = "simple"
+ FIRST = "first"
+ AVERAGE = "average"
+ MAX = "max"
+
+
+@add_end_docstrings(
+ build_pipeline_init_args(has_tokenizer=True),
+ r"""
+ ignore_labels (`List[str]`, defaults to `["O"]`):
+ A list of labels to ignore.
+ grouped_entities (`bool`, *optional*, defaults to `False`):
+ DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the
+ same entity together in the predictions.
+ stride (`int`, *optional*):
+ If stride is provided, the pipeline is applied on the whole text. The text is split into chunks of size
+ model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The
+ value of this argument defines the number of overlapping tokens between chunks. In other words, the model
+ will shift forward by `tokenizer.model_max_length - stride` tokens each step.
+ aggregation_strategy (`str`, *optional*, defaults to `"none"`):
+ The strategy to fuse (or not) tokens based on the model prediction.
+
+ - "none" : Will simply not do any aggregation and simply return raw results from the model
+ - "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,
+ I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D",
+ "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}] Notice that two consecutive B tags will end up as
+ different entities. On word based languages, we might end up splitting words undesirably : Imagine
+ Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity":
+ "NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages
+ that support that meaning, which is basically tokens separated by a space). These mitigations will
+ only work on real words, "New york" might still be tagged with two different entities.
+ - "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
+ end up with different tags. Words will simply use the tag of the first token of the word when there
+ is ambiguity.
+ - "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words,
+ cannot end up with different tags. scores will be averaged first across tokens, and then the maximum
+ label is applied.
+ - "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
+ end up with different tags. Word entity will simply be the token with the maximum score.""",
+)
+class TokenClassificationPipeline(ChunkPipeline):
+ """
+ Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition
+ examples](../task_summary#named-entity-recognition) for more information.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
+ >>> sentence = "Je m'appelle jean-baptiste et je vis à montréal"
+ >>> tokens = token_classifier(sentence)
+ >>> tokens
+ [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}]
+
+ >>> token = tokens[0]
+ >>> # Start and end provide an easy way to highlight words in the original text.
+ >>> sentence[token["start"] : token["end"]]
+ ' jean-baptiste'
+
+ >>> # Some models use the same idea to do part of speech.
+ >>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple")
+ >>> syntaxer("My name is Sarah and I live in London")
+ [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}]
+ ```
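+
+ A minimal sketch of long-input handling (the `stride` value and `some_long_document` below are
+ placeholders; `stride` only has to be smaller than the tokenizer's `model_max_length`):
+
+ ```python
+ >>> long_classifier = pipeline(
+ ... model="Jean-Baptiste/camembert-ner", aggregation_strategy="first", stride=128
+ ... )  # doctest: +SKIP
+ >>> long_classifier(some_long_document)  # doctest: +SKIP
+ ```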
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).
+
+ The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the
+ up-to-date list of available models on
+ [huggingface.co/models](https://huggingface.co/models?filter=token-classification).
+ """
+
+ default_input_names = "sequences"
+
+ def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.check_model_type(
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
+ if self.framework == "tf"
+ else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
+ )
+
+ self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
+ self._args_parser = args_parser
+
+ def _sanitize_parameters(
+ self,
+ ignore_labels=None,
+ grouped_entities: Optional[bool] = None,
+ ignore_subwords: Optional[bool] = None,
+ aggregation_strategy: Optional[AggregationStrategy] = None,
+ offset_mapping: Optional[List[Tuple[int, int]]] = None,
+ stride: Optional[int] = None,
+ ):
+ preprocess_params = {}
+ if offset_mapping is not None:
+ preprocess_params["offset_mapping"] = offset_mapping
+
+ postprocess_params = {}
+ if grouped_entities is not None or ignore_subwords is not None:
+ if grouped_entities and ignore_subwords:
+ aggregation_strategy = AggregationStrategy.FIRST
+ elif grouped_entities and not ignore_subwords:
+ aggregation_strategy = AggregationStrategy.SIMPLE
+ else:
+ aggregation_strategy = AggregationStrategy.NONE
+
+ if grouped_entities is not None:
+ warnings.warn(
+ "`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to"
+ f' `aggregation_strategy="{aggregation_strategy}"` instead.'
+ )
+ if ignore_subwords is not None:
+ warnings.warn(
+ "`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to"
+ f' `aggregation_strategy="{aggregation_strategy}"` instead.'
+ )
+
+ if aggregation_strategy is not None:
+ if isinstance(aggregation_strategy, str):
+ aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()]
+ if (
+ aggregation_strategy
+ in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE}
+ and not self.tokenizer.is_fast
+ ):
+ raise ValueError(
+ "Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option"
+ ' to `"simple"` or use a fast tokenizer.'
+ )
+ postprocess_params["aggregation_strategy"] = aggregation_strategy
+ if ignore_labels is not None:
+ postprocess_params["ignore_labels"] = ignore_labels
+ if stride is not None:
+ if stride >= self.tokenizer.model_max_length:
+ raise ValueError(
+ "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)"
+ )
+ if aggregation_strategy == AggregationStrategy.NONE:
+ raise ValueError(
+ "`stride` was provided to process all the text but `aggregation_strategy="
+ f'"{aggregation_strategy}"`, please select another one instead.'
+ )
+ else:
+ if self.tokenizer.is_fast:
+ tokenizer_params = {
+ "return_overflowing_tokens": True,
+ "padding": True,
+ "stride": stride,
+ }
+ preprocess_params["tokenizer_params"] = tokenizer_params
+ else:
+ raise ValueError(
+ "`stride` was provided to process all the text but you're using a slow tokenizer."
+ " Please use a fast tokenizer."
+ )
+ return preprocess_params, {}, postprocess_params
+
+ def __call__(self, inputs: Union[str, List[str]], **kwargs):
+ """
+ Classify each token of the text(s) given as inputs.
+
+ Args:
+ inputs (`str` or `List[str]`):
+ One or several texts (or one list of texts) for token classification.
+
+ Return:
+ A list or a list of lists of `dict`: Each result comes as a list of dictionaries (one for each token in the
+ corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with
+ the following keys:
+
+ - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you
+ want to have the exact string in the original sentence, use `start` and `end`.
+ - **score** (`float`) -- The corresponding probability for `entity`.
+ - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when
+ *aggregation_strategy* is not `"none"`).
+ - **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding
+ token in the sentence.
+ - **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence.
+ Only exists if the offsets are available within the tokenizer.
+ - **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only
+ exists if the offsets are available within the tokenizer.
+ """
+
+ _inputs, offset_mapping = self._args_parser(inputs, **kwargs)
+ if offset_mapping:
+ kwargs["offset_mapping"] = offset_mapping
+
+ return super().__call__(inputs, **kwargs)
+
+ def preprocess(self, sentence, offset_mapping=None, **preprocess_params):
+ tokenizer_params = preprocess_params.pop("tokenizer_params", {})
+ truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False
+ inputs = self.tokenizer(
+ sentence,
+ return_tensors=self.framework,
+ truncation=truncation,
+ return_special_tokens_mask=True,
+ return_offsets_mapping=self.tokenizer.is_fast,
+ **tokenizer_params,
+ )
+ inputs.pop("overflow_to_sample_mapping", None)
+ num_chunks = len(inputs["input_ids"])
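+ # With a fast tokenizer and `stride`, `return_overflowing_tokens=True` splits a long sentence into
+ # several overlapping chunks; each chunk is yielded below as its own model input.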
+
+ for i in range(num_chunks):
+ if self.framework == "tf":
+ model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()}
+ else:
+ model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()}
+ if offset_mapping is not None:
+ model_inputs["offset_mapping"] = offset_mapping
+ model_inputs["sentence"] = sentence if i == 0 else None
+ model_inputs["is_last"] = i == num_chunks - 1
+
+ yield model_inputs
+
+ def _forward(self, model_inputs):
+ # Forward
+ special_tokens_mask = model_inputs.pop("special_tokens_mask")
+ offset_mapping = model_inputs.pop("offset_mapping", None)
+ sentence = model_inputs.pop("sentence")
+ is_last = model_inputs.pop("is_last")
+ if self.framework == "tf":
+ logits = self.model(**model_inputs)[0]
+ else:
+ output = self.model(**model_inputs)
+ logits = output["logits"] if isinstance(output, dict) else output[0]
+
+ return {
+ "logits": logits,
+ "special_tokens_mask": special_tokens_mask,
+ "offset_mapping": offset_mapping,
+ "sentence": sentence,
+ "is_last": is_last,
+ **model_inputs,
+ }
+
+ def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None):
+ if ignore_labels is None:
+ ignore_labels = ["O"]
+ all_entities = []
+ for model_outputs in all_outputs:
+ logits = model_outputs["logits"][0].numpy()
+ sentence = all_outputs[0]["sentence"]
+ input_ids = model_outputs["input_ids"][0]
+ offset_mapping = (
+ model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None
+ )
+ special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy()
+
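+ # Numerically stable softmax over the label dimension (subtract the per-token max before exponentiating).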
+ maxes = np.max(logits, axis=-1, keepdims=True)
+ shifted_exp = np.exp(logits - maxes)
+ scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
+
+ if self.framework == "tf":
+ input_ids = input_ids.numpy()
+ offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None
+
+ pre_entities = self.gather_pre_entities(
+ sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy
+ )
+ grouped_entities = self.aggregate(pre_entities, aggregation_strategy)
+ # Filter anything that is in self.ignore_labels
+ entities = [
+ entity
+ for entity in grouped_entities
+ if entity.get("entity", None) not in ignore_labels
+ and entity.get("entity_group", None) not in ignore_labels
+ ]
+ all_entities.extend(entities)
+ num_chunks = len(all_outputs)
+ if num_chunks > 1:
+ all_entities = self.aggregate_overlapping_entities(all_entities)
+ return all_entities
+
+ def aggregate_overlapping_entities(self, entities):
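+ # When `stride` produces overlapping chunks, the same span can be detected more than once; keep the
+ # longest entity among overlapping ones and, on equal lengths, the one with the higher score.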
+ if len(entities) == 0:
+ return entities
+ entities = sorted(entities, key=lambda x: x["start"])
+ aggregated_entities = []
+ previous_entity = entities[0]
+ for entity in entities:
+ if previous_entity["start"] <= entity["start"] < previous_entity["end"]:
+ current_length = entity["end"] - entity["start"]
+ previous_length = previous_entity["end"] - previous_entity["start"]
+ if current_length > previous_length:
+ previous_entity = entity
+ elif current_length == previous_length and entity["score"] > previous_entity["score"]:
+ previous_entity = entity
+ else:
+ aggregated_entities.append(previous_entity)
+ previous_entity = entity
+ aggregated_entities.append(previous_entity)
+ return aggregated_entities
+
+ def gather_pre_entities(
+ self,
+ sentence: str,
+ input_ids: np.ndarray,
+ scores: np.ndarray,
+ offset_mapping: Optional[List[Tuple[int, int]]],
+ special_tokens_mask: np.ndarray,
+ aggregation_strategy: AggregationStrategy,
+ ) -> List[dict]:
+ """Fuse various numpy arrays into dicts with all the information needed for aggregation"""
+ pre_entities = []
+ for idx, token_scores in enumerate(scores):
+ # Filter special_tokens
+ if special_tokens_mask[idx]:
+ continue
+
+ word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))
+ if offset_mapping is not None:
+ start_ind, end_ind = offset_mapping[idx]
+ if not isinstance(start_ind, int):
+ if self.framework == "pt":
+ start_ind = start_ind.item()
+ end_ind = end_ind.item()
+ word_ref = sentence[start_ind:end_ind]
+ if getattr(self.tokenizer, "_tokenizer", None) and getattr(
+ self.tokenizer._tokenizer.model, "continuing_subword_prefix", None
+ ):
+ # This is a BPE, word aware tokenizer, there is a correct way
+ # to fuse tokens
+ is_subword = len(word) != len(word_ref)
+ else:
+ # This is a fallback heuristic. It will most likely fail on any kind of text + punctuation
+ # mixture that gets considered a "word". Non word-aware models cannot do better than this,
+ # unfortunately.
+ if aggregation_strategy in {
+ AggregationStrategy.FIRST,
+ AggregationStrategy.AVERAGE,
+ AggregationStrategy.MAX,
+ }:
+ warnings.warn(
+ "Tokenizer does not support real words, using fallback heuristic",
+ UserWarning,
+ )
+ is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1]
+
+ if int(input_ids[idx]) == self.tokenizer.unk_token_id:
+ word = word_ref
+ is_subword = False
+ else:
+ start_ind = None
+ end_ind = None
+ is_subword = False
+
+ pre_entity = {
+ "word": word,
+ "scores": token_scores,
+ "start": start_ind,
+ "end": end_ind,
+ "index": idx,
+ "is_subword": is_subword,
+ }
+ pre_entities.append(pre_entity)
+ return pre_entities
+
+ def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:
+ if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:
+ entities = []
+ for pre_entity in pre_entities:
+ entity_idx = pre_entity["scores"].argmax()
+ score = pre_entity["scores"][entity_idx]
+ entity = {
+ "entity": self.model.config.id2label[entity_idx],
+ "score": score,
+ "index": pre_entity["index"],
+ "word": pre_entity["word"],
+ "start": pre_entity["start"],
+ "end": pre_entity["end"],
+ }
+ entities.append(entity)
+ else:
+ entities = self.aggregate_words(pre_entities, aggregation_strategy)
+
+ if aggregation_strategy == AggregationStrategy.NONE:
+ return entities
+ return self.group_entities(entities)
+
+ def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict:
+ word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities])
+ if aggregation_strategy == AggregationStrategy.FIRST:
+ scores = entities[0]["scores"]
+ idx = scores.argmax()
+ score = scores[idx]
+ entity = self.model.config.id2label[idx]
+ elif aggregation_strategy == AggregationStrategy.MAX:
+ max_entity = max(entities, key=lambda entity: entity["scores"].max())
+ scores = max_entity["scores"]
+ idx = scores.argmax()
+ score = scores[idx]
+ entity = self.model.config.id2label[idx]
+ elif aggregation_strategy == AggregationStrategy.AVERAGE:
+ scores = np.stack([entity["scores"] for entity in entities])
+ average_scores = np.nanmean(scores, axis=0)
+ entity_idx = average_scores.argmax()
+ entity = self.model.config.id2label[entity_idx]
+ score = average_scores[entity_idx]
+ else:
+ raise ValueError("Invalid aggregation_strategy")
+ new_entity = {
+ "entity": entity,
+ "score": score,
+ "word": word,
+ "start": entities[0]["start"],
+ "end": entities[-1]["end"],
+ }
+ return new_entity
+
+ def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:
+ """
+ Override tokens from a given word that disagree to force agreement on word boundaries.
+
+ Example: the tokens micro|soft| com|pany| tagged B-ENT I-NAME I-ENT I-ENT will be rewritten with the FIRST
+ strategy as microsoft| company| tagged B-ENT I-ENT.
+ """
+ if aggregation_strategy in {
+ AggregationStrategy.NONE,
+ AggregationStrategy.SIMPLE,
+ }:
+ raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation")
+
+ word_entities = []
+ word_group = None
+ for entity in entities:
+ if word_group is None:
+ word_group = [entity]
+ elif entity["is_subword"]:
+ word_group.append(entity)
+ else:
+ word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
+ word_group = [entity]
+ # Last item
+ if word_group is not None:
+ word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
+ return word_entities
+
+ def group_sub_entities(self, entities: List[dict]) -> dict:
+ """
+ Group together the adjacent tokens with the same entity predicted.
+
+ Args:
+ entities (`List[dict]`): The entities predicted by the pipeline.
+ """
+ # Get the first entity in the entity group
+ entity = entities[0]["entity"].split("-", 1)[-1]
+ scores = np.nanmean([entity["score"] for entity in entities])
+ tokens = [entity["word"] for entity in entities]
+
+ entity_group = {
+ "entity_group": entity,
+ "score": np.mean(scores),
+ "word": self.tokenizer.convert_tokens_to_string(tokens),
+ "start": entities[0]["start"],
+ "end": entities[-1]["end"],
+ }
+ return entity_group
+
+ def get_tag(self, entity_name: str) -> Tuple[str, str]:
+ if entity_name.startswith("B-"):
+ bi = "B"
+ tag = entity_name[2:]
+ elif entity_name.startswith("I-"):
+ bi = "I"
+ tag = entity_name[2:]
+ else:
+ # It's not in B-, I- format
+ # Default to I- for continuation.
+ bi = "I"
+ tag = entity_name
+ return bi, tag
+
+ def group_entities(self, entities: List[dict]) -> List[dict]:
+ """
+ Find and group together the adjacent tokens with the same entity predicted.
+
+ Args:
+ entities (`List[dict]`): The entities predicted by the pipeline.
+ """
+
+ entity_groups = []
+ entity_group_disagg = []
+
+ for entity in entities:
+ if not entity_group_disagg:
+ entity_group_disagg.append(entity)
+ continue
+
+ # If the current entity is similar and adjacent to the previous entity,
+ # append it to the disaggregated entity group
+ # The split is meant to account for the "B" and "I" prefixes
+ # Shouldn't merge if both entities are B-type
+ bi, tag = self.get_tag(entity["entity"])
+ last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"])
+
+ if tag == last_tag and bi != "B":
+ # Modify subword type to be previous_type
+ entity_group_disagg.append(entity)
+ else:
+ # If the current entity is different from the previous entity
+ # aggregate the disaggregated entity group
+ entity_groups.append(self.group_sub_entities(entity_group_disagg))
+ entity_group_disagg = [entity]
+ if entity_group_disagg:
+ # it's the last entity, add it to the entity groups
+ entity_groups.append(self.group_sub_entities(entity_group_disagg))
+
+ return entity_groups
+
+
+NerPipeline = TokenClassificationPipeline
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/video_classification.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/video_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8596ce14c714ac05a99f4af69d1b768f9ba9938
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/video_classification.py
@@ -0,0 +1,122 @@
+from io import BytesIO
+from typing import List, Union
+
+import requests
+
+from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
+from .base import Pipeline, build_pipeline_init_args
+
+
+if is_decord_available():
+ import numpy as np
+ from decord import VideoReader
+
+
+if is_torch_available():
+ from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES
+
+logger = logging.get_logger(__name__)
+
+
+@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
+class VideoClassificationPipeline(Pipeline):
+ """
+ Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
+ video.
+
+ This video classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"video-classification"`.
+
+ See the list of available models on
+ [huggingface.co/models](https://huggingface.co/models?filter=video-classification).
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ requires_backends(self, "decord")
+ self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES)
+
+ def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
+ preprocess_params = {}
+ if frame_sampling_rate is not None:
+ preprocess_params["frame_sampling_rate"] = frame_sampling_rate
+ if num_frames is not None:
+ preprocess_params["num_frames"] = num_frames
+
+ postprocess_params = {}
+ if top_k is not None:
+ postprocess_params["top_k"] = top_k
+ return preprocess_params, {}, postprocess_params
+
+ def __call__(self, videos: Union[str, List[str]], **kwargs):
+ """
+ Assign labels to the video(s) passed as inputs.
+
+ Args:
+ videos (`str`, `List[str]`):
+ The pipeline handles two types of videos:
+
+ - A string containing an HTTP link pointing to a video
+ - A string containing a local path to a video
+
+ The pipeline accepts either a single video or a batch of videos, each of which must be passed as a
+ string. Videos in a batch must all be in the same format: all as HTTP links or all as local paths.
+ top_k (`int`, *optional*, defaults to 5):
+ The number of top labels that will be returned by the pipeline. If the provided number is higher than
+ the number of labels available in the model configuration, it will default to the number of labels.
+ num_frames (`int`, *optional*, defaults to `self.model.config.num_frames`):
+ The number of frames sampled from the video to run the classification on. If not provided, will default
+ to the number of frames specified in the model configuration.
+ frame_sampling_rate (`int`, *optional*, defaults to 1):
+ The sampling rate used to select frames from the video. If not provided, will default to 1, i.e. every
+ frame will be used.
+
+ Return:
+ A dictionary or a list of dictionaries containing the result. If the input is a single video, a
+ dictionary will be returned; if the input is a list of several videos, a list of dictionaries
+ corresponding to the videos will be returned.
+
+ The dictionaries contain the following keys:
+
+ - **label** (`str`) -- The label identified by the model.
+ - **score** (`float`) -- The score attributed by the model for that label.
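+
+ Example (an illustrative sketch -- the checkpoint name and video URL are assumptions):
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")  # doctest: +SKIP
+ >>> classifier("https://example.com/some_video.mp4", top_k=3)  # doctest: +SKIP
+ >>> # Returns a list of {"score": float, "label": str} dictionaries, highest score first.
+ ```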
+ """
+ return super().__call__(videos, **kwargs)
+
+ def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
+ if num_frames is None:
+ num_frames = self.model.config.num_frames
+
+ if video.startswith("http://") or video.startswith("https://"):
+ video = BytesIO(requests.get(video).content)
+
+ videoreader = VideoReader(video)
+ videoreader.seek(0)
+
+ start_idx = 0
+ end_idx = num_frames * frame_sampling_rate - 1
+ indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
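+ # Illustrative example: num_frames=8 and frame_sampling_rate=4 pick 8 indices spread evenly across
+ # the first 32 frames (roughly every 4th frame, allowing for integer rounding).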
+
+ video = videoreader.get_batch(indices).asnumpy()
+ video = list(video)
+
+ model_inputs = self.image_processor(video, return_tensors=self.framework)
+ return model_inputs
+
+ def _forward(self, model_inputs):
+ model_outputs = self.model(**model_inputs)
+ return model_outputs
+
+ def postprocess(self, model_outputs, top_k=5):
+ if top_k > self.model.config.num_labels:
+ top_k = self.model.config.num_labels
+
+ if self.framework == "pt":
+ probs = model_outputs.logits.softmax(-1)[0]
+ scores, ids = probs.topk(top_k)
+ else:
+ raise ValueError(f"Unsupported framework: {self.framework}")
+
+ scores = scores.tolist()
+ ids = ids.tolist()
+ return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash/__init__.py b/env-llmeval/lib/python3.10/site-packages/xxhash/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..043c486ddf9d2770d7fbee4bff1c26985ac6cd7e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/xxhash/__init__.py
@@ -0,0 +1,63 @@
+from ._xxhash import (
+ xxh32,
+ xxh32_digest,
+ xxh32_intdigest,
+ xxh32_hexdigest,
+ xxh64,
+ xxh64_digest,
+ xxh64_intdigest,
+ xxh64_hexdigest,
+ xxh3_64,
+ xxh3_64_digest,
+ xxh3_64_intdigest,
+ xxh3_64_hexdigest,
+ xxh3_128,
+ xxh3_128_digest,
+ xxh3_128_intdigest,
+ xxh3_128_hexdigest,
+ XXHASH_VERSION,
+)
+
+from .version import VERSION, VERSION_TUPLE
+
+
+xxh128 = xxh3_128
+xxh128_hexdigest = xxh3_128_hexdigest
+xxh128_intdigest = xxh3_128_intdigest
+xxh128_digest = xxh3_128_digest
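+
+# A minimal usage sketch (illustrative; no digest values are shown since they depend on the input):
+#
+#     import xxhash
+#
+#     h = xxhash.xxh64(seed=0)
+#     h.update(b"data")
+#     h.hexdigest()                        # 16 hex characters (64-bit digest)
+#     xxhash.xxh3_128_hexdigest(b"data")   # one-shot helper, 32 hex characters (128-bit digest)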
+
+algorithms_available = set([
+ "xxh32",
+ "xxh64",
+ "xxh3_64",
+ "xxh128",
+ "xxh3_128",
+])
+
+
+__all__ = [
+ "xxh32",
+ "xxh32_digest",
+ "xxh32_intdigest",
+ "xxh32_hexdigest",
+ "xxh64",
+ "xxh64_digest",
+ "xxh64_intdigest",
+ "xxh64_hexdigest",
+ "xxh3_64",
+ "xxh3_64_digest",
+ "xxh3_64_intdigest",
+ "xxh3_64_hexdigest",
+ "xxh3_128",
+ "xxh3_128_digest",
+ "xxh3_128_intdigest",
+ "xxh3_128_hexdigest",
+ "xxh128",
+ "xxh128_digest",
+ "xxh128_intdigest",
+ "xxh128_hexdigest",
+ "VERSION",
+ "VERSION_TUPLE",
+ "XXHASH_VERSION",
+ "algorithms_available",
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/xxhash/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..03c62497a3aba0fc5695ee033c0a402e41ae8c44
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/xxhash/__init__.pyi
@@ -0,0 +1,62 @@
+import array
+from typing import Union
+from typing_extensions import final
+
+_InputType = Union[str, bytes, bytearray, memoryview, array.ArrayType[int]]
+
+VERSION: str
+XXHASH_VERSION: str
+VERSION_TUPLE: tuple[int, ...]
+
+algorithms_available: set[str]
+
+class _Hasher:
+ def __init__(self, input: _InputType = ..., seed: int = ...) -> None: ...
+ def update(self, input: _InputType) -> None: ...
+ def digest(self) -> bytes: ...
+ def hexdigest(self) -> str: ...
+ def intdigest(self) -> int: ...
+ def copy(self) -> _Hasher: ...
+ def reset(self) -> None: ...
+ @property
+ def digestsize(self) -> int: ...
+ @property
+ def digest_size(self) -> int: ...
+ @property
+ def block_size(self) -> int: ...
+ @property
+ def name(self) -> str: ...
+ @property
+ def seed(self) -> int: ...
+
+@final
+class xxh32(_Hasher): ...
+
+@final
+class xxh3_64(_Hasher): ...
+
+@final
+class xxh3_128(_Hasher): ...
+
+xxh64 = xxh3_64
+xxh128 = xxh3_128
+
+def xxh32_digest(args: _InputType, seed: int = ...) -> bytes: ...
+def xxh32_hexdigest(args: _InputType, seed: int = ...) -> str: ...
+def xxh32_intdigest(args: _InputType, seed: int = ...) -> int: ...
+
+def xxh3_64_digest(args: _InputType, seed: int = ...) -> bytes: ...
+def xxh3_64_hexdigest(args: _InputType, seed: int = ...) -> str: ...
+def xxh3_64_intdigest(args: _InputType, seed: int = ...) -> int: ...
+
+def xxh3_128_digest(args: _InputType, seed: int = ...) -> bytes: ...
+def xxh3_128_hexdigest(args: _InputType, seed: int = ...) -> str: ...
+def xxh3_128_intdigest(args: _InputType, seed: int = ...) -> int: ...
+
+xxh64_digest = xxh3_64_digest
+xxh64_hexdigest = xxh3_64_hexdigest
+xxh64_intdigest = xxh3_64_intdigest
+
+xxh128_digest = xxh3_128_digest
+xxh128_hexdigest = xxh3_128_hexdigest
+xxh128_intdigest = xxh3_128_intdigest
diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3a2e50404b3555ed22c1e112f54c4c6dd85d4a77
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash/version.py b/env-llmeval/lib/python3.10/site-packages/xxhash/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9ca3e3bf151e33af0c511167c590e767caa2528
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/xxhash/version.py
@@ -0,0 +1,2 @@
+VERSION = "3.4.1"
+VERSION_TUPLE = (3, 4, 1)