diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a26ab5776d74715428b10c4d9cd943e53b253785
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
+from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
+from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
+from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/glue.py b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/glue.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d22968c9d06323c7c1cd4b00e5fcd2e6cf3f35d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/glue.py
@@ -0,0 +1,643 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GLUE processors and helpers""" + +import os +import warnings +from dataclasses import asdict +from enum import Enum +from typing import List, Optional, Union + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import is_tf_available, logging +from .utils import DataProcessor, InputExample, InputFeatures + + +if is_tf_available(): + import tensorflow as tf + +logger = logging.get_logger(__name__) + +DEPRECATION_WARNING = ( + "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " + "library. You can have a look at this example script for pointers: " + "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" +) + + +def glue_convert_examples_to_features( + examples: Union[List[InputExample], "tf.data.Dataset"], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + """ + Loads a data file into a list of `InputFeatures` + + Args: + examples: List of `InputExamples` or `tf.data.Dataset` containing the examples. + tokenizer: Instance of a tokenizer that will tokenize the examples + max_length: Maximum example length. Defaults to the tokenizer's max_len + task: GLUE task + label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method + output_mode: String indicating the output mode. Either `regression` or `classification` + + Returns: + If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific + features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which + can be fed to the model. + + """ + warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning) + if is_tf_available() and isinstance(examples, tf.data.Dataset): + if task is None: + raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.") + return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + return _glue_convert_examples_to_features( + examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode + ) + + +if is_tf_available(): + + def _tf_glue_convert_examples_to_features( + examples: tf.data.Dataset, + tokenizer: PreTrainedTokenizer, + task=str, + max_length: Optional[int] = None, + ) -> tf.data.Dataset: + """ + Returns: + A `tf.data.Dataset` containing the task-specific features. 
+ + """ + processor = glue_processors[task]() + examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples] + features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + label_type = tf.float32 if task == "sts-b" else tf.int64 + + def gen(): + for ex in features: + d = {k: v for k, v in asdict(ex).items() if v is not None} + label = d.pop("label") + yield (d, label) + + input_names = tokenizer.model_input_names + + return tf.data.Dataset.from_generator( + gen, + ({k: tf.int32 for k in input_names}, label_type), + ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), + ) + + +def _glue_convert_examples_to_features( + examples: List[InputExample], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + if max_length is None: + max_length = tokenizer.model_max_length + + if task is not None: + processor = glue_processors[task]() + if label_list is None: + label_list = processor.get_labels() + logger.info(f"Using label list {label_list} for task {task}") + if output_mode is None: + output_mode = glue_output_modes[task] + logger.info(f"Using output mode {output_mode} for task {task}") + + label_map = {label: i for i, label in enumerate(label_list)} + + def label_from_example(example: InputExample) -> Union[int, float, None]: + if example.label is None: + return None + if output_mode == "classification": + return label_map[example.label] + elif output_mode == "regression": + return float(example.label) + raise KeyError(output_mode) + + labels = [label_from_example(example) for example in examples] + + batch_encoding = tokenizer( + [(example.text_a, example.text_b) for example in examples], + max_length=max_length, + padding="max_length", + truncation=True, + ) + + features = [] + for i in range(len(examples)): + inputs = {k: batch_encoding[k][i] for k in batch_encoding} + + feature = InputFeatures(**inputs, label=labels[i]) + features.append(feature) + + for i, example in enumerate(examples[:5]): + logger.info("*** Example ***") + logger.info(f"guid: {example.guid}") + logger.info(f"features: {features[i]}") + + return features + + +class OutputMode(Enum): + classification = "classification" + regression = "regression" + + +class MrpcProcessor(DataProcessor): + """Processor for the MRPC data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}") + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, 
dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[3] + text_b = line[4] + label = None if set_type == "test" else line[0] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliProcessor(DataProcessor): + """Processor for the MultiNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["premise"].numpy().decode("utf-8"), + tensor_dict["hypothesis"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched") + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[8] + text_b = line[9] + label = None if set_type.startswith("test") else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliMismatchedProcessor(MnliProcessor): + """Processor for the MultiNLI Mismatched data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched") + + +class ColaProcessor(DataProcessor): + """Processor for the CoLA data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def 
_create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + if test_mode: + lines = lines[1:] + text_index = 1 if test_mode else 3 + examples = [] + for i, line in enumerate(lines): + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if test_mode else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class Sst2Processor(DataProcessor): + """Processor for the SST-2 data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + text_index = 1 if set_type == "test" else 0 + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if set_type == "test" else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class StsbProcessor(DataProcessor): + """Processor for the STS-B data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return [None] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[7] + text_b = line[8] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QqpProcessor(DataProcessor): + """Processor for the QQP data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + 
super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question1"].numpy().decode("utf-8"), + tensor_dict["question2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + q1_index = 1 if test_mode else 3 + q2_index = 2 if test_mode else 4 + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + try: + text_a = line[q1_index] + text_b = line[q2_index] + label = None if test_mode else line[5] + except IndexError: + continue + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QnliProcessor(DataProcessor): + """Processor for the QNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question"].numpy().decode("utf-8"), + tensor_dict["sentence"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class RteProcessor(DataProcessor): + """Processor for the RTE data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): 
+ """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class WnliProcessor(DataProcessor): + """Processor for the WNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +glue_tasks_num_labels = { + "cola": 2, + "mnli": 3, + "mrpc": 2, + "sst-2": 2, + "sts-b": 1, + "qqp": 2, + "qnli": 2, + "rte": 2, + "wnli": 2, +} + +glue_processors = { + "cola": ColaProcessor, + "mnli": MnliProcessor, + "mnli-mm": MnliMismatchedProcessor, + "mrpc": MrpcProcessor, + "sst-2": Sst2Processor, + "sts-b": StsbProcessor, + "qqp": QqpProcessor, + "qnli": QnliProcessor, + "rte": RteProcessor, + "wnli": WnliProcessor, +} + +glue_output_modes = { + "cola": "classification", + "mnli": "classification", + "mnli-mm": "classification", + "mrpc": "classification", + "sst-2": "classification", + "sts-b": "regression", + "qqp": "classification", + "qnli": "classification", + "rte": "classification", + "wnli": "classification", +} diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/squad.py b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/squad.py new file mode 100644 index 0000000000000000000000000000000000000000..0f8bd2480551158c9916215e43436c8e027dbed0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/squad.py @@ -0,0 
+1,845 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from functools import partial +from multiprocessing import Pool, cpu_count + +import numpy as np +from tqdm import tqdm + +from ...models.bert.tokenization_bert import whitespace_tokenize +from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy +from ...utils import is_tf_available, is_torch_available, logging +from .utils import DataProcessor + + +# Store the tokenizers which insert 2 separators tokens +MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"} + + +if is_torch_available(): + import torch + from torch.utils.data import TensorDataset + +if is_tf_available(): + import tensorflow as tf + +logger = logging.get_logger(__name__) + + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start : (new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + best_score = None + best_span_index = None + for span_index, doc_span in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def _new_check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + # if len(doc_spans) == 1: + # return True + best_score = None + best_span_index = None + for span_index, doc_span in enumerate(doc_spans): + end = doc_span["start"] + doc_span["length"] - 1 + if position < doc_span["start"]: + continue + if position > end: + continue + num_left_context = position - doc_span["start"] + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def _is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + +def squad_convert_example_to_features( + example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training +): + features = [] + if is_training and 
not example.is_impossible: + # Get start and end position + start_position = example.start_position + end_position = example.end_position + + # If the answer cannot be found in the text, then skip this example. + actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)]) + cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'") + return [] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for i, token in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + if tokenizer.__class__.__name__ in [ + "RobertaTokenizer", + "LongformerTokenizer", + "BartTokenizer", + "RobertaTokenizerFast", + "LongformerTokenizerFast", + "BartTokenizerFast", + ]: + sub_tokens = tokenizer.tokenize(token, add_prefix_space=True) + else: + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text + ) + + spans = [] + + truncated_query = tokenizer.encode( + example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length + ) + + # Tokenizers who insert 2 SEP tokens in-between & need to have special handling + # in the way they compute mask of added tokens. 
+ tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower() + sequence_added_tokens = ( + tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1 + if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET + else tokenizer.model_max_length - tokenizer.max_len_single_sentence + ) + sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair + + span_doc_tokens = all_doc_tokens + while len(spans) * doc_stride < len(all_doc_tokens): + # Define the side we want to truncate / pad and the text/pair sorting + if tokenizer.padding_side == "right": + texts = truncated_query + pairs = span_doc_tokens + truncation = TruncationStrategy.ONLY_SECOND.value + else: + texts = span_doc_tokens + pairs = truncated_query + truncation = TruncationStrategy.ONLY_FIRST.value + + encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic + texts, + pairs, + truncation=truncation, + padding=padding_strategy, + max_length=max_seq_length, + return_overflowing_tokens=True, + stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens, + return_token_type_ids=True, + ) + + paragraph_len = min( + len(all_doc_tokens) - len(spans) * doc_stride, + max_seq_length - len(truncated_query) - sequence_pair_added_tokens, + ) + + if tokenizer.pad_token_id in encoded_dict["input_ids"]: + if tokenizer.padding_side == "right": + non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)] + else: + last_padding_id_position = ( + len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id) + ) + non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :] + + else: + non_padded_ids = encoded_dict["input_ids"] + + tokens = tokenizer.convert_ids_to_tokens(non_padded_ids) + + token_to_orig_map = {} + for i in range(paragraph_len): + index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i + token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i] + + encoded_dict["paragraph_len"] = paragraph_len + encoded_dict["tokens"] = tokens + encoded_dict["token_to_orig_map"] = token_to_orig_map + encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens + encoded_dict["token_is_max_context"] = {} + encoded_dict["start"] = len(spans) * doc_stride + encoded_dict["length"] = paragraph_len + + spans.append(encoded_dict) + + if "overflowing_tokens" not in encoded_dict or ( + "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0 + ): + break + span_doc_tokens = encoded_dict["overflowing_tokens"] + + for doc_span_index in range(len(spans)): + for j in range(spans[doc_span_index]["paragraph_len"]): + is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j) + index = ( + j + if tokenizer.padding_side == "left" + else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j + ) + spans[doc_span_index]["token_is_max_context"][index] = is_max_context + + for span in spans: + # Identify the position of the CLS token + cls_index = span["input_ids"].index(tokenizer.cls_token_id) + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # Original TF implementation also keep the classification token (set to 0) + p_mask = np.ones_like(span["token_type_ids"]) + if tokenizer.padding_side == "right": + p_mask[len(truncated_query) + 
sequence_added_tokens :] = 0 + else: + p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0 + + pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id) + special_token_indices = np.asarray( + tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True) + ).nonzero() + + p_mask[pad_token_indices] = 1 + p_mask[special_token_indices] = 1 + + # Set the cls index to 0: the CLS index can be used for impossible answers + p_mask[cls_index] = 0 + + span_is_impossible = example.is_impossible + start_position = 0 + end_position = 0 + if is_training and not span_is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = span["start"] + doc_end = span["start"] + span["length"] - 1 + out_of_span = False + + if not (tok_start_position >= doc_start and tok_end_position <= doc_end): + out_of_span = True + + if out_of_span: + start_position = cls_index + end_position = cls_index + span_is_impossible = True + else: + if tokenizer.padding_side == "left": + doc_offset = 0 + else: + doc_offset = len(truncated_query) + sequence_added_tokens + + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + features.append( + SquadFeatures( + span["input_ids"], + span["attention_mask"], + span["token_type_ids"], + cls_index, + p_mask.tolist(), + example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing. + unique_id=0, + paragraph_len=span["paragraph_len"], + token_is_max_context=span["token_is_max_context"], + tokens=span["tokens"], + token_to_orig_map=span["token_to_orig_map"], + start_position=start_position, + end_position=end_position, + is_impossible=span_is_impossible, + qas_id=example.qas_id, + ) + ) + return features + + +def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase): + global tokenizer + tokenizer = tokenizer_for_convert + + +def squad_convert_examples_to_features( + examples, + tokenizer, + max_seq_length, + doc_stride, + max_query_length, + is_training, + padding_strategy="max_length", + return_dataset=False, + threads=1, + tqdm_enabled=True, +): + """ + Converts a list of examples into a list of features that can be directly given as input to a model. It is + model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs. + + Args: + examples: list of [`~data.processors.squad.SquadExample`] + tokenizer: an instance of a child of [`PreTrainedTokenizer`] + max_seq_length: The maximum sequence length of the inputs. + doc_stride: The stride used when the context is too large and is split across several features. + max_query_length: The maximum length of the query. + is_training: whether to create features for model evaluation or model training. + padding_strategy: Default to "max_length". Which padding strategy to use + return_dataset: Default False. Either 'pt' or 'tf'. + if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset + threads: multiple processing threads. 
+ + + Returns: + list of [`~data.processors.squad.SquadFeatures`] + + Example: + + ```python + processor = SquadV2Processor() + examples = processor.get_dev_examples(data_dir) + + features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=not evaluate, + ) + ```""" + # Defining helper methods + features = [] + + threads = min(threads, cpu_count()) + with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p: + annotate_ = partial( + squad_convert_example_to_features, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + padding_strategy=padding_strategy, + is_training=is_training, + ) + features = list( + tqdm( + p.imap(annotate_, examples, chunksize=32), + total=len(examples), + desc="convert squad examples to features", + disable=not tqdm_enabled, + ) + ) + + new_features = [] + unique_id = 1000000000 + example_index = 0 + for example_features in tqdm( + features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled + ): + if not example_features: + continue + for example_feature in example_features: + example_feature.example_index = example_index + example_feature.unique_id = unique_id + new_features.append(example_feature) + unique_id += 1 + example_index += 1 + features = new_features + del new_features + if return_dataset == "pt": + if not is_torch_available(): + raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.") + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) + all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) + all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) + all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float) + + if not is_training: + all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + dataset = TensorDataset( + all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask + ) + else: + all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) + all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) + dataset = TensorDataset( + all_input_ids, + all_attention_masks, + all_token_type_ids, + all_start_positions, + all_end_positions, + all_cls_index, + all_p_mask, + all_is_impossible, + ) + + return features, dataset + elif return_dataset == "tf": + if not is_tf_available(): + raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.") + + def gen(): + for i, ex in enumerate(features): + if ex.token_type_ids is None: + yield ( + { + "input_ids": ex.input_ids, + "attention_mask": ex.attention_mask, + "feature_index": i, + "qas_id": ex.qas_id, + }, + { + "start_positions": ex.start_position, + "end_positions": ex.end_position, + "cls_index": ex.cls_index, + "p_mask": ex.p_mask, + "is_impossible": ex.is_impossible, + }, + ) + else: + yield ( + { + "input_ids": ex.input_ids, + "attention_mask": ex.attention_mask, + "token_type_ids": ex.token_type_ids, + "feature_index": i, + "qas_id": ex.qas_id, + 
}, + { + "start_positions": ex.start_position, + "end_positions": ex.end_position, + "cls_index": ex.cls_index, + "p_mask": ex.p_mask, + "is_impossible": ex.is_impossible, + }, + ) + + # Why have we split the batch into a tuple? PyTorch just has a list of tensors. + if "token_type_ids" in tokenizer.model_input_names: + train_types = ( + { + "input_ids": tf.int32, + "attention_mask": tf.int32, + "token_type_ids": tf.int32, + "feature_index": tf.int64, + "qas_id": tf.string, + }, + { + "start_positions": tf.int64, + "end_positions": tf.int64, + "cls_index": tf.int64, + "p_mask": tf.int32, + "is_impossible": tf.int32, + }, + ) + + train_shapes = ( + { + "input_ids": tf.TensorShape([None]), + "attention_mask": tf.TensorShape([None]), + "token_type_ids": tf.TensorShape([None]), + "feature_index": tf.TensorShape([]), + "qas_id": tf.TensorShape([]), + }, + { + "start_positions": tf.TensorShape([]), + "end_positions": tf.TensorShape([]), + "cls_index": tf.TensorShape([]), + "p_mask": tf.TensorShape([None]), + "is_impossible": tf.TensorShape([]), + }, + ) + else: + train_types = ( + {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string}, + { + "start_positions": tf.int64, + "end_positions": tf.int64, + "cls_index": tf.int64, + "p_mask": tf.int32, + "is_impossible": tf.int32, + }, + ) + + train_shapes = ( + { + "input_ids": tf.TensorShape([None]), + "attention_mask": tf.TensorShape([None]), + "feature_index": tf.TensorShape([]), + "qas_id": tf.TensorShape([]), + }, + { + "start_positions": tf.TensorShape([]), + "end_positions": tf.TensorShape([]), + "cls_index": tf.TensorShape([]), + "p_mask": tf.TensorShape([None]), + "is_impossible": tf.TensorShape([]), + }, + ) + + return tf.data.Dataset.from_generator(gen, train_types, train_shapes) + else: + return features + + +class SquadProcessor(DataProcessor): + """ + Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and + version 2.0 of SQuAD, respectively. + """ + + train_file = None + dev_file = None + + def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False): + if not evaluate: + answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8") + answer_start = tensor_dict["answers"]["answer_start"][0].numpy() + answers = [] + else: + answers = [ + {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")} + for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"]) + ] + + answer = None + answer_start = None + + return SquadExample( + qas_id=tensor_dict["id"].numpy().decode("utf-8"), + question_text=tensor_dict["question"].numpy().decode("utf-8"), + context_text=tensor_dict["context"].numpy().decode("utf-8"), + answer_text=answer, + start_position_character=answer_start, + title=tensor_dict["title"].numpy().decode("utf-8"), + answers=answers, + ) + + def get_examples_from_dataset(self, dataset, evaluate=False): + """ + Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset. 
+ + Args: + dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")* + evaluate: Boolean specifying if in evaluation mode or in training mode + + Returns: + List of SquadExample + + Examples: + + ```python + >>> import tensorflow_datasets as tfds + + >>> dataset = tfds.load("squad") + + >>> training_examples = get_examples_from_dataset(dataset, evaluate=False) + >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True) + ```""" + + if evaluate: + dataset = dataset["validation"] + else: + dataset = dataset["train"] + + examples = [] + for tensor_dict in tqdm(dataset): + examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate)) + + return examples + + def get_train_examples(self, data_dir, filename=None): + """ + Returns the training examples from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the training file has a different name than the original one + which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. + + """ + if data_dir is None: + data_dir = "" + + if self.train_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open( + os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8" + ) as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "train") + + def get_dev_examples(self, data_dir, filename=None): + """ + Returns the evaluation example from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the evaluation file has a different name than the original one + which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively. 
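+
+        Example (an illustrative sketch; `data_dir` is assumed to point at a directory containing the official `dev-v2.0.json` file):
+
+        ```python
+        processor = SquadV2Processor()
+        examples = processor.get_dev_examples("path/to/squad-data")
+        print(len(examples), examples[0].question_text)
+        ```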
+ """ + if data_dir is None: + data_dir = "" + + if self.dev_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open( + os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8" + ) as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "dev") + + def _create_examples(self, input_data, set_type): + is_training = set_type == "train" + examples = [] + for entry in tqdm(input_data): + title = entry["title"] + for paragraph in entry["paragraphs"]: + context_text = paragraph["context"] + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position_character = None + answer_text = None + answers = [] + + is_impossible = qa.get("is_impossible", False) + if not is_impossible: + if is_training: + answer = qa["answers"][0] + answer_text = answer["text"] + start_position_character = answer["answer_start"] + else: + answers = qa["answers"] + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + context_text=context_text, + answer_text=answer_text, + start_position_character=start_position_character, + title=title, + is_impossible=is_impossible, + answers=answers, + ) + examples.append(example) + return examples + + +class SquadV1Processor(SquadProcessor): + train_file = "train-v1.1.json" + dev_file = "dev-v1.1.json" + + +class SquadV2Processor(SquadProcessor): + train_file = "train-v2.0.json" + dev_file = "dev-v2.0.json" + + +class SquadExample: + """ + A single training/test example for the Squad dataset, as loaded from disk. + + Args: + qas_id: The example's unique identifier + question_text: The question string + context_text: The context string + answer_text: The answer string + start_position_character: The character position of the start of the answer + title: The title of the example + answers: None by default, this is used during evaluation. Holds answers as well as their start positions. + is_impossible: False by default, set to True if the example has no possible answer. + """ + + def __init__( + self, + qas_id, + question_text, + context_text, + answer_text, + start_position_character, + title, + answers=[], + is_impossible=False, + ): + self.qas_id = qas_id + self.question_text = question_text + self.context_text = context_text + self.answer_text = answer_text + self.title = title + self.is_impossible = is_impossible + self.answers = answers + + self.start_position, self.end_position = 0, 0 + + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + + # Split on whitespace so that different tokens may be attributed to their original position. + for c in self.context_text: + if _is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + self.doc_tokens = doc_tokens + self.char_to_word_offset = char_to_word_offset + + # Start and end positions only has a value during evaluation. + if start_position_character is not None and not is_impossible: + self.start_position = char_to_word_offset[start_position_character] + self.end_position = char_to_word_offset[ + min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1) + ] + + +class SquadFeatures: + """ + Single squad example features to be fed to a model. 
Those features are model-specific and can be crafted from + [`~data.processors.squad.SquadExample`] using the + :method:*~transformers.data.processors.squad.squad_convert_examples_to_features* method. + + Args: + input_ids: Indices of input sequence tokens in the vocabulary. + attention_mask: Mask to avoid performing attention on padding token indices. + token_type_ids: Segment token indices to indicate first and second portions of the inputs. + cls_index: the index of the CLS token. + p_mask: Mask identifying tokens that can be answers vs. tokens that cannot. + Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer + example_index: the index of the example + unique_id: The unique Feature identifier + paragraph_len: The length of the context + token_is_max_context: + List of booleans identifying which tokens have their maximum context in this feature object. If a token + does not have their maximum context in this feature object, it means that another feature object has more + information related to that token and should be prioritized over this feature for that token. + tokens: list of tokens corresponding to the input ids + token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer. + start_position: start of the answer token index + end_position: end of the answer token index + encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods. + """ + + def __init__( + self, + input_ids, + attention_mask, + token_type_ids, + cls_index, + p_mask, + example_index, + unique_id, + paragraph_len, + token_is_max_context, + tokens, + token_to_orig_map, + start_position, + end_position, + is_impossible, + qas_id: str = None, + encoding: BatchEncoding = None, + ): + self.input_ids = input_ids + self.attention_mask = attention_mask + self.token_type_ids = token_type_ids + self.cls_index = cls_index + self.p_mask = p_mask + + self.example_index = example_index + self.unique_id = unique_id + self.paragraph_len = paragraph_len + self.token_is_max_context = token_is_max_context + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + self.qas_id = qas_id + + self.encoding = encoding + + +class SquadResult: + """ + Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset. + + Args: + unique_id: The unique identifier corresponding to that example. + start_logits: The logits corresponding to the start of the answer + end_logits: The logits corresponding to the end of the answer + """ + + def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None): + self.start_logits = start_logits + self.end_logits = end_logits + self.unique_id = unique_id + + if start_top_index: + self.start_top_index = start_top_index + self.end_top_index = end_top_index + self.cls_logits = cls_logits diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..936f5a51e9fcf4c4189eb444e567d761e8fa0865 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/utils.py @@ -0,0 +1,349 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. 
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import dataclasses +import json +from dataclasses import dataclass +from typing import List, Optional, Union + +from ...utils import is_tf_available, is_torch_available, logging + + +logger = logging.get_logger(__name__) + + +@dataclass +class InputExample: + """ + A single training/test example for simple sequence classification. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. + """ + + guid: str + text_a: str + text_b: Optional[str] = None + label: Optional[str] = None + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(dataclasses.asdict(self), indent=2) + "\n" + + +@dataclass(frozen=True) +class InputFeatures: + """ + A single set of features of data. Property names are the same names as the corresponding inputs to a model. + + Args: + input_ids: Indices of input sequence tokens in the vocabulary. + attention_mask: Mask to avoid performing attention on padding token indices. + Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded) + tokens. + token_type_ids: (Optional) Segment token indices to indicate first and second + portions of the inputs. Only some models use them. + label: (Optional) Label corresponding to the input. Int for classification problems, + float for regression problems. + """ + + input_ids: List[int] + attention_mask: Optional[List[int]] = None + token_type_ids: Optional[List[int]] = None + label: Optional[Union[int, float]] = None + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(dataclasses.asdict(self)) + "\n" + + +class DataProcessor: + """Base class for data converters for sequence classification data sets.""" + + def get_example_from_tensor_dict(self, tensor_dict): + """ + Gets an example from a dict with tensorflow tensors. + + Args: + tensor_dict: Keys and values should match the corresponding Glue + tensorflow_dataset examples. 
+ """ + raise NotImplementedError() + + def get_train_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the dev set.""" + raise NotImplementedError() + + def get_test_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the test set.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + def tfds_map(self, example): + """ + Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts + examples to the correct format. + """ + if len(self.get_labels()) > 1: + example.label = self.get_labels()[int(example.label)] + return example + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with open(input_file, "r", encoding="utf-8-sig") as f: + return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) + + +class SingleSentenceClassificationProcessor(DataProcessor): + """Generic processor for a single sentence classification data set.""" + + def __init__(self, labels=None, examples=None, mode="classification", verbose=False): + self.labels = [] if labels is None else labels + self.examples = [] if examples is None else examples + self.mode = mode + self.verbose = verbose + + def __len__(self): + return len(self.examples) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx]) + return self.examples[idx] + + @classmethod + def create_from_csv( + cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs + ): + processor = cls(**kwargs) + processor.add_examples_from_csv( + file_name, + split_name=split_name, + column_label=column_label, + column_text=column_text, + column_id=column_id, + skip_first_row=skip_first_row, + overwrite_labels=True, + overwrite_examples=True, + ) + return processor + + @classmethod + def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs): + processor = cls(**kwargs) + processor.add_examples(texts_or_text_and_labels, labels=labels) + return processor + + def add_examples_from_csv( + self, + file_name, + split_name="", + column_label=0, + column_text=1, + column_id=None, + skip_first_row=False, + overwrite_labels=False, + overwrite_examples=False, + ): + lines = self._read_tsv(file_name) + if skip_first_row: + lines = lines[1:] + texts = [] + labels = [] + ids = [] + for i, line in enumerate(lines): + texts.append(line[column_text]) + labels.append(line[column_label]) + if column_id is not None: + ids.append(line[column_id]) + else: + guid = f"{split_name}-{i}" if split_name else str(i) + ids.append(guid) + + return self.add_examples( + texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples + ) + + def add_examples( + self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False + ): + if labels is not None and len(texts_or_text_and_labels) != len(labels): + raise ValueError( + f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}" + ) + if ids is not None and len(texts_or_text_and_labels) != len(ids): + raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}") + if ids 
is None: + ids = [None] * len(texts_or_text_and_labels) + if labels is None: + labels = [None] * len(texts_or_text_and_labels) + examples = [] + added_labels = set() + for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids): + if isinstance(text_or_text_and_label, (tuple, list)) and label is None: + text, label = text_or_text_and_label + else: + text = text_or_text_and_label + added_labels.add(label) + examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label)) + + # Update examples + if overwrite_examples: + self.examples = examples + else: + self.examples.extend(examples) + + # Update labels + if overwrite_labels: + self.labels = list(added_labels) + else: + self.labels = list(set(self.labels).union(added_labels)) + + return self.examples + + def get_features( + self, + tokenizer, + max_length=None, + pad_on_left=False, + pad_token=0, + mask_padding_with_zero=True, + return_tensors=None, + ): + """ + Convert examples in a list of `InputFeatures` + + Args: + tokenizer: Instance of a tokenizer that will tokenize the examples + max_length: Maximum example length + pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default) + pad_token: Padding token + mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values + and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual + values) + + Returns: + If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the + task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific + `InputFeatures` which can be fed to the model. + + """ + if max_length is None: + max_length = tokenizer.max_len + + label_map = {label: i for i, label in enumerate(self.labels)} + + all_input_ids = [] + for ex_index, example in enumerate(self.examples): + if ex_index % 10000 == 0: + logger.info(f"Tokenizing example {ex_index}") + + input_ids = tokenizer.encode( + example.text_a, + add_special_tokens=True, + max_length=min(max_length, tokenizer.max_len), + ) + all_input_ids.append(input_ids) + + batch_length = max(len(input_ids) for input_ids in all_input_ids) + + features = [] + for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)): + if ex_index % 10000 == 0: + logger.info(f"Writing example {ex_index}/{len(self.examples)}") + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. 
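+            # For example, with batch_length = 8 and len(input_ids) = 5, padding_length is 3: three
+            # `pad_token` ids (and matching padding-mask values) are appended, or prepended instead
+            # when `pad_on_left=True`, so every feature in the batch ends up with the same length.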
+ padding_length = batch_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask + else: + input_ids = input_ids + ([pad_token] * padding_length) + attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + + if len(input_ids) != batch_length: + raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}") + if len(attention_mask) != batch_length: + raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}") + + if self.mode == "classification": + label = label_map[example.label] + elif self.mode == "regression": + label = float(example.label) + else: + raise ValueError(self.mode) + + if ex_index < 5 and self.verbose: + logger.info("*** Example ***") + logger.info(f"guid: {example.guid}") + logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}") + logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}") + logger.info(f"label: {example.label} (id = {label})") + + features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label)) + + if return_tensors is None: + return features + elif return_tensors == "tf": + if not is_tf_available(): + raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported") + import tensorflow as tf + + def gen(): + for ex in features: + yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label) + + dataset = tf.data.Dataset.from_generator( + gen, + ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), + ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])), + ) + return dataset + elif return_tensors == "pt": + if not is_torch_available(): + raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported") + import torch + from torch.utils.data import TensorDataset + + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + if self.mode == "classification": + all_labels = torch.tensor([f.label for f in features], dtype=torch.long) + elif self.mode == "regression": + all_labels = torch.tensor([f.label for f in features], dtype=torch.float) + + dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels) + return dataset + else: + raise ValueError("return_tensors should be one of 'tf' or 'pt'") diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/xnli.py b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/xnli.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1a11fcd6b4ef167fc77fb1cc6d9acbbadaccf0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/data/processors/xnli.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" XNLI utils (dataset loading and evaluation)""" + + +import os + +from ...utils import logging +from .utils import DataProcessor, InputExample + + +logger = logging.get_logger(__name__) + + +class XnliProcessor(DataProcessor): + """ + Processor for the XNLI dataset. Adapted from + https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207 + """ + + def __init__(self, language, train_language=None): + self.language = language + self.train_language = train_language + + def get_train_examples(self, data_dir): + """See base class.""" + lg = self.language if self.train_language is None else self.train_language + lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv")) + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"train-{i}" + text_a = line[0] + text_b = line[1] + label = "contradiction" if line[2] == "contradictory" else line[2] + if not isinstance(text_a, str): + raise ValueError(f"Training input {text_a} is not a string") + if not isinstance(text_b, str): + raise ValueError(f"Training input {text_b} is not a string") + if not isinstance(label, str): + raise ValueError(f"Training label {label} is not a string") + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_test_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv")) + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + language = line[0] + if language != self.language: + continue + guid = f"test-{i}" + text_a = line[6] + text_b = line[7] + label = line[1] + if not isinstance(text_a, str): + raise ValueError(f"Training input {text_a} is not a string") + if not isinstance(text_b, str): + raise ValueError(f"Training input {text_b} is not a string") + if not isinstance(label, str): + raise ValueError(f"Training label {label} is not a string") + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + +xnli_processors = { + "xnli": XnliProcessor, +} + +xnli_output_modes = { + "xnli": "classification", +} + +xnli_tasks_num_labels = { + "xnli": 3, +} diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ee0137a20b3ff78706c37585b0d7f3808cad063 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__init__.py @@ -0,0 +1,1107 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import warnings +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + +from huggingface_hub import model_info + +from ..configuration_utils import PretrainedConfig +from ..dynamic_module_utils import get_class_from_dynamic_module +from ..feature_extraction_utils import PreTrainedFeatureExtractor +from ..image_processing_utils import BaseImageProcessor +from ..models.auto.configuration_auto import AutoConfig +from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor +from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor +from ..models.auto.modeling_auto import AutoModelForDepthEstimation, AutoModelForImageToImage +from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer +from ..tokenization_utils import PreTrainedTokenizer +from ..utils import ( + CONFIG_NAME, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + cached_file, + extract_commit_hash, + find_adapter_config_file, + is_kenlm_available, + is_offline_mode, + is_peft_available, + is_pyctcdecode_available, + is_tf_available, + is_torch_available, + logging, +) +from .audio_classification import AudioClassificationPipeline +from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline +from .base import ( + ArgumentHandler, + CsvPipelineDataFormat, + JsonPipelineDataFormat, + PipedPipelineDataFormat, + Pipeline, + PipelineDataFormat, + PipelineException, + PipelineRegistry, + get_default_model_and_revision, + infer_framework_load_model, +) +from .conversational import Conversation, ConversationalPipeline +from .depth_estimation import DepthEstimationPipeline +from .document_question_answering import DocumentQuestionAnsweringPipeline +from .feature_extraction import FeatureExtractionPipeline +from .fill_mask import FillMaskPipeline +from .image_classification import ImageClassificationPipeline +from .image_feature_extraction import ImageFeatureExtractionPipeline +from .image_segmentation import ImageSegmentationPipeline +from .image_to_image import ImageToImagePipeline +from .image_to_text import ImageToTextPipeline +from .mask_generation import MaskGenerationPipeline +from .object_detection import ObjectDetectionPipeline +from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline +from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline +from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline +from .text_classification import TextClassificationPipeline +from .text_generation import TextGenerationPipeline +from .text_to_audio import TextToAudioPipeline +from .token_classification import ( + AggregationStrategy, + NerPipeline, + TokenClassificationArgumentHandler, + TokenClassificationPipeline, +) +from .video_classification import VideoClassificationPipeline +from .visual_question_answering import VisualQuestionAnsweringPipeline +from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline +from 
.zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline +from .zero_shot_image_classification import ZeroShotImageClassificationPipeline +from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline + + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import ( + TFAutoModel, + TFAutoModelForCausalLM, + TFAutoModelForImageClassification, + TFAutoModelForMaskedLM, + TFAutoModelForQuestionAnswering, + TFAutoModelForSeq2SeqLM, + TFAutoModelForSequenceClassification, + TFAutoModelForTableQuestionAnswering, + TFAutoModelForTokenClassification, + TFAutoModelForVision2Seq, + TFAutoModelForZeroShotImageClassification, + ) + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import ( + AutoModel, + AutoModelForAudioClassification, + AutoModelForCausalLM, + AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, + AutoModelForImageClassification, + AutoModelForImageSegmentation, + AutoModelForMaskedLM, + AutoModelForMaskGeneration, + AutoModelForObjectDetection, + AutoModelForQuestionAnswering, + AutoModelForSemanticSegmentation, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForSpeechSeq2Seq, + AutoModelForTableQuestionAnswering, + AutoModelForTextToSpectrogram, + AutoModelForTextToWaveform, + AutoModelForTokenClassification, + AutoModelForVideoClassification, + AutoModelForVision2Seq, + AutoModelForVisualQuestionAnswering, + AutoModelForZeroShotImageClassification, + AutoModelForZeroShotObjectDetection, + ) + + +if TYPE_CHECKING: + from ..modeling_tf_utils import TFPreTrainedModel + from ..modeling_utils import PreTrainedModel + from ..tokenization_utils_fast import PreTrainedTokenizerFast + + +logger = logging.get_logger(__name__) + + +# Register all the supported tasks here +TASK_ALIASES = { + "sentiment-analysis": "text-classification", + "ner": "token-classification", + "vqa": "visual-question-answering", + "text-to-speech": "text-to-audio", +} +SUPPORTED_TASKS = { + "audio-classification": { + "impl": AudioClassificationPipeline, + "tf": (), + "pt": (AutoModelForAudioClassification,) if is_torch_available() else (), + "default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}}, + "type": "audio", + }, + "automatic-speech-recognition": { + "impl": AutomaticSpeechRecognitionPipeline, + "tf": (), + "pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (), + "default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "55bb623")}}, + "type": "multimodal", + }, + "text-to-audio": { + "impl": TextToAudioPipeline, + "tf": (), + "pt": (AutoModelForTextToWaveform, AutoModelForTextToSpectrogram) if is_torch_available() else (), + "default": {"model": {"pt": ("suno/bark-small", "645cfba")}}, + "type": "text", + }, + "feature-extraction": { + "impl": FeatureExtractionPipeline, + "tf": (TFAutoModel,) if is_tf_available() else (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilbert-base-cased", "935ac13"), + "tf": ("distilbert/distilbert-base-cased", "935ac13"), + } + }, + "type": "multimodal", + }, + "text-classification": { + "impl": TextClassificationPipeline, + "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), + "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), + "tf": 
("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), + }, + }, + "type": "text", + }, + "token-classification": { + "impl": TokenClassificationPipeline, + "tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (), + "pt": (AutoModelForTokenClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), + "tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), + }, + }, + "type": "text", + }, + "question-answering": { + "impl": QuestionAnsweringPipeline, + "tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (), + "pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"), + "tf": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"), + }, + }, + "type": "text", + }, + "table-question-answering": { + "impl": TableQuestionAnsweringPipeline, + "pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (), + "tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (), + "default": { + "model": { + "pt": ("google/tapas-base-finetuned-wtq", "69ceee2"), + "tf": ("google/tapas-base-finetuned-wtq", "69ceee2"), + }, + }, + "type": "text", + }, + "visual-question-answering": { + "impl": VisualQuestionAnsweringPipeline, + "pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (), + "tf": (), + "default": { + "model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")}, + }, + "type": "multimodal", + }, + "document-question-answering": { + "impl": DocumentQuestionAnsweringPipeline, + "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), + "tf": (), + "default": { + "model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")}, + }, + "type": "multimodal", + }, + "fill-mask": { + "impl": FillMaskPipeline, + "tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (), + "pt": (AutoModelForMaskedLM,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilroberta-base", "ec58a5b"), + "tf": ("distilbert/distilroberta-base", "ec58a5b"), + } + }, + "type": "text", + }, + "summarization": { + "impl": SummarizationPipeline, + "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), + "default": { + "model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("google-t5/t5-small", "d769bba")} + }, + "type": "text", + }, + # This task is a special case as it's parametrized by SRC, TGT languages. 
+ "translation": { + "impl": TranslationPipeline, + "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), + "default": { + ("en", "fr"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + ("en", "de"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + ("en", "ro"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + }, + "type": "text", + }, + "text2text-generation": { + "impl": Text2TextGenerationPipeline, + "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), + "default": {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + "type": "text", + }, + "text-generation": { + "impl": TextGenerationPipeline, + "tf": (TFAutoModelForCausalLM,) if is_tf_available() else (), + "pt": (AutoModelForCausalLM,) if is_torch_available() else (), + "default": {"model": {"pt": ("openai-community/gpt2", "6c0e608"), "tf": ("openai-community/gpt2", "6c0e608")}}, + "type": "text", + }, + "zero-shot-classification": { + "impl": ZeroShotClassificationPipeline, + "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), + "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("facebook/bart-large-mnli", "c626438"), + "tf": ("FacebookAI/roberta-large-mnli", "130fb28"), + }, + "config": { + "pt": ("facebook/bart-large-mnli", "c626438"), + "tf": ("FacebookAI/roberta-large-mnli", "130fb28"), + }, + }, + "type": "text", + }, + "zero-shot-image-classification": { + "impl": ZeroShotImageClassificationPipeline, + "tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (), + "pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("openai/clip-vit-base-patch32", "f4881ba"), + "tf": ("openai/clip-vit-base-patch32", "f4881ba"), + } + }, + "type": "multimodal", + }, + "zero-shot-audio-classification": { + "impl": ZeroShotAudioClassificationPipeline, + "tf": (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("laion/clap-htsat-fused", "973b6e5"), + } + }, + "type": "multimodal", + }, + "conversational": { + "impl": ConversationalPipeline, + "tf": (TFAutoModelForSeq2SeqLM, TFAutoModelForCausalLM) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM, AutoModelForCausalLM) if is_torch_available() else (), + "default": { + "model": {"pt": ("microsoft/DialoGPT-medium", "8bada3b"), "tf": ("microsoft/DialoGPT-medium", "8bada3b")} + }, + "type": "text", + }, + "image-classification": { + "impl": ImageClassificationPipeline, + "tf": (TFAutoModelForImageClassification,) if is_tf_available() else (), + "pt": (AutoModelForImageClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("google/vit-base-patch16-224", "5dca96d"), + "tf": ("google/vit-base-patch16-224", "5dca96d"), + } + }, + "type": "image", + }, + "image-feature-extraction": { + "impl": ImageFeatureExtractionPipeline, + "tf": (TFAutoModel,) if is_tf_available() else (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("google/vit-base-patch16-224", "29e7a1e183"), + "tf": ("google/vit-base-patch16-224", "29e7a1e183"), + } + }, + "type": "image", + }, + "image-segmentation": { + 
"impl": ImageSegmentationPipeline, + "tf": (), + "pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (), + "default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}}, + "type": "multimodal", + }, + "image-to-text": { + "impl": ImageToTextPipeline, + "tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (), + "pt": (AutoModelForVision2Seq,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("ydshieh/vit-gpt2-coco-en", "65636df"), + "tf": ("ydshieh/vit-gpt2-coco-en", "65636df"), + } + }, + "type": "multimodal", + }, + "object-detection": { + "impl": ObjectDetectionPipeline, + "tf": (), + "pt": (AutoModelForObjectDetection,) if is_torch_available() else (), + "default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}}, + "type": "multimodal", + }, + "zero-shot-object-detection": { + "impl": ZeroShotObjectDetectionPipeline, + "tf": (), + "pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (), + "default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}}, + "type": "multimodal", + }, + "depth-estimation": { + "impl": DepthEstimationPipeline, + "tf": (), + "pt": (AutoModelForDepthEstimation,) if is_torch_available() else (), + "default": {"model": {"pt": ("Intel/dpt-large", "e93beec")}}, + "type": "image", + }, + "video-classification": { + "impl": VideoClassificationPipeline, + "tf": (), + "pt": (AutoModelForVideoClassification,) if is_torch_available() else (), + "default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "4800870")}}, + "type": "video", + }, + "mask-generation": { + "impl": MaskGenerationPipeline, + "tf": (), + "pt": (AutoModelForMaskGeneration,) if is_torch_available() else (), + "default": {"model": {"pt": ("facebook/sam-vit-huge", "997b15")}}, + "type": "multimodal", + }, + "image-to-image": { + "impl": ImageToImagePipeline, + "tf": (), + "pt": (AutoModelForImageToImage,) if is_torch_available() else (), + "default": {"model": {"pt": ("caidas/swin2SR-classical-sr-x2-64", "4aaedcb")}}, + "type": "image", + }, +} + +NO_FEATURE_EXTRACTOR_TASKS = set() +NO_IMAGE_PROCESSOR_TASKS = set() +NO_TOKENIZER_TASKS = set() + +# Those model configs are special, they are generic over their task, meaning +# any tokenizer/feature_extractor might be use for a given model so we cannot +# use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to +# see if the model defines such objects or not. +MULTI_MODEL_AUDIO_CONFIGS = {"SpeechEncoderDecoderConfig"} +MULTI_MODEL_VISION_CONFIGS = {"VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"} +for task, values in SUPPORTED_TASKS.items(): + if values["type"] == "text": + NO_FEATURE_EXTRACTOR_TASKS.add(task) + NO_IMAGE_PROCESSOR_TASKS.add(task) + elif values["type"] in {"image", "video"}: + NO_TOKENIZER_TASKS.add(task) + elif values["type"] in {"audio"}: + NO_TOKENIZER_TASKS.add(task) + NO_IMAGE_PROCESSOR_TASKS.add(task) + elif values["type"] != "multimodal": + raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}") + +PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES) + + +def get_supported_tasks() -> List[str]: + """ + Returns a list of supported task strings. 
+ """ + return PIPELINE_REGISTRY.get_supported_tasks() + + +def get_task(model: str, token: Optional[str] = None, **deprecated_kwargs) -> str: + use_auth_token = deprecated_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + if is_offline_mode(): + raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode") + try: + info = model_info(model, token=token) + except Exception as e: + raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}") + if not info.pipeline_tag: + raise RuntimeError( + f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically" + ) + if getattr(info, "library_name", "transformers") != "transformers": + raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers") + task = info.pipeline_tag + return task + + +def check_task(task: str) -> Tuple[str, Dict, Any]: + """ + Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and + default models if they exist. + + Args: + task (`str`): + The task defining which pipeline will be returned. Currently accepted tasks are: + + - `"audio-classification"` + - `"automatic-speech-recognition"` + - `"conversational"` + - `"depth-estimation"` + - `"document-question-answering"` + - `"feature-extraction"` + - `"fill-mask"` + - `"image-classification"` + - `"image-feature-extraction"` + - `"image-segmentation"` + - `"image-to-text"` + - `"image-to-image"` + - `"object-detection"` + - `"question-answering"` + - `"summarization"` + - `"table-question-answering"` + - `"text2text-generation"` + - `"text-classification"` (alias `"sentiment-analysis"` available) + - `"text-generation"` + - `"text-to-audio"` (alias `"text-to-speech"` available) + - `"token-classification"` (alias `"ner"` available) + - `"translation"` + - `"translation_xx_to_yy"` + - `"video-classification"` + - `"visual-question-answering"` (alias `"vqa"` available) + - `"zero-shot-classification"` + - `"zero-shot-image-classification"` + - `"zero-shot-object-detection"` + + Returns: + (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name + (removed alias and options). 
The actual dictionary required to initialize the pipeline and some extra task + options for parametrized tasks like "translation_XX_to_YY" + + + """ + return PIPELINE_REGISTRY.check_task(task) + + +def clean_custom_task(task_info): + import transformers + + if "impl" not in task_info: + raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.") + pt_class_names = task_info.get("pt", ()) + if isinstance(pt_class_names, str): + pt_class_names = [pt_class_names] + task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names) + tf_class_names = task_info.get("tf", ()) + if isinstance(tf_class_names, str): + tf_class_names = [tf_class_names] + task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names) + return task_info, None + + +def pipeline( + task: str = None, + model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, + config: Optional[Union[str, PretrainedConfig]] = None, + tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, + feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, + image_processor: Optional[Union[str, BaseImageProcessor]] = None, + framework: Optional[str] = None, + revision: Optional[str] = None, + use_fast: bool = True, + token: Optional[Union[str, bool]] = None, + device: Optional[Union[int, str, "torch.device"]] = None, + device_map=None, + torch_dtype=None, + trust_remote_code: Optional[bool] = None, + model_kwargs: Dict[str, Any] = None, + pipeline_class: Optional[Any] = None, + **kwargs, +) -> Pipeline: + """ + Utility factory method to build a [`Pipeline`]. + + Pipelines are made of: + + - A [tokenizer](tokenizer) in charge of mapping raw textual input to token. + - A [model](model) to make predictions from the inputs. + - Some (optional) post processing for enhancing model's output. + + Args: + task (`str`): + The task defining which pipeline will be returned. Currently accepted tasks are: + + - `"audio-classification"`: will return a [`AudioClassificationPipeline`]. + - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`]. + - `"conversational"`: will return a [`ConversationalPipeline`]. + - `"depth-estimation"`: will return a [`DepthEstimationPipeline`]. + - `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`]. + - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. + - `"fill-mask"`: will return a [`FillMaskPipeline`]:. + - `"image-classification"`: will return a [`ImageClassificationPipeline`]. + - `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`]. + - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`]. + - `"image-to-image"`: will return a [`ImageToImagePipeline`]. + - `"image-to-text"`: will return a [`ImageToTextPipeline`]. + - `"mask-generation"`: will return a [`MaskGenerationPipeline`]. + - `"object-detection"`: will return a [`ObjectDetectionPipeline`]. + - `"question-answering"`: will return a [`QuestionAnsweringPipeline`]. + - `"summarization"`: will return a [`SummarizationPipeline`]. + - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`]. + - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`]. + - `"text-classification"` (alias `"sentiment-analysis"` available): will return a + [`TextClassificationPipeline`]. + - `"text-generation"`: will return a [`TextGenerationPipeline`]:. 
+ - `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`]:. + - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`]. + - `"translation"`: will return a [`TranslationPipeline`]. + - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`]. + - `"video-classification"`: will return a [`VideoClassificationPipeline`]. + - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`]. + - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`]. + - `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`]. + - `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`]. + - `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`]. + + model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*): + The model that will be used by the pipeline to make predictions. This can be a model identifier or an + actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or + [`TFPreTrainedModel`] (for TensorFlow). + + If not provided, the default for the `task` will be loaded. + config (`str` or [`PretrainedConfig`], *optional*): + The configuration that will be used by the pipeline to instantiate the model. This can be a model + identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`]. + + If not provided, the default configuration file for the requested model will be used. That means that if + `model` is given, its default configuration will be used. However, if `model` is not supplied, this + `task`'s default model's config is used instead. + tokenizer (`str` or [`PreTrainedTokenizer`], *optional*): + The tokenizer that will be used by the pipeline to encode data for the model. This can be a model + identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`]. + + If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model` + is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string). + However, if `config` is also not given or not a string, then the default tokenizer for the given `task` + will be loaded. + feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*): + The feature extractor that will be used by the pipeline to encode data for the model. This can be a model + identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`]. + + Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal + models. Multi-modal models will also require a tokenizer to be passed. + + If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If + `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it + is a string). However, if `config` is also not given or not a string, then the default feature extractor + for the given `task` will be loaded. + framework (`str`, *optional*): + The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be + installed. + + If no framework is specified, will default to the one currently installed. 
If no framework is specified and + both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is + provided. + revision (`str`, *optional*, defaults to `"main"`): + When passing a task name or a string model identifier: The specific model version to use. It can be a + branch name, a tag name, or a commit id, since we use a git-based system for storing models and other + artifacts on huggingface.co, so `revision` can be any identifier allowed by git. + use_fast (`bool`, *optional*, defaults to `True`): + Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]). + use_auth_token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + device (`int` or `str` or `torch.device`): + Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this + pipeline will be allocated. + device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): + Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set + `device_map="auto"` to compute the most optimized `device_map` automatically (see + [here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload) + for more information). + + + + Do not use `device_map` AND `device` at the same time as they will conflict + + + + torch_dtype (`str` or `torch.dtype`, *optional*): + Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model + (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, + tokenization or even pipeline files. This option should only be set to `True` for repositories you trust + and in which you have read the code, as it will execute code present on the Hub on your local machine. + model_kwargs (`Dict[str, Any]`, *optional*): + Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., + **model_kwargs)` function. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the specific pipeline init (see the documentation for the + corresponding pipeline class for possible values). + + Returns: + [`Pipeline`]: A suitable pipeline for the task. + + Examples: + + ```python + >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer + + >>> # Sentiment analysis pipeline + >>> analyzer = pipeline("sentiment-analysis") + + >>> # Question answering pipeline, specifying the checkpoint identifier + >>> oracle = pipeline( + ... "question-answering", model="distilbert/distilbert-base-cased-distilled-squad", tokenizer="google-bert/bert-base-cased" + ... ) + + >>> # Named entity recognition pipeline, passing in a specific model and tokenizer + >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") + >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") + >>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer) + ```""" + if model_kwargs is None: + model_kwargs = {} + # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs, + # this is to keep BC). 
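+    # A `use_auth_token` found in `model_kwargs` is promoted to `token` below: the deprecated name
+    # triggers a FutureWarning, and passing both arguments at once raises a ValueError.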
+ use_auth_token = model_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + code_revision = kwargs.pop("code_revision", None) + commit_hash = kwargs.pop("_commit_hash", None) + + hub_kwargs = { + "revision": revision, + "token": token, + "trust_remote_code": trust_remote_code, + "_commit_hash": commit_hash, + } + + if task is None and model is None: + raise RuntimeError( + "Impossible to instantiate a pipeline without either a task or a model " + "being specified. " + "Please provide a task class or a model" + ) + + if model is None and tokenizer is not None: + raise RuntimeError( + "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer" + " may not be compatible with the default model. Please provide a PreTrainedModel class or a" + " path/identifier to a pretrained model when providing tokenizer." + ) + if model is None and feature_extractor is not None: + raise RuntimeError( + "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided" + " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class" + " or a path/identifier to a pretrained model when providing feature_extractor." + ) + if isinstance(model, Path): + model = str(model) + + if commit_hash is None: + pretrained_model_name_or_path = None + if isinstance(config, str): + pretrained_model_name_or_path = config + elif config is None and isinstance(model, str): + pretrained_model_name_or_path = model + + if not isinstance(config, PretrainedConfig) and pretrained_model_name_or_path is not None: + # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible + resolved_config_file = cached_file( + pretrained_model_name_or_path, + CONFIG_NAME, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + **hub_kwargs, + ) + hub_kwargs["_commit_hash"] = extract_commit_hash(resolved_config_file, commit_hash) + else: + hub_kwargs["_commit_hash"] = getattr(config, "_commit_hash", None) + + # Config is the primordial information item. 
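+    # It is resolved first because the custom-pipeline lookup, task inference for custom tasks, and
+    # the model loading below all rely on it (or on the commit hash extracted alongside it).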
+ # Instantiate config if needed + if isinstance(config, str): + config = AutoConfig.from_pretrained( + config, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs + ) + hub_kwargs["_commit_hash"] = config._commit_hash + elif config is None and isinstance(model, str): + # Check for an adapter file in the model path if PEFT is available + if is_peft_available(): + # `find_adapter_config_file` doesn't accept `trust_remote_code` + _hub_kwargs = {k: v for k, v in hub_kwargs.items() if k != "trust_remote_code"} + maybe_adapter_path = find_adapter_config_file( + model, + token=hub_kwargs["token"], + revision=hub_kwargs["revision"], + _commit_hash=hub_kwargs["_commit_hash"], + ) + + if maybe_adapter_path is not None: + with open(maybe_adapter_path, "r", encoding="utf-8") as f: + adapter_config = json.load(f) + model = adapter_config["base_model_name_or_path"] + + config = AutoConfig.from_pretrained( + model, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs + ) + hub_kwargs["_commit_hash"] = config._commit_hash + + custom_tasks = {} + if config is not None and len(getattr(config, "custom_pipelines", {})) > 0: + custom_tasks = config.custom_pipelines + if task is None and trust_remote_code is not False: + if len(custom_tasks) == 1: + task = list(custom_tasks.keys())[0] + else: + raise RuntimeError( + "We can't infer the task automatically for this model as there are multiple tasks available. Pick " + f"one in {', '.join(custom_tasks.keys())}" + ) + + if task is None and model is not None: + if not isinstance(model, str): + raise RuntimeError( + "Inferring the task automatically requires to check the hub with a model_id defined as a `str`. " + f"{model} is not a valid model_id." + ) + task = get_task(model, token) + + # Retrieve the task + if task in custom_tasks: + normalized_task = task + targeted_task, task_options = clean_custom_task(custom_tasks[task]) + if pipeline_class is None: + if not trust_remote_code: + raise ValueError( + "Loading this pipeline requires you to execute the code in the pipeline file in that" + " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" + " set the option `trust_remote_code=True` to remove this error." + ) + class_ref = targeted_task["impl"] + pipeline_class = get_class_from_dynamic_module( + class_ref, + model, + code_revision=code_revision, + **hub_kwargs, + ) + else: + normalized_task, targeted_task, task_options = check_task(task) + if pipeline_class is None: + pipeline_class = targeted_task["impl"] + + # Use default model/config/tokenizer for the task if no model is provided + if model is None: + # At that point framework might still be undetermined + model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options) + revision = revision if revision is not None else default_revision + logger.warning( + f"No model was supplied, defaulted to {model} and revision" + f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n" + "Using a pipeline without specifying a model name and revision in production is not recommended." + ) + if config is None and isinstance(model, str): + config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) + hub_kwargs["_commit_hash"] = config._commit_hash + + if device_map is not None: + if "device_map" in model_kwargs: + raise ValueError( + 'You cannot use both `pipeline(... 
device_map=..., model_kwargs={"device_map":...})` as those' + " arguments might conflict, use only one.)" + ) + if device is not None: + logger.warning( + "Both `device` and `device_map` are specified. `device` will override `device_map`. You" + " will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`." + ) + model_kwargs["device_map"] = device_map + if torch_dtype is not None: + if "torch_dtype" in model_kwargs: + raise ValueError( + 'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those' + " arguments might conflict, use only one.)" + ) + if isinstance(torch_dtype, str) and hasattr(torch, torch_dtype): + torch_dtype = getattr(torch, torch_dtype) + model_kwargs["torch_dtype"] = torch_dtype + + model_name = model if isinstance(model, str) else None + + # Load the correct model if possible + # Infer the framework from the model if not already defined + if isinstance(model, str) or framework is None: + model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]} + framework, model = infer_framework_load_model( + model, + model_classes=model_classes, + config=config, + framework=framework, + task=task, + **hub_kwargs, + **model_kwargs, + ) + + model_config = model.config + hub_kwargs["_commit_hash"] = model.config._commit_hash + load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None + load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None + load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None + + # If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while + # `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some + # vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`. + # TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue. + # This block is only temporarily to make CI green. + if load_image_processor and load_feature_extractor: + load_feature_extractor = False + + if ( + tokenizer is None + and not load_tokenizer + and normalized_task not in NO_TOKENIZER_TASKS + # Using class name to avoid importing the real class. + and ( + model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS + or model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS + ) + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_tokenizer = True + if ( + image_processor is None + and not load_image_processor + and normalized_task not in NO_IMAGE_PROCESSOR_TASKS + # Using class name to avoid importing the real class. + and model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_image_processor = True + if ( + feature_extractor is None + and not load_feature_extractor + and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS + # Using class name to avoid importing the real class. 
+ and model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_feature_extractor = True + + if task in NO_TOKENIZER_TASKS: + # These will never require a tokenizer. + # the model on the other hand might have a tokenizer, but + # the files could be missing from the hub, instead of failing + # on such repos, we just force to not load it. + load_tokenizer = False + + if task in NO_FEATURE_EXTRACTOR_TASKS: + load_feature_extractor = False + if task in NO_IMAGE_PROCESSOR_TASKS: + load_image_processor = False + + if load_tokenizer: + # Try to infer tokenizer from model or config name (if provided as str) + if tokenizer is None: + if isinstance(model_name, str): + tokenizer = model_name + elif isinstance(config, str): + tokenizer = config + else: + # Impossible to guess what is the right tokenizer here + raise Exception( + "Impossible to guess which tokenizer to use. " + "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer." + ) + + # Instantiate tokenizer if needed + if isinstance(tokenizer, (str, tuple)): + if isinstance(tokenizer, tuple): + # For tuple we have (tokenizer name, {kwargs}) + use_fast = tokenizer[1].pop("use_fast", use_fast) + tokenizer_identifier = tokenizer[0] + tokenizer_kwargs = tokenizer[1] + else: + tokenizer_identifier = tokenizer + tokenizer_kwargs = model_kwargs.copy() + tokenizer_kwargs.pop("torch_dtype", None) + + tokenizer = AutoTokenizer.from_pretrained( + tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs + ) + + if load_image_processor: + # Try to infer image processor from model or config name (if provided as str) + if image_processor is None: + if isinstance(model_name, str): + image_processor = model_name + elif isinstance(config, str): + image_processor = config + # Backward compatibility, as `feature_extractor` used to be the name + # for `ImageProcessor`. + elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor): + image_processor = feature_extractor + else: + # Impossible to guess what is the right image_processor here + raise Exception( + "Impossible to guess which image processor to use. " + "Please provide a PreTrainedImageProcessor class or a path/identifier " + "to a pretrained image processor." + ) + + # Instantiate image_processor if needed + if isinstance(image_processor, (str, tuple)): + image_processor = AutoImageProcessor.from_pretrained( + image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs + ) + + if load_feature_extractor: + # Try to infer feature extractor from model or config name (if provided as str) + if feature_extractor is None: + if isinstance(model_name, str): + feature_extractor = model_name + elif isinstance(config, str): + feature_extractor = config + else: + # Impossible to guess what is the right feature_extractor here + raise Exception( + "Impossible to guess which feature extractor to use. " + "Please provide a PreTrainedFeatureExtractor class or a path/identifier " + "to a pretrained feature extractor." 
+ ) + + # Instantiate feature_extractor if needed + if isinstance(feature_extractor, (str, tuple)): + feature_extractor = AutoFeatureExtractor.from_pretrained( + feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs + ) + + if ( + feature_extractor._processor_class + and feature_extractor._processor_class.endswith("WithLM") + and isinstance(model_name, str) + ): + try: + import kenlm # to trigger `ImportError` if not installed + from pyctcdecode import BeamSearchDecoderCTC + + if os.path.isdir(model_name) or os.path.isfile(model_name): + decoder = BeamSearchDecoderCTC.load_from_dir(model_name) + else: + language_model_glob = os.path.join( + BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*" + ) + alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME + allow_patterns = [language_model_glob, alphabet_filename] + decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns) + + kwargs["decoder"] = decoder + except ImportError as e: + logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}") + if not is_kenlm_available(): + logger.warning("Try to install `kenlm`: `pip install kenlm") + + if not is_pyctcdecode_available(): + logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode") + + if task == "translation" and model.config.task_specific_params: + for key in model.config.task_specific_params: + if key.startswith("translation"): + task = key + warnings.warn( + f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', + UserWarning, + ) + break + + if tokenizer is not None: + kwargs["tokenizer"] = tokenizer + + if feature_extractor is not None: + kwargs["feature_extractor"] = feature_extractor + + if torch_dtype is not None: + kwargs["torch_dtype"] = torch_dtype + + if image_processor is not None: + kwargs["image_processor"] = image_processor + + if device is not None: + kwargs["device"] = device + + return pipeline_class(model=model, framework=framework, task=task, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1328ae4778fc671d9451c625d8cc26473cd5f2ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f737c275c80d9d38a9c8a0037ac2e501232a0995 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53482928aebefb45b27360d075017f7a3a3d6cd6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7e6b5bac068c869991075e8fae5543bd701e24a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6131a108c37a5d0aa791aaf5c451fdf314be7e36 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b88ef2877931788d9516bad88e9d9c677ff7c098 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8f7182cbe89f20b6b59c5c3ce7b8a7808b5c240 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf1e60e48c6ad4f0d4501b587e5754ff8bb12654 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ab90de81a441ea78620b09273049459d27a4cac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/mask_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/mask_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41e598a1fe0e01496d669f0aabd6b64e22197f43 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/mask_generation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..682fb40c127ab5de858136138af3949e04d4fc10 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..512702e56f203325503940a5278c9769e6edaac4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dc31a5008d9ab346e44c8894e542b1e576b5835 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c48bfea367bb5a9f46db3617d20992a882c60707 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d9c76104209679cea541b7f77e5b2aec958008c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65dbff1bc1ab8cef7c943c38ae64b69a750c932b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/video_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/video_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..379a95a280dee1181fb53bc94b1f8039cceea3d1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/video_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/visual_question_answering.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/visual_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9e35fb5d76b7f645d630ee531e6ab3e133ab0f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/visual_question_answering.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_audio_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_audio_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcc3626510805858fc430982164901ab7a822bed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_audio_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35fec2442eb7ac793845792705898d306ea16886 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a38c226cdbf4782b4f4e0aa68002af0789d3891d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/automatic_speech_recognition.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/automatic_speech_recognition.py new file mode 100644 index 0000000000000000000000000000000000000000..f2d0f13679092249fa892acd4ebe25e7245d756a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/automatic_speech_recognition.py @@ -0,0 +1,737 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from collections import defaultdict +from typing import TYPE_CHECKING, Dict, Optional, Union + +import numpy as np +import requests + +from ..tokenization_utils import PreTrainedTokenizer +from ..utils import is_torch_available, is_torchaudio_available, logging +from .audio_utils import ffmpeg_read +from .base import ChunkPipeline + + +if TYPE_CHECKING: + from pyctcdecode import BeamSearchDecoderCTC + + from ..feature_extraction_sequence_utils import SequenceFeatureExtractor + from ..modeling_utils import PreTrainedModel + +logger = logging.get_logger(__name__) + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES + + +def rescale_stride(stride, ratio): + """ + Rescales the stride values from audio space to tokens/logits space. + + (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance. + """ + # Shape is [B, SEQ] for tokens + # [B, SEQ, V] for logits + + new_strides = [] + for input_n, left, right in stride: + token_n = int(round(input_n * ratio)) + left = int(round(left / input_n * token_n)) + right = int(round(right / input_n * token_n)) + new_stride = (token_n, left, right) + new_strides.append(new_stride) + + return new_strides + + +def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right, dtype=None): + inputs_len = inputs.shape[0] + step = chunk_len - stride_left - stride_right + for chunk_start_idx in range(0, inputs_len, step): + chunk_end_idx = chunk_start_idx + chunk_len + chunk = inputs[chunk_start_idx:chunk_end_idx] + processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt") + if dtype is not None: + processed = processed.to(dtype=dtype) + _stride_left = 0 if chunk_start_idx == 0 else stride_left + # all right strides must be full, otherwise it is the last item + is_last = chunk_end_idx > inputs_len if stride_right > 0 else chunk_end_idx >= inputs_len + _stride_right = 0 if is_last else stride_right + + chunk_len = chunk.shape[0] + stride = (chunk_len, _stride_left, _stride_right) + if chunk.shape[0] > _stride_left: + yield {"is_last": is_last, "stride": stride, **processed} + if is_last: + break + + +def _fast_find_longest_common_sequence(sequence_left, sequence_right): + seq_len_left = len(sequence_left) + seq_len_right = len(sequence_right) + counter = [[0] * (seq_len_right + 1) for _ in range(seq_len_left + 1)] + longest = 0 + for i in range(seq_len_left): + for j in range(seq_len_right): + if sequence_left[i] == sequence_right[j]: + previous_counter = counter[i][j] + 1 + counter[i + 1][j + 1] = previous_counter + if previous_counter > longest: + longest = previous_counter + + counter = np.array(counter) + # we return the idx of the first element of the longest common sequence in the left sequence + index_left = np.argwhere(counter == longest)[-1][0] - longest if longest != 0 else -1 + index_right = np.argwhere(counter == longest)[-1][1] - longest if longest != 0 else -1 + return index_left, index_right, longest + + +def _find_longest_common_sequence(sequences, tokenizer): + # TODO Use a faster algorithm this can probably be done in O(n) + # using suffix array. + # It might be tedious to do because of fault tolerance. + # We actually have a really good property which is that the total sequence + # MUST be those subsequences in order. + # Also the algorithm should be more tolerant to errors. 
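To make the stride arithmetic concrete, here is a standalone sketch reproducing `rescale_stride` together with the example values from its docstring (a re-implementation for illustration, not a new API):

```python
def rescale_stride(stride, ratio):
    # Map (input_n, left, right) tuples from audio-sample space into
    # token/logit space with a proportional rescale, as described above.
    new_strides = []
    for input_n, left, right in stride:
        token_n = int(round(input_n * ratio))
        left = int(round(left / input_n * token_n))
        right = int(round(right / input_n * token_n))
        new_strides.append((token_n, left, right))
    return new_strides


# 160_000 audio samples producing 2_000 logit frames -> ratio of 1/80.
print(rescale_stride([(160_000, 16_000, 16_000)], 1 / 80))
# [(2000, 200, 200)]
```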
+ sequence = [tok_id for tok_id in sequences[0][0].tolist() if tok_id not in tokenizer.all_special_ids] + for new_seq in sequences[1:]: + new_sequence = [tok_id for tok_id in new_seq[0].tolist() if tok_id not in tokenizer.all_special_ids] + + index = 0 + max_ = 0.0 + for i in range(1, len(new_sequence) + 1): + # epsilon to favor long perfect matches + eps = i / 10000.0 + matches = np.sum(np.array(sequence[-i:]) == np.array(new_sequence[:i])) + matching = matches / i + eps + if matches > 1 and matching > max_: + index = i + max_ = matching + sequence.extend(new_sequence[index:]) + return np.array(sequence) + + +class AutomaticSpeechRecognitionPipeline(ChunkPipeline): + """ + Pipeline that aims at extracting spoken text contained within some audio. + + The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for + to support multiple audio formats + + Example: + + ```python + >>> from transformers import pipeline + + >>> transcriber = pipeline(model="openai/whisper-base") + >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac") + {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'} + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + Arguments: + model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from + [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. + feature_extractor ([`SequenceFeatureExtractor`]): + The feature extractor that will be used by the pipeline to encode waveform for the model. + tokenizer ([`PreTrainedTokenizer`]): + The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from + [`PreTrainedTokenizer`]. + decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*): + [PyCTCDecode's + BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180) + can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information. + chunk_length_s (`float`, *optional*, defaults to 0): + The input length for in each chunk. If `chunk_length_s = 0` then chunking is disabled (default). + + + + For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking + blog post](https://huggingface.co/blog/asr-chunking). + + + + stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`): + The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables + the model to *see* more context and infer letters better than without this context but the pipeline + discards the stride bits at the end to make the final reconstitution as perfect as possible. + + + + For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking + blog post](https://huggingface.co/blog/asr-chunking). + + + + framework (`str`, *optional*): + The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be + installed. If no framework is specified, will default to the one currently installed. 
If no framework is + specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if + no model is provided. + device (Union[`int`, `torch.device`], *optional*): + Device ordinal for CPU/GPU supports. Setting this to `None` will leverage CPU, a positive will run the + model on the associated CUDA device id. + torch_dtype (Union[`int`, `torch.dtype`], *optional*): + The data-type (dtype) of the computation. Setting this to `None` will use float32 precision. Set to + `torch.float16` or `torch.bfloat16` to use half-precision in the respective dtypes. + + """ + + def __init__( + self, + model: "PreTrainedModel", + feature_extractor: Union["SequenceFeatureExtractor", str] = None, + tokenizer: Optional[PreTrainedTokenizer] = None, + decoder: Optional[Union["BeamSearchDecoderCTC", str]] = None, + device: Union[int, "torch.device"] = None, + torch_dtype: Optional[Union[str, "torch.dtype"]] = None, + **kwargs, + ): + # set the model type so we can check we have the right pre- and post-processing parameters + if model.config.model_type == "whisper": + self.type = "seq2seq_whisper" + elif model.__class__.__name__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.values(): + self.type = "seq2seq" + elif ( + feature_extractor._processor_class + and feature_extractor._processor_class.endswith("WithLM") + and decoder is not None + ): + self.decoder = decoder + self.type = "ctc_with_lm" + else: + self.type = "ctc" + + super().__init__(model, tokenizer, feature_extractor, device=device, torch_dtype=torch_dtype, **kwargs) + + def __call__( + self, + inputs: Union[np.ndarray, bytes, str], + **kwargs, + ): + """ + Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`] + documentation for more information. + + Args: + inputs (`np.ndarray` or `bytes` or `str` or `dict`): + The inputs is either : + - `str` that is either the filename of a local audio file, or a public URL address to download the + audio file. The file will be read at the correct sampling rate to get the waveform using + *ffmpeg*. This requires *ffmpeg* to be installed on the system. + - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the + same way. + - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) + Raw audio at the correct sampling rate (no further check will be done) + - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this + pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw": + np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to + treat the first `left` samples and last `right` samples to be ignored in decoding (but used at + inference to provide more context to the model). Only use `stride` with CTC models. + return_timestamps (*optional*, `str` or `bool`): + Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for + other sequence-to-sequence models. + + For CTC models, timestamps can take one of two formats: + - `"char"`: the pipeline will return timestamps along the text for every character in the text. For + instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7, + 0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before + `0.6` seconds. + - `"word"`: the pipeline will return timestamps along the text for every word in the text. 
For + instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": + (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and + before `0.9` seconds. + + For the Whisper model, timestamps can take one of two formats: + - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted + through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps + by inspecting the cross-attention weights. + - `True`: the pipeline will return timestamps along the text for *segments* of words in the text. + For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the + model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds. + Note that a segment of text refers to a sequence of one or more words, rather than individual + words as with word-level timestamps. + generate_kwargs (`dict`, *optional*): + The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a + complete overview of generate, check the [following + guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). + max_new_tokens (`int`, *optional*): + The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt. + + Return: + `Dict`: A dictionary with the following keys: + - **text** (`str`): The recognized text. + - **chunks** (*optional(, `List[Dict]`) + When using `return_timestamps`, the `chunks` will become a list containing all the various text + chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": + "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing + `"".join(chunk["text"] for chunk in output["chunks"])`. + """ + return super().__call__(inputs, **kwargs) + + def _sanitize_parameters( + self, + chunk_length_s=None, + stride_length_s=None, + ignore_warning=None, + decoder_kwargs=None, + return_timestamps=None, + return_language=None, + generate_kwargs=None, + max_new_tokens=None, + ): + # No parameters on this pipeline right now + preprocess_params = {} + if chunk_length_s is not None: + if self.type == "seq2seq" and not ignore_warning: + logger.warning( + "Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily" + " be entirely accurate and will have caveats. More information:" + " https://github.com/huggingface/transformers/pull/20104. 
Ignore this warning with pipeline(...," + " ignore_warning=True)" + ) + preprocess_params["chunk_length_s"] = chunk_length_s + if stride_length_s is not None: + preprocess_params["stride_length_s"] = stride_length_s + + forward_params = defaultdict(dict) + if max_new_tokens is not None: + forward_params["max_new_tokens"] = max_new_tokens + if generate_kwargs is not None: + if max_new_tokens is not None and "max_new_tokens" in generate_kwargs: + raise ValueError( + "`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use" + " only 1 version" + ) + forward_params.update(generate_kwargs) + + postprocess_params = {} + if decoder_kwargs is not None: + postprocess_params["decoder_kwargs"] = decoder_kwargs + if return_timestamps is not None: + # Check whether we have a valid setting for return_timestamps and throw an error before we perform a forward pass + if self.type == "seq2seq" and return_timestamps: + raise ValueError("We cannot return_timestamps yet on non-CTC models apart from Whisper!") + if self.type == "ctc_with_lm" and return_timestamps != "word": + raise ValueError("CTC with LM can only predict word level timestamps, set `return_timestamps='word'`") + if self.type == "ctc" and return_timestamps not in ["char", "word"]: + raise ValueError( + "CTC can either predict character level timestamps, or word level timestamps. " + "Set `return_timestamps='char'` or `return_timestamps='word'` as required." + ) + if self.type == "seq2seq_whisper" and return_timestamps == "char": + raise ValueError( + "Whisper cannot return `char` timestamps, only word level or segment level timestamps. " + "Use `return_timestamps='word'` or `return_timestamps=True` respectively." + ) + forward_params["return_timestamps"] = return_timestamps + postprocess_params["return_timestamps"] = return_timestamps + if return_language is not None: + if self.type != "seq2seq_whisper": + raise ValueError("Only Whisper can return language for now.") + postprocess_params["return_language"] = return_language + + return preprocess_params, forward_params, postprocess_params + + def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None): + if isinstance(inputs, str): + if inputs.startswith("http://") or inputs.startswith("https://"): + # We need to actually check for a real protocol, otherwise it's impossible to use a local file + # like http_huggingface_co.png + inputs = requests.get(inputs).content + else: + with open(inputs, "rb") as f: + inputs = f.read() + + if isinstance(inputs, bytes): + inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate) + + stride = None + extra = {} + if isinstance(inputs, dict): + stride = inputs.pop("stride", None) + # Accepting `"array"` which is the key defined in `datasets` for + # better integration + if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)): + raise ValueError( + "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a " + '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, ' + "containing the sampling_rate associated with that array" + ) + + _inputs = inputs.pop("raw", None) + if _inputs is None: + # Remove path which will not be used from `datasets`. 
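A short usage sketch of the dictionary input format handled in `preprocess` above; the checkpoint name is illustrative and the silent waveform is only a stand-in for real audio:

```python
import numpy as np
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")

# Raw audio sampled at 8 kHz: the pipeline resamples it (via torchaudio)
# to the feature extractor's sampling rate before feature extraction.
waveform = np.zeros(8_000, dtype=np.float32)  # one second of silence
print(asr({"sampling_rate": 8_000, "raw": waveform}))
```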
+ inputs.pop("path", None) + _inputs = inputs.pop("array", None) + in_sampling_rate = inputs.pop("sampling_rate") + extra = inputs + inputs = _inputs + if in_sampling_rate != self.feature_extractor.sampling_rate: + if is_torchaudio_available(): + from torchaudio import functional as F + else: + raise ImportError( + "torchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. " + "The torchaudio package can be installed through: `pip install torchaudio`." + ) + + inputs = F.resample( + torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate + ).numpy() + ratio = self.feature_extractor.sampling_rate / in_sampling_rate + else: + ratio = 1 + if stride is not None: + if stride[0] + stride[1] > inputs.shape[0]: + raise ValueError("Stride is too large for input") + + # Stride needs to get the chunk length here, it's going to get + # swallowed by the `feature_extractor` later, and then batching + # can add extra data in the inputs, so we need to keep track + # of the original length in the stride so we can cut properly. + stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio))) + if not isinstance(inputs, np.ndarray): + raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`") + if len(inputs.shape) != 1: + raise ValueError("We expect a single channel audio input for AutomaticSpeechRecognitionPipeline") + + if chunk_length_s: + if stride_length_s is None: + stride_length_s = chunk_length_s / 6 + + if isinstance(stride_length_s, (int, float)): + stride_length_s = [stride_length_s, stride_length_s] + + # XXX: Carefuly, this variable will not exist in `seq2seq` setting. + # Currently chunking is not possible at this level for `seq2seq` so + # it's ok. + align_to = getattr(self.model.config, "inputs_to_logits_ratio", 1) + chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to) + stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to) + stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to) + + if chunk_len < stride_left + stride_right: + raise ValueError("Chunk length must be superior to stride length") + + for item in chunk_iter( + inputs, self.feature_extractor, chunk_len, stride_left, stride_right, self.torch_dtype + ): + yield item + else: + if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples: + processed = self.feature_extractor( + inputs, + sampling_rate=self.feature_extractor.sampling_rate, + truncation=False, + padding="longest", + return_tensors="pt", + ) + else: + processed = self.feature_extractor( + inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" + ) + + if self.torch_dtype is not None: + processed = processed.to(dtype=self.torch_dtype) + if stride is not None: + if self.type == "seq2seq": + raise ValueError("Stride is only usable with CTC models, try removing it !") + + processed["stride"] = stride + yield {"is_last": True, **processed, **extra} + + def _forward(self, model_inputs, return_timestamps=False, **generate_kwargs): + attention_mask = model_inputs.pop("attention_mask", None) + stride = model_inputs.pop("stride", None) + is_last = model_inputs.pop("is_last") + + if self.type in {"seq2seq", "seq2seq_whisper"}: + encoder = self.model.get_encoder() + # Consume values so we can let extra information flow freely through + # the pipeline (important for `partial` in 
microphone) + if "input_features" in model_inputs: + inputs = model_inputs.pop("input_features") + elif "input_values" in model_inputs: + inputs = model_inputs.pop("input_values") + else: + raise ValueError( + "Seq2Seq speech recognition model requires either a " + f"`input_features` or `input_values` key, but only has {model_inputs.keys()}" + ) + + # custom processing for Whisper timestamps and word-level timestamps + if return_timestamps and self.type == "seq2seq_whisper": + generate_kwargs["return_timestamps"] = return_timestamps + if return_timestamps == "word": + generate_kwargs["return_token_timestamps"] = True + generate_kwargs["return_segments"] = True + + if stride is not None: + if isinstance(stride, tuple): + generate_kwargs["num_frames"] = stride[0] // self.feature_extractor.hop_length + else: + generate_kwargs["num_frames"] = [s[0] // self.feature_extractor.hop_length for s in stride] + + if self.type == "seq2seq_whisper" and inputs.shape[-1] > self.feature_extractor.nb_max_frames: + generate_kwargs["input_features"] = inputs + else: + generate_kwargs["encoder_outputs"] = encoder(inputs, attention_mask=attention_mask) + + tokens = self.model.generate( + attention_mask=attention_mask, + **generate_kwargs, + ) + # whisper longform generation stores timestamps in "segments" + if return_timestamps == "word" and self.type == "seq2seq_whisper": + if "segments" not in tokens: + out = {"tokens": tokens["sequences"], "token_timestamps": tokens["token_timestamps"]} + else: + token_timestamps = [ + torch.cat([segment["token_timestamps"] for segment in segment_list]) + for segment_list in tokens["segments"] + ] + out = {"tokens": tokens["sequences"], "token_timestamps": token_timestamps} + else: + out = {"tokens": tokens} + if self.type == "seq2seq_whisper": + if stride is not None: + out["stride"] = stride + + else: + inputs = { + self.model.main_input_name: model_inputs.pop(self.model.main_input_name), + "attention_mask": attention_mask, + } + outputs = self.model(**inputs) + logits = outputs.logits + + if self.type == "ctc_with_lm": + out = {"logits": logits} + else: + out = {"tokens": logits.argmax(dim=-1)} + if stride is not None: + # Send stride to `postprocess`. + # it needs to be handled there where + # the pieces are to be concatenated. 
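As a usage sketch of the timestamp options wired through `_forward` above, under the assumption of a Whisper checkpoint (the audio URL is the one from the class docstring):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")

# Word-level timestamps: approximated from cross-attention weights via DTW.
out = asr(
    "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
    return_timestamps="word",
)
print(out["text"])
print(out["chunks"][:3])  # e.g. [{'text': ..., 'timestamp': (start, end)}, ...]
```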
+ ratio = 1 / self.model.config.inputs_to_logits_ratio + if isinstance(stride, tuple): + out["stride"] = rescale_stride([stride], ratio)[0] + else: + out["stride"] = rescale_stride(stride, ratio) + # Leftover + extra = model_inputs + return {"is_last": is_last, **out, **extra} + + def postprocess( + self, model_outputs, decoder_kwargs: Optional[Dict] = None, return_timestamps=None, return_language=None + ): + # Optional return types + optional = {} + + final_items = [] + key = "logits" if self.type == "ctc_with_lm" else "tokens" + stride = None + for outputs in model_outputs: + items = outputs[key].numpy() + stride = outputs.get("stride", None) + if stride is not None and self.type in {"ctc", "ctc_with_lm"}: + total_n, left, right = stride + # Total_n might be < logits.shape[1] + # because of padding, that's why + # we need to reconstruct this information + # This won't work with left padding (which doesn't exist right now) + right_n = total_n - right + items = items[:, left:right_n] + final_items.append(items) + + if stride and self.type == "seq2seq": + items = _find_longest_common_sequence(final_items, self.tokenizer) + elif self.type == "seq2seq_whisper": + time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions + # Send the chunking back to seconds, it's easier to handle in whisper + sampling_rate = self.feature_extractor.sampling_rate + for output in model_outputs: + if "stride" in output: + chunk_len, stride_left, stride_right = output["stride"] + # Go back in seconds + chunk_len /= sampling_rate + stride_left /= sampling_rate + stride_right /= sampling_rate + output["stride"] = chunk_len, stride_left, stride_right + + text, optional = self.tokenizer._decode_asr( + model_outputs, + return_timestamps=return_timestamps, + return_language=return_language, + time_precision=time_precision, + ) + else: + items = np.concatenate(final_items, axis=1) + items = items.squeeze(0) + + if self.type == "ctc_with_lm": + if decoder_kwargs is None: + decoder_kwargs = {} + beams = self.decoder.decode_beams(items, **decoder_kwargs) + text = beams[0][0] + if return_timestamps: + # Simply cast from pyctcdecode format to wav2vec2 format to leverage + # pre-existing code later + chunk_offset = beams[0][2] + offsets = [] + for word, (start_offset, end_offset) in chunk_offset: + offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset}) + elif self.type != "seq2seq_whisper": + skip_special_tokens = self.type != "ctc" + text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens) + if return_timestamps: + offsets = self.tokenizer.decode( + items, skip_special_tokens=skip_special_tokens, output_char_offsets=True + )["char_offsets"] + if return_timestamps == "word": + offsets = self.tokenizer._get_word_offsets(offsets, self.tokenizer.replace_word_delimiter_char) + + if return_timestamps and self.type not in {"seq2seq", "seq2seq_whisper"}: + chunks = [] + for item in offsets: + start = item["start_offset"] * self.model.config.inputs_to_logits_ratio + start /= self.feature_extractor.sampling_rate + + stop = item["end_offset"] * self.model.config.inputs_to_logits_ratio + stop /= self.feature_extractor.sampling_rate + + chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)}) + optional["chunks"] = chunks + + extra = defaultdict(list) + for output in model_outputs: + output.pop("tokens", None) + output.pop("logits", None) + output.pop("is_last", None) + output.pop("stride", None) + output.pop("token_timestamps", 
None) + for k, v in output.items(): + extra[k].append(v) + return {"text": text, **optional, **extra} + + +def _find_timestamp_sequence(sequences, tokenizer, feature_extractor, max_source_positions): + """ + Computes the final sequences by merging the end of the nth sequence with the beginning of the n+1th sequence. Since + `WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps and only + iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk that is + processed. We need to make sure to offset the timestamps tokens by the `time` in order for the tokenizer to + properly compute the final `offset`. + """ + # index of the first timestamp token + timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1 + items = [] + # approximation of the token to time ratio : ~0.2seconds + time_precision = feature_extractor.chunk_length / max_source_positions + time = 0 + for seq_idx, item in enumerate(sequences): + sequence, stride = item + if isinstance(sequence, list): + sequence = np.array(sequence) + chunk_len, stride_left, stride_right = stride + sequence = sequence.squeeze(0) + # get rid of the `forced_decoder_idx` that are use to parametrize the generation + begin_idx = np.where(sequence == timestamp_begin)[0][0] if timestamp_begin in sequence else 0 + sequence = sequence[begin_idx:] + + timestamp_tokens = sequence >= timestamp_begin + if seq_idx != 0 and sum(timestamp_tokens) > 0: + consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 + last_timestamp = np.where(timestamp_tokens)[0][-1] + consecutive = np.append(consecutive, last_timestamp) if last_timestamp not in consecutive else consecutive + time -= stride_left + stride_right + offset = int((time / feature_extractor.sampling_rate) / time_precision) + overlap_time = int((stride_left / feature_extractor.sampling_rate) / time_precision) + # relevant timestamps are in the overlapping part + relevant_timestamp = np.where(sequence[consecutive] >= timestamp_begin + overlap_time)[0] + if relevant_timestamp.shape[0] > 0: + relevant_timestamp = ( + consecutive[relevant_timestamp[0] - 1] if relevant_timestamp[0] > 0 else consecutive[0] + ) + # if a big stride is used, we need to check some of the previous items for the best overlap + best_match = 0 + sliced_sequence = [] + for idx, previous_sequence in enumerate(reversed(items)): + previous_tokens = previous_sequence[1:-1] + if previous_sequence[0] < (timestamp_begin + offset - overlap_time) and idx != 0: + break # the previous sequence is too far in the past + if len(previous_tokens) > 0: + # find the longest common sequence between the overlapping parts + index_left, index_right, match_length = _fast_find_longest_common_sequence( + sequence[1:relevant_timestamp], previous_tokens + ) + # don't do anything if only 1 token was matched + if match_length > 1 and match_length > best_match: + best_match = match_length + best_idx = idx + end_of_curr_sequence_idx = ( + np.where(sequence[index_left + 1 :] >= timestamp_begin)[0][0] + 1 + ) + end_of_curr_sequence_idx = end_of_curr_sequence_idx + 1 + index_left + # if all the tokens are matched, suffix + if index_left == 0 and match_length == len(previous_tokens): + sliced_sequence = np.insert( + sequence[index_left + 1 : end_of_curr_sequence_idx], 0, previous_sequence[0] + ) + sliced_sequence[-1] = previous_sequence[-1] + # if part of the previous sequence is not taken + elif index_left >= 0: + sliced_sequence = sequence[index_left 
+ 1 : end_of_curr_sequence_idx] + # let's insert the missing part of the previous sequence + previous_slice = ( + previous_sequence[: index_right + 1] if index_right > 0 else [previous_sequence[0]] + ) + sliced_sequence = np.insert(sliced_sequence, 0, previous_slice) + sliced_sequence[-1] += offset + + if len(sliced_sequence) > 0: + items[len(items) - best_idx - 1] = sliced_sequence + items = items[: len(items) - best_idx] + sequence = sequence[end_of_curr_sequence_idx:] + + # sequence might have changed + timestamp_tokens = sequence >= timestamp_begin + consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 + if sum(timestamp_tokens) > 0: + last_timestamp = np.where(timestamp_tokens)[0][-1] + consecutive = ( + np.append(consecutive, last_timestamp + 1) if last_timestamp not in consecutive else consecutive + ) + + if len(consecutive) > 0: + last_slice = 0 + for current_slice in consecutive: + actual_offset = items[-1][-1] if seq_idx != 0 or last_slice != 0 else sequence[0] + sliced_tokens = sequence[last_slice:current_slice] + duration = sliced_tokens[-1] - sliced_tokens[0] + sliced_tokens[0] = actual_offset + sliced_tokens[-1] = actual_offset + duration + items.append(sliced_tokens) + last_slice = current_slice + + time += chunk_len + result = [] + for i in range(len(items)): + result += items[i].tolist() + return result diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/base.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/base.py new file mode 100644 index 0000000000000000000000000000000000000000..023b168b831ddd57b5cfe94b0be6ab8b22900380 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/base.py @@ -0,0 +1,1321 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
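The chunk-merging logic above hinges on finding the longest common run of tokens shared by the tail of one chunk and the head of the next; a simplified, self-contained sketch of that matching (names and example values are illustrative only):

```python
import numpy as np


def longest_common_run(left, right):
    # Dynamic-programming search for the longest contiguous run of tokens
    # shared by `left` and `right`; returns (index_left, index_right, length).
    best = (0, 0, 0)
    counter = np.zeros((len(left) + 1, len(right) + 1), dtype=int)
    for i, a in enumerate(left):
        for j, b in enumerate(right):
            if a == b:
                counter[i + 1, j + 1] = counter[i, j] + 1
                if counter[i + 1, j + 1] > best[2]:
                    length = counter[i + 1, j + 1]
                    best = (i + 1 - length, j + 1 - length, length)
    return best


# The run [7, 8] starts at index 2 of the left sequence and index 0 of the right.
print(longest_common_run([5, 6, 7, 8], [7, 8, 9]))  # (2, 0, 2)
```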
+import collections +import csv +import importlib +import json +import os +import pickle +import sys +import traceback +import types +import warnings +from abc import ABC, abstractmethod +from collections import UserDict +from contextlib import contextmanager +from os.path import abspath, exists +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + +from ..dynamic_module_utils import custom_object_save +from ..feature_extraction_utils import PreTrainedFeatureExtractor +from ..image_processing_utils import BaseImageProcessor +from ..modelcard import ModelCard +from ..models.auto.configuration_auto import AutoConfig +from ..tokenization_utils import PreTrainedTokenizer +from ..utils import ( + ModelOutput, + add_end_docstrings, + infer_framework, + is_tf_available, + is_torch_available, + is_torch_cuda_available, + is_torch_npu_available, + is_torch_xpu_available, + logging, +) + + +GenericTensor = Union[List["GenericTensor"], "torch.Tensor", "tf.Tensor"] + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import TFAutoModel + +if is_torch_available(): + import torch + from torch.utils.data import DataLoader, Dataset + + from ..models.auto.modeling_auto import AutoModel + + # Re-export for backward compatibility + from .pt_utils import KeyDataset +else: + Dataset = None + KeyDataset = None + +if TYPE_CHECKING: + from ..modeling_tf_utils import TFPreTrainedModel + from ..modeling_utils import PreTrainedModel + + +logger = logging.get_logger(__name__) + + +def no_collate_fn(items): + if len(items) != 1: + raise ValueError("This collate_fn is meant to be used with batch_size=1") + return items[0] + + +def _pad(items, key, padding_value, padding_side): + batch_size = len(items) + if isinstance(items[0][key], torch.Tensor): + # Others include `attention_mask` etc... 
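To illustrate the right-padding idea that the 2-D branch of `_pad` implements, here is a simplified toy version (not the helper above, just the same idea with hypothetical names):

```python
import torch


def pad_right(tensors, padding_value):
    # Right-pad a batch of [1, seq_len] tensors to the longest length,
    # mirroring the dim == 2 branch of `_pad`.
    max_len = max(t.shape[1] for t in tensors)
    out = torch.full((len(tensors), max_len), padding_value, dtype=tensors[0].dtype)
    for i, t in enumerate(tensors):
        out[i, : t.shape[1]] = t[0]
    return out


batch = [torch.tensor([[1, 2, 3]]), torch.tensor([[4, 5]])]
print(pad_right(batch, padding_value=0))
# tensor([[1, 2, 3],
#         [4, 5, 0]])
```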
+ shape = items[0][key].shape + dim = len(shape) + if key in ["pixel_values", "image"]: + # This is probable image so padding shouldn't be necessary + # B, C, H, W + return torch.cat([item[key] for item in items], dim=0) + elif dim == 4 and key == "input_features": + # this is probably a mel spectrogram batched + return torch.cat([item[key] for item in items], dim=0) + max_length = max(item[key].shape[1] for item in items) + min_length = min(item[key].shape[1] for item in items) + dtype = items[0][key].dtype + + if dim == 2: + if max_length == min_length: + # Bypass for `ImageGPT` which doesn't provide a padding value, yet + # we can consistently pad since the size should be matching + return torch.cat([item[key] for item in items], dim=0) + tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value + elif dim == 3: + tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value + elif dim == 4: + tensor = torch.zeros((batch_size, max_length, shape[-2], shape[-1]), dtype=dtype) + padding_value + + for i, item in enumerate(items): + if dim == 2: + if padding_side == "left": + tensor[i, -len(item[key][0]) :] = item[key][0].clone() + else: + tensor[i, : len(item[key][0])] = item[key][0].clone() + elif dim == 3: + if padding_side == "left": + tensor[i, -len(item[key][0]) :, :] = item[key][0].clone() + else: + tensor[i, : len(item[key][0]), :] = item[key][0].clone() + elif dim == 4: + if padding_side == "left": + tensor[i, -len(item[key][0]) :, :, :] = item[key][0].clone() + else: + tensor[i, : len(item[key][0]), :, :] = item[key][0].clone() + + return tensor + else: + return [item[key] for item in items] + + +def pad_collate_fn(tokenizer, feature_extractor): + # Tokenizer + t_padding_side = None + # Feature extractor + f_padding_side = None + if tokenizer is None and feature_extractor is None: + raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching") + if tokenizer is not None: + if tokenizer.pad_token_id is None: + raise ValueError( + "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with " + "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`." + ) + else: + t_padding_value = tokenizer.pad_token_id + t_padding_side = tokenizer.padding_side + if feature_extractor is not None: + # Feature extractor can be images, where no padding is expected + f_padding_value = getattr(feature_extractor, "padding_value", None) + f_padding_side = getattr(feature_extractor, "padding_side", None) + + if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side: + raise ValueError( + f"The feature extractor, and tokenizer don't agree on padding side {t_padding_side} != {f_padding_side}" + ) + padding_side = "right" + if t_padding_side is not None: + padding_side = t_padding_side + if f_padding_side is not None: + padding_side = f_padding_side + + def inner(items): + keys = set(items[0].keys()) + for item in items: + if set(item.keys()) != keys: + raise ValueError( + f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !=" + f" {keys})" + ) + # input_values, input_pixels, input_ids, ... 
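+ # Pick a padding value per key: tokenizer ids use `pad_token_id` (falling back to the
+ # feature extractor's value when there is no tokenizer, e.g. ImageGPT), audio/vision
+ # features use the feature extractor's `padding_value`, `p_mask`/`special_tokens_mask`
+ # are padded with 1, attention/token-type ids with 0, and unknown keys default to 0.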
+ padded = {} + for key in keys: + if key in {"input_ids"}: + # ImageGPT uses a feature extractor + if tokenizer is None and feature_extractor is not None: + _padding_value = f_padding_value + else: + _padding_value = t_padding_value + elif key in {"input_values", "pixel_values", "input_features"}: + _padding_value = f_padding_value + elif key in {"p_mask", "special_tokens_mask"}: + _padding_value = 1 + elif key in {"attention_mask", "token_type_ids"}: + _padding_value = 0 + else: + # This is likely another random key maybe even user provided + _padding_value = 0 + padded[key] = _pad(items, key, _padding_value, padding_side) + return padded + + return inner + + +def infer_framework_load_model( + model, + config: AutoConfig, + model_classes: Optional[Dict[str, Tuple[type]]] = None, + task: Optional[str] = None, + framework: Optional[str] = None, + **model_kwargs, +): + """ + Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). + + If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is + actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to + instantiate the model twice, this model is returned for use by the pipeline. + + If both frameworks are installed and available for `model`, PyTorch is selected. + + Args: + model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. + config ([`AutoConfig`]): + The config associated with the model to help using the correct class + model_classes (dictionary `str` to `type`, *optional*): + A mapping framework to class. + task (`str`): + The task defining which pipeline will be returned. + model_kwargs: + Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., + **model_kwargs)` function. + + Returns: + `Tuple`: A tuple framework, model. + """ + if not is_tf_available() and not is_torch_available(): + raise RuntimeError( + "At least one of TensorFlow 2.0 or PyTorch should be installed. " + "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " + "To install PyTorch, read the instructions at https://pytorch.org/." 
+ ) + if isinstance(model, str): + model_kwargs["_from_pipeline"] = task + class_tuple = () + look_pt = is_torch_available() and framework in {"pt", None} + look_tf = is_tf_available() and framework in {"tf", None} + if model_classes: + if look_pt: + class_tuple = class_tuple + model_classes.get("pt", (AutoModel,)) + if look_tf: + class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,)) + if config.architectures: + classes = [] + for architecture in config.architectures: + transformers_module = importlib.import_module("transformers") + if look_pt: + _class = getattr(transformers_module, architecture, None) + if _class is not None: + classes.append(_class) + if look_tf: + _class = getattr(transformers_module, f"TF{architecture}", None) + if _class is not None: + classes.append(_class) + class_tuple = class_tuple + tuple(classes) + + if len(class_tuple) == 0: + raise ValueError(f"Pipeline cannot infer suitable model classes from {model}") + + all_traceback = {} + for model_class in class_tuple: + kwargs = model_kwargs.copy() + if framework == "pt" and model.endswith(".h5"): + kwargs["from_tf"] = True + logger.warning( + "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. " + "Trying to load the model with PyTorch." + ) + elif framework == "tf" and model.endswith(".bin"): + kwargs["from_pt"] = True + logger.warning( + "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. " + "Trying to load the model with Tensorflow." + ) + + try: + model = model_class.from_pretrained(model, **kwargs) + if hasattr(model, "eval"): + model = model.eval() + # Stop loading on the first successful load. + break + except (OSError, ValueError): + all_traceback[model_class.__name__] = traceback.format_exc() + continue + + if isinstance(model, str): + error = "" + for class_name, trace in all_traceback.items(): + error += f"while loading with {class_name}, an error is thrown:\n{trace}\n" + raise ValueError( + f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n" + ) + + if framework is None: + framework = infer_framework(model.__class__) + return framework, model + + +def infer_framework_from_model( + model, + model_classes: Optional[Dict[str, Tuple[type]]] = None, + task: Optional[str] = None, + framework: Optional[str] = None, + **model_kwargs, +): + """ + Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). + + If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is + actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to + instantiate the model twice, this model is returned for use by the pipeline. + + If both frameworks are installed and available for `model`, PyTorch is selected. + + Args: + model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. + model_classes (dictionary `str` to `type`, *optional*): + A mapping framework to class. + task (`str`): + The task defining which pipeline will be returned. + model_kwargs: + Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., + **model_kwargs)` function. + + Returns: + `Tuple`: A tuple framework, model. 
+ """ + if isinstance(model, str): + config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs) + else: + config = model.config + return infer_framework_load_model( + model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs + ) + + +def get_framework(model, revision: Optional[str] = None): + """ + Select framework (TensorFlow or PyTorch) to use. + + Args: + model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): + If both frameworks are installed, picks the one corresponding to the model passed (either a model class or + the model name). If no specific model is provided, defaults to using PyTorch. + """ + warnings.warn( + "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.", + FutureWarning, + ) + if not is_tf_available() and not is_torch_available(): + raise RuntimeError( + "At least one of TensorFlow 2.0 or PyTorch should be installed. " + "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " + "To install PyTorch, read the instructions at https://pytorch.org/." + ) + if isinstance(model, str): + if is_torch_available() and not is_tf_available(): + model = AutoModel.from_pretrained(model, revision=revision) + elif is_tf_available() and not is_torch_available(): + model = TFAutoModel.from_pretrained(model, revision=revision) + else: + try: + model = AutoModel.from_pretrained(model, revision=revision) + except OSError: + model = TFAutoModel.from_pretrained(model, revision=revision) + + framework = infer_framework(model.__class__) + return framework + + +def get_default_model_and_revision( + targeted_task: Dict, framework: Optional[str], task_options: Optional[Any] +) -> Union[str, Tuple[str, str]]: + """ + Select a default model to use for a given task. Defaults to pytorch if ambiguous. + + Args: + targeted_task (`Dict` ): + Dictionary representing the given task, that should contain default models + + framework (`str`, None) + "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. + + task_options (`Any`, None) + Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for + translation task. + + Returns + + `str` The model string representing the default model for this pipeline + """ + if is_torch_available() and not is_tf_available(): + framework = "pt" + elif is_tf_available() and not is_torch_available(): + framework = "tf" + + defaults = targeted_task["default"] + if task_options: + if task_options not in defaults: + raise ValueError(f"The task does not provide any default models for options {task_options}") + default_models = defaults[task_options]["model"] + elif "model" in defaults: + default_models = targeted_task["default"]["model"] + else: + # XXX This error message needs to be updated to be more generic if more tasks are going to become + # parametrized + raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"') + + if framework is None: + framework = "pt" + + return default_models[framework] + + +class PipelineException(Exception): + """ + Raised by a [`Pipeline`] when handling __call__. + + Args: + task (`str`): The task of the pipeline. + model (`str`): The model used by the pipeline. + reason (`str`): The error message to display. 
+ """ + + def __init__(self, task: str, model: str, reason: str): + super().__init__(reason) + + self.task = task + self.model = model + + +class ArgumentHandler(ABC): + """ + Base interface for handling arguments for each [`~pipelines.Pipeline`]. + """ + + @abstractmethod + def __call__(self, *args, **kwargs): + raise NotImplementedError() + + +class PipelineDataFormat: + """ + Base class for all the pipeline supported data format both for reading and writing. Supported data formats + currently includes: + + - JSON + - CSV + - stdin/stdout (pipe) + + `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to + pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format. + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + SUPPORTED_FORMATS = ["json", "csv", "pipe"] + + def __init__( + self, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite: bool = False, + ): + self.output_path = output_path + self.input_path = input_path + self.column = column.split(",") if column is not None else [""] + self.is_multi_columns = len(self.column) > 1 + + if self.is_multi_columns: + self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column] + + if output_path is not None and not overwrite: + if exists(abspath(self.output_path)): + raise OSError(f"{self.output_path} already exists on disk") + + if input_path is not None: + if not exists(abspath(self.input_path)): + raise OSError(f"{self.input_path} doesnt exist on disk") + + @abstractmethod + def __iter__(self): + raise NotImplementedError() + + @abstractmethod + def save(self, data: Union[dict, List[dict]]): + """ + Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. + + Args: + data (`dict` or list of `dict`): The data to store. + """ + raise NotImplementedError() + + def save_binary(self, data: Union[dict, List[dict]]) -> str: + """ + Save the provided data object as a pickle-formatted binary data on the disk. + + Args: + data (`dict` or list of `dict`): The data to store. + + Returns: + `str`: Path where the data has been saved. + """ + path, _ = os.path.splitext(self.output_path) + binary_path = os.path.extsep.join((path, "pickle")) + + with open(binary_path, "wb+") as f_output: + pickle.dump(data, f_output) + + return binary_path + + @staticmethod + def from_str( + format: str, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite=False, + ) -> "PipelineDataFormat": + """ + Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`. + + Args: + format (`str`): + The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`. + output_path (`str`, *optional*): + Where to save the outgoing data. + input_path (`str`, *optional*): + Where to look for the input data. + column (`str`, *optional*): + The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + + Returns: + [`~pipelines.PipelineDataFormat`]: The proper data format. 
+ """ + if format == "json": + return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) + elif format == "csv": + return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) + elif format == "pipe": + return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) + else: + raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)") + + +class CsvPipelineDataFormat(PipelineDataFormat): + """ + Support for pipelines using CSV data format. + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + def __init__( + self, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite=False, + ): + super().__init__(output_path, input_path, column, overwrite=overwrite) + + def __iter__(self): + with open(self.input_path, "r") as f: + reader = csv.DictReader(f) + for row in reader: + if self.is_multi_columns: + yield {k: row[c] for k, c in self.column} + else: + yield row[self.column[0]] + + def save(self, data: List[dict]): + """ + Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. + + Args: + data (`List[dict]`): The data to store. + """ + with open(self.output_path, "w") as f: + if len(data) > 0: + writer = csv.DictWriter(f, list(data[0].keys())) + writer.writeheader() + writer.writerows(data) + + +class JsonPipelineDataFormat(PipelineDataFormat): + """ + Support for pipelines using JSON file format. + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + def __init__( + self, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite=False, + ): + super().__init__(output_path, input_path, column, overwrite=overwrite) + + with open(input_path, "r") as f: + self._entries = json.load(f) + + def __iter__(self): + for entry in self._entries: + if self.is_multi_columns: + yield {k: entry[c] for k, c in self.column} + else: + yield entry[self.column[0]] + + def save(self, data: dict): + """ + Save the provided data object in a json file. + + Args: + data (`dict`): The data to store. + """ + with open(self.output_path, "w") as f: + json.dump(data, f) + + +class PipedPipelineDataFormat(PipelineDataFormat): + """ + Read data from piped input to the python process. For multi columns data, columns should separated by \t + + If columns are provided, then the output will be a dictionary with {column_x: value_x} + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + def __iter__(self): + for line in sys.stdin: + # Split for multi-columns + if "\t" in line: + line = line.split("\t") + if self.column: + # Dictionary to map arguments + yield {kwargs: l for (kwargs, _), l in zip(self.column, line)} + else: + yield tuple(line) + + # No dictionary to map arguments + else: + yield line + + def save(self, data: dict): + """ + Print the data. 
+ + Args: + data (`dict`): The data to store. + """ + print(data) + + def save_binary(self, data: Union[dict, List[dict]]) -> str: + if self.output_path is None: + raise KeyError( + "When using piped input on pipeline outputting large object requires an output file path. " + "Please provide such output path through --output argument." + ) + + return super().save_binary(data) + + +class _ScikitCompat(ABC): + """ + Interface layer for the Scikit and Keras compatibility. + """ + + @abstractmethod + def transform(self, X): + raise NotImplementedError() + + @abstractmethod + def predict(self, X): + raise NotImplementedError() + + +def build_pipeline_init_args( + has_tokenizer: bool = False, + has_feature_extractor: bool = False, + has_image_processor: bool = False, + supports_binary_output: bool = True, +) -> str: + docstring = r""" + Arguments: + model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from + [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.""" + if has_tokenizer: + docstring += r""" + tokenizer ([`PreTrainedTokenizer`]): + The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from + [`PreTrainedTokenizer`].""" + if has_feature_extractor: + docstring += r""" + feature_extractor ([`SequenceFeatureExtractor`]): + The feature extractor that will be used by the pipeline to encode data for the model. This object inherits from + [`SequenceFeatureExtractor`].""" + if has_image_processor: + docstring += r""" + image_processor ([`BaseImageProcessor`]): + The image processor that will be used by the pipeline to encode data for the model. This object inherits from + [`BaseImageProcessor`].""" + docstring += r""" + modelcard (`str` or [`ModelCard`], *optional*): + Model card attributed to the model for this pipeline. + framework (`str`, *optional*): + The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be + installed. + + If no framework is specified, will default to the one currently installed. If no framework is specified and + both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is + provided. + task (`str`, defaults to `""`): + A task-identifier for the pipeline. + num_workers (`int`, *optional*, defaults to 8): + When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the number of + workers to be used. + batch_size (`int`, *optional*, defaults to 1): + When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the size of + the batch to use, for inference this is not always beneficial, please read [Batching with + pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching) . + args_parser ([`~pipelines.ArgumentHandler`], *optional*): + Reference to the object in charge of parsing supplied pipeline parameters. + device (`int`, *optional*, defaults to -1): + Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on + the associated CUDA device id. You can pass native `torch.device` or a `str` too + torch_dtype (`str` or `torch.dtype`, *optional*): + Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model + (`torch.float16`, `torch.bfloat16`, ... 
or `"auto"`)""" + if supports_binary_output: + docstring += r""" + binary_output (`bool`, *optional*, defaults to `False`): + Flag indicating if the output the pipeline should happen in a serialized format (i.e., pickle) or as + the raw output data e.g. text.""" + return docstring + + +PIPELINE_INIT_ARGS = build_pipeline_init_args( + has_tokenizer=True, has_feature_extractor=True, has_image_processor=True, supports_binary_output=True +) + + +if is_torch_available(): + from transformers.pipelines.pt_utils import ( + PipelineChunkIterator, + PipelineDataset, + PipelineIterator, + PipelinePackIterator, + ) + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_feature_extractor=True, has_image_processor=True)) +class Pipeline(_ScikitCompat): + """ + The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across + different pipelines. + + Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following + operations: + + Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output + + Pipeline supports running on CPU or GPU through the device argument (see below). + + Some pipeline, like for instance [`FeatureExtractionPipeline`] (`'feature-extraction'`) output large tensor object + as nested-lists. In order to avoid dumping such large structure as textual data we provide the `binary_output` + constructor argument. If set to `True`, the output will be stored in the pickle format. + """ + + default_input_names = None + + def __init__( + self, + model: Union["PreTrainedModel", "TFPreTrainedModel"], + tokenizer: Optional[PreTrainedTokenizer] = None, + feature_extractor: Optional[PreTrainedFeatureExtractor] = None, + image_processor: Optional[BaseImageProcessor] = None, + modelcard: Optional[ModelCard] = None, + framework: Optional[str] = None, + task: str = "", + args_parser: ArgumentHandler = None, + device: Union[int, "torch.device"] = None, + torch_dtype: Optional[Union[str, "torch.dtype"]] = None, + binary_output: bool = False, + **kwargs, + ): + if framework is None: + framework, model = infer_framework_load_model(model, config=model.config) + + self.task = task + self.model = model + self.tokenizer = tokenizer + self.feature_extractor = feature_extractor + self.image_processor = image_processor + self.modelcard = modelcard + self.framework = framework + + # `accelerate` device map + hf_device_map = getattr(self.model, "hf_device_map", None) + + if hf_device_map is not None and device is not None: + raise ValueError( + "The model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please " + "discard the `device` argument when creating your pipeline object." + ) + + if device is None: + if hf_device_map is not None: + # Take the first device used by `accelerate`. 
+ device = next(iter(hf_device_map.values())) + else: + device = -1 + + if is_torch_available() and self.framework == "pt": + if isinstance(device, torch.device): + if device.type == "xpu" and not is_torch_xpu_available(check_device=True): + raise ValueError(f'{device} is not available, you should use device="cpu" instead') + self.device = device + elif isinstance(device, str): + if "xpu" in device and not is_torch_xpu_available(check_device=True): + raise ValueError(f'{device} is not available, you should use device="cpu" instead') + self.device = torch.device(device) + elif device < 0: + self.device = torch.device("cpu") + elif is_torch_cuda_available(): + self.device = torch.device(f"cuda:{device}") + elif is_torch_npu_available(): + self.device = torch.device(f"npu:{device}") + elif is_torch_xpu_available(check_device=True): + self.device = torch.device(f"xpu:{device}") + else: + raise ValueError(f"{device} unrecognized or not available.") + else: + self.device = device if device is not None else -1 + self.torch_dtype = torch_dtype + self.binary_output = binary_output + + # We shouldn't call `model.to()` for models loaded with accelerate + if ( + self.framework == "pt" + and self.device is not None + and not (isinstance(self.device, int) and self.device < 0) + and hf_device_map is None + ): + self.model.to(self.device) + + # Update config and generation_config with task specific parameters + task_specific_params = self.model.config.task_specific_params + if task_specific_params is not None and task in task_specific_params: + self.model.config.update(task_specific_params.get(task)) + if self.model.can_generate(): + self.model.generation_config.update(**task_specific_params.get(task)) + + self.call_count = 0 + self._batch_size = kwargs.pop("batch_size", None) + self._num_workers = kwargs.pop("num_workers", None) + self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs) + + # Pipelines calling `generate`: if the tokenizer has a pad token but the model doesn't, set it in the + # forward params so that `generate` is aware of the pad token. + if ( + self.tokenizer is not None + and self.model.can_generate() + and self.tokenizer.pad_token_id is not None + and self.model.generation_config.pad_token_id is None + ): + self._forward_params["pad_token_id"] = self.tokenizer.pad_token_id + + if self.image_processor is None and self.feature_extractor is not None: + if isinstance(self.feature_extractor, BaseImageProcessor): + # Backward compatible change, if users called + # ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor()) + # then we should keep working + self.image_processor = self.feature_extractor + + def save_pretrained(self, save_directory: str, safe_serialization: bool = True): + """ + Save the pipeline's model and tokenizer. + + Args: + save_directory (`str`): + A path to the directory where to saved. It will be created if it doesn't exist. + safe_serialization (`str`): + Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow. 
+ """ + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + os.makedirs(save_directory, exist_ok=True) + + if hasattr(self, "_registered_impl"): + # Add info to the config + pipeline_info = self._registered_impl.copy() + custom_pipelines = {} + for task, info in pipeline_info.items(): + if info["impl"] != self.__class__: + continue + + info = info.copy() + module_name = info["impl"].__module__ + last_module = module_name.split(".")[-1] + # Change classes into their names/full names + info["impl"] = f"{last_module}.{info['impl'].__name__}" + info["pt"] = tuple(c.__name__ for c in info["pt"]) + info["tf"] = tuple(c.__name__ for c in info["tf"]) + + custom_pipelines[task] = info + self.model.config.custom_pipelines = custom_pipelines + # Save the pipeline custom code + custom_object_save(self, save_directory) + + self.model.save_pretrained(save_directory, safe_serialization=safe_serialization) + + if self.tokenizer is not None: + self.tokenizer.save_pretrained(save_directory) + + if self.feature_extractor is not None: + self.feature_extractor.save_pretrained(save_directory) + + if self.image_processor is not None: + self.image_processor.save_pretrained(save_directory) + + if self.modelcard is not None: + self.modelcard.save_pretrained(save_directory) + + def transform(self, X): + """ + Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). + """ + return self(X) + + def predict(self, X): + """ + Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). + """ + return self(X) + + @contextmanager + def device_placement(self): + """ + Context Manager allowing tensor allocation on the user-specified device in framework agnostic way. + + Returns: + Context manager + + Examples: + + ```python + # Explicitly ask for tensor allocation on CUDA device :0 + pipe = pipeline(..., device=0) + with pipe.device_placement(): + # Every framework specific tensor allocation will be done on the request device + output = pipe(...) + ```""" + if self.framework == "tf": + with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"): + yield + else: + if self.device.type == "cuda": + with torch.cuda.device(self.device): + yield + else: + yield + + def ensure_tensor_on_device(self, **inputs): + """ + Ensure PyTorch tensors are on the specified device. + + Args: + inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored): + The tensors to place on `self.device`. + Recursive on lists **only**. + + Return: + `Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device. 
+ """ + return self._ensure_tensor_on_device(inputs, self.device) + + def _ensure_tensor_on_device(self, inputs, device): + if isinstance(inputs, ModelOutput): + return ModelOutput( + {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} + ) + elif isinstance(inputs, dict): + return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} + elif isinstance(inputs, UserDict): + return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}) + elif isinstance(inputs, list): + return [self._ensure_tensor_on_device(item, device) for item in inputs] + elif isinstance(inputs, tuple): + return tuple([self._ensure_tensor_on_device(item, device) for item in inputs]) + elif isinstance(inputs, torch.Tensor): + if device == torch.device("cpu") and inputs.dtype in {torch.float16, torch.bfloat16}: + inputs = inputs.float() + return inputs.to(device) + else: + return inputs + + def check_model_type(self, supported_models: Union[List[str], dict]): + """ + Check if the model class is in supported by the pipeline. + + Args: + supported_models (`List[str]` or `dict`): + The list of models supported by the pipeline, or a dictionary with model class values. + """ + if not isinstance(supported_models, list): # Create from a model mapping + supported_models_names = [] + for _, model_name in supported_models.items(): + # Mapping can now contain tuples of models for the same configuration. + if isinstance(model_name, tuple): + supported_models_names.extend(list(model_name)) + else: + supported_models_names.append(model_name) + if hasattr(supported_models, "_model_mapping"): + for _, model in supported_models._model_mapping._extra_content.items(): + if isinstance(model_name, tuple): + supported_models_names.extend([m.__name__ for m in model]) + else: + supported_models_names.append(model.__name__) + supported_models = supported_models_names + if self.model.__class__.__name__ not in supported_models: + logger.error( + f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are" + f" {supported_models}." + ) + + @abstractmethod + def _sanitize_parameters(self, **pipeline_parameters): + """ + _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__` + methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`, + `forward` and `postprocess` methods. Do not fill dictionaries if the caller didn't specify a kwargs. This + lets you keep defaults in function signatures, which is more "natural". + + It is not meant to be called directly, it will be automatically called and the final parameters resolved by + `__init__` and `__call__` + """ + raise NotImplementedError("_sanitize_parameters not implemented") + + @abstractmethod + def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]: + """ + Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for + `_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items. + """ + raise NotImplementedError("preprocess not implemented") + + @abstractmethod + def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput: + """ + _forward will receive the prepared dictionary from `preprocess` and run it on the model. 
This method might + involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess` + and `postprocess` to exist, so that the hot path, this method generally can run as fast as possible. + + It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional + code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part + of the code (leading to faster inference). + """ + raise NotImplementedError("_forward not implemented") + + @abstractmethod + def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict) -> Any: + """ + Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into + something more friendly. Generally it will output a list or a dict or results (containing just strings and + numbers). + """ + raise NotImplementedError("postprocess not implemented") + + def get_inference_context(self): + return torch.no_grad + + def forward(self, model_inputs, **forward_params): + with self.device_placement(): + if self.framework == "tf": + model_inputs["training"] = False + model_outputs = self._forward(model_inputs, **forward_params) + elif self.framework == "pt": + inference_context = self.get_inference_context() + with inference_context(): + model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device) + model_outputs = self._forward(model_inputs, **forward_params) + model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu")) + else: + raise ValueError(f"Framework {self.framework} is not supported") + return model_outputs + + def get_iterator( + self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params + ): + if isinstance(inputs, collections.abc.Sized): + dataset = PipelineDataset(inputs, self.preprocess, preprocess_params) + else: + if num_workers > 1: + logger.warning( + "For iterable dataset using num_workers>1 is likely to result" + " in errors since everything is iterable, setting `num_workers=1`" + " to guarantee correctness." 
+ ) + num_workers = 1 + dataset = PipelineIterator(inputs, self.preprocess, preprocess_params) + if "TOKENIZERS_PARALLELISM" not in os.environ: + logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") + os.environ["TOKENIZERS_PARALLELISM"] = "false" + # TODO hack by collating feature_extractor and image_processor + feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor + collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) + dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) + model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) + final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) + return final_iterator + + def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs): + if args: + logger.warning(f"Ignoring args : {args}") + + if num_workers is None: + if self._num_workers is None: + num_workers = 0 + else: + num_workers = self._num_workers + if batch_size is None: + if self._batch_size is None: + batch_size = 1 + else: + batch_size = self._batch_size + + preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs) + + # Fuse __init__ params and __call__ params without modifying the __init__ ones. + preprocess_params = {**self._preprocess_params, **preprocess_params} + forward_params = {**self._forward_params, **forward_params} + postprocess_params = {**self._postprocess_params, **postprocess_params} + + self.call_count += 1 + if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda": + logger.warning_once( + "You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a" + " dataset", + UserWarning, + ) + + is_dataset = Dataset is not None and isinstance(inputs, Dataset) + is_generator = isinstance(inputs, types.GeneratorType) + is_list = isinstance(inputs, list) + + is_iterable = is_dataset or is_generator or is_list + + # TODO make the get_iterator work also for `tf` (and `flax`). 
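+ # Dispatch summary: on PyTorch, lists, `Dataset`s and generators go through `get_iterator`
+ # (a DataLoader-backed preprocess -> forward -> postprocess chain); lists are consumed
+ # eagerly and returned as a list, datasets/generators lazily. On other frameworks,
+ # iterables fall back to `run_multi`/`iterate`, while a single input uses `run_single`
+ # (or a one-element iterator for `ChunkPipeline` on PyTorch).
+ # e.g. (illustrative) `pipe(["short text", "a longer text"], batch_size=2)` on a PyTorch
+ # pipeline takes the DataLoader path and returns one output per input.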
+ can_use_iterator = self.framework == "pt" and (is_dataset or is_generator or is_list) + + if is_list: + if can_use_iterator: + final_iterator = self.get_iterator( + inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params + ) + outputs = list(final_iterator) + return outputs + else: + return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params) + elif can_use_iterator: + return self.get_iterator( + inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params + ) + elif is_iterable: + return self.iterate(inputs, preprocess_params, forward_params, postprocess_params) + elif self.framework == "pt" and isinstance(self, ChunkPipeline): + return next( + iter( + self.get_iterator( + [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params + ) + ) + ) + else: + return self.run_single(inputs, preprocess_params, forward_params, postprocess_params) + + def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params): + return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs] + + def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): + model_inputs = self.preprocess(inputs, **preprocess_params) + model_outputs = self.forward(model_inputs, **forward_params) + outputs = self.postprocess(model_outputs, **postprocess_params) + return outputs + + def iterate(self, inputs, preprocess_params, forward_params, postprocess_params): + # This function should become `get_iterator` again, this is a temporary + # easy solution. + for input_ in inputs: + yield self.run_single(input_, preprocess_params, forward_params, postprocess_params) + + +class ChunkPipeline(Pipeline): + def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): + all_outputs = [] + for model_inputs in self.preprocess(inputs, **preprocess_params): + model_outputs = self.forward(model_inputs, **forward_params) + all_outputs.append(model_outputs) + outputs = self.postprocess(all_outputs, **postprocess_params) + return outputs + + def get_iterator( + self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params + ): + if "TOKENIZERS_PARALLELISM" not in os.environ: + logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") + os.environ["TOKENIZERS_PARALLELISM"] = "false" + if num_workers > 1: + logger.warning( + "For ChunkPipeline using num_workers>0 is likely to result in errors since everything is iterable," + " setting `num_workers=1` to guarantee correctness." 
+ ) + num_workers = 1 + dataset = PipelineChunkIterator(inputs, self.preprocess, preprocess_params) + + # TODO hack by collating feature_extractor and image_processor + feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor + collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) + dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) + model_iterator = PipelinePackIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) + final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) + return final_iterator + + +class PipelineRegistry: + def __init__(self, supported_tasks: Dict[str, Any], task_aliases: Dict[str, str]) -> None: + self.supported_tasks = supported_tasks + self.task_aliases = task_aliases + + def get_supported_tasks(self) -> List[str]: + supported_task = list(self.supported_tasks.keys()) + list(self.task_aliases.keys()) + supported_task.sort() + return supported_task + + def check_task(self, task: str) -> Tuple[str, Dict, Any]: + if task in self.task_aliases: + task = self.task_aliases[task] + if task in self.supported_tasks: + targeted_task = self.supported_tasks[task] + return task, targeted_task, None + + if task.startswith("translation"): + tokens = task.split("_") + if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to": + targeted_task = self.supported_tasks["translation"] + task = "translation" + return task, targeted_task, (tokens[1], tokens[3]) + raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format") + + raise KeyError( + f"Unknown task {task}, available tasks are {self.get_supported_tasks() + ['translation_XX_to_YY']}" + ) + + def register_pipeline( + self, + task: str, + pipeline_class: type, + pt_model: Optional[Union[type, Tuple[type]]] = None, + tf_model: Optional[Union[type, Tuple[type]]] = None, + default: Optional[Dict] = None, + type: Optional[str] = None, + ) -> None: + if task in self.supported_tasks: + logger.warning(f"{task} is already registered. 
Overwriting pipeline for task {task}...") + + if pt_model is None: + pt_model = () + elif not isinstance(pt_model, tuple): + pt_model = (pt_model,) + + if tf_model is None: + tf_model = () + elif not isinstance(tf_model, tuple): + tf_model = (tf_model,) + + task_impl = {"impl": pipeline_class, "pt": pt_model, "tf": tf_model} + + if default is not None: + if "model" not in default and ("pt" in default or "tf" in default): + default = {"model": default} + task_impl["default"] = default + + if type is not None: + task_impl["type"] = type + + self.supported_tasks[task] = task_impl + pipeline_class._registered_impl = {task: task_impl} + + def to_dict(self): + return self.supported_tasks diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/conversational.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/conversational.py new file mode 100644 index 0000000000000000000000000000000000000000..257f693c9d2ea34159048ec204f3f615c591fb74 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/conversational.py @@ -0,0 +1,322 @@ +import uuid +import warnings +from typing import Any, Dict, List, Union + +from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging +from .base import Pipeline, build_pipeline_init_args + + +if is_tf_available(): + import tensorflow as tf + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class Conversation: + """ + Utility class containing a conversation and its history. This class is meant to be used as an input to the + [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user + inputs and generated model responses. + + Arguments: + messages (Union[str, List[Dict[str, str]]], *optional*): + The initial messages to start the conversation, either a string, or a list of dicts containing "role" and + "content" keys. If a string is passed, it is interpreted as a single message with the "user" role. + conversation_id (`uuid.UUID`, *optional*): + Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the + conversation. 
+ + Usage: + + ```python + conversation = Conversation("Going to the movies tonight - any suggestions?") + conversation.add_message({"role": "assistant", "content": "The Big lebowski."}) + conversation.add_message({"role": "user", "content": "Is it good?"}) + ```""" + + def __init__( + self, messages: Union[str, List[Dict[str, str]]] = None, conversation_id: uuid.UUID = None, **deprecated_kwargs + ): + if not conversation_id: + conversation_id = uuid.uuid4() + + if messages is None: + text = deprecated_kwargs.pop("text", None) + if text is not None: + messages = [{"role": "user", "content": text}] + else: + messages = [] + elif isinstance(messages, str): + messages = [{"role": "user", "content": messages}] + + # This block deals with the legacy args - new code should just totally + # avoid past_user_inputs and generated_responses + self._num_processed_user_inputs = 0 + generated_responses = deprecated_kwargs.pop("generated_responses", None) + past_user_inputs = deprecated_kwargs.pop("past_user_inputs", None) + if generated_responses is not None and past_user_inputs is None: + raise ValueError("generated_responses cannot be passed without past_user_inputs!") + if past_user_inputs is not None: + legacy_messages = [] + if generated_responses is None: + generated_responses = [] + # We structure it this way instead of using zip() because the lengths may differ by 1 + for i in range(max([len(past_user_inputs), len(generated_responses)])): + if i < len(past_user_inputs): + legacy_messages.append({"role": "user", "content": past_user_inputs[i]}) + if i < len(generated_responses): + legacy_messages.append({"role": "assistant", "content": generated_responses[i]}) + messages = legacy_messages + messages + + self.uuid = conversation_id + self.messages = messages + + def __eq__(self, other): + if not isinstance(other, Conversation): + return False + return self.uuid == other.uuid or self.messages == other.messages + + def add_message(self, message: Dict[str, str]): + if not set(message.keys()) == {"role", "content"}: + raise ValueError("Message should contain only 'role' and 'content' keys!") + if message["role"] not in ("user", "assistant", "system"): + raise ValueError("Only 'user', 'assistant' and 'system' roles are supported for now!") + self.messages.append(message) + + def add_user_input(self, text: str, overwrite: bool = False): + """ + Add a user input to the conversation for the next round. This is a legacy method that assumes that inputs must + alternate user/assistant/user/assistant, and so will not add multiple user messages in succession. We recommend + just using `add_message` with role "user" instead. + """ + if len(self) > 0 and self[-1]["role"] == "user": + if overwrite: + logger.warning( + f'User input added while unprocessed input was existing: "{self[-1]["content"]}" was overwritten ' + f'with: "{text}".' + ) + self[-1]["content"] = text + else: + logger.warning( + f'User input added while unprocessed input was existing: "{self[-1]["content"]}" new input ' + f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' + ) + else: + self.messages.append({"role": "user", "content": text}) + + def append_response(self, response: str): + """ + This is a legacy method. We recommend just using `add_message` with an appropriate role instead. + """ + self.messages.append({"role": "assistant", "content": response}) + + def mark_processed(self): + """ + This is a legacy method, as the Conversation no longer distinguishes between processed and unprocessed user + input. 
We set a counter here to keep behaviour mostly backward-compatible, but in general you should just read + the messages directly when writing new code. + """ + self._num_processed_user_inputs = len(self._user_messages) + + def __iter__(self): + for message in self.messages: + yield message + + def __getitem__(self, item): + return self.messages[item] + + def __setitem__(self, key, value): + self.messages[key] = value + + def __len__(self): + return len(self.messages) + + def __repr__(self): + """ + Generates a string representation of the conversation. + + Returns: + `str`: + + Example: + Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user: Going to the movies tonight - any suggestions? + bot: The Big Lebowski + """ + output = f"Conversation id: {self.uuid}\n" + for message in self.messages: + output += f"{message['role']}: {message['content']}\n" + return output + + def iter_texts(self): + # This is a legacy method for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + for message in self.messages: + yield message["role"] == "user", message["content"] + + @property + def _user_messages(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + return [message["content"] for message in self.messages if message["role"] == "user"] + + @property + def past_user_inputs(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. The modern class does not care about which messages are "processed" + # or not. + if not self._user_messages: + return [] + # In the past, the most recent user message had to be mark_processed() before being included + # in past_user_messages. The class essentially had a single-message buffer, representing messages that + # had not yet been replied to. This is no longer the case, but we mimic the behaviour in this property + # for backward compatibility. + if self.messages[-1]["role"] != "user" or self._num_processed_user_inputs == len(self._user_messages): + return self._user_messages + + return self._user_messages[:-1] + + @property + def generated_responses(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + return [message["content"] for message in self.messages if message["role"] == "assistant"] + + @property + def new_user_input(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + return self._user_messages[-1] + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True), + r""" + min_length_for_response (`int`, *optional*, defaults to 32): + The minimum length (in number of tokens) for a response.""", +) +class ConversationalPipeline(Pipeline): + """ + Multi-turn conversational pipeline. + + Example: + + ```python + >>> from transformers import pipeline, Conversation + # Any model with a chat template can be used in a ConversationalPipeline. + + >>> chatbot = pipeline(model="facebook/blenderbot-400M-distill") + >>> # Conversation objects initialized with a string will treat it as a user message + >>> conversation = Conversation("I'm looking for a movie - what's your favourite one?") + >>> conversation = chatbot(conversation) + >>> conversation.messages[-1]["content"] + "I don't really have a favorite movie, but I do like action movies. 
What about you?" + + >>> conversation.add_message({"role": "user", "content": "That's interesting, why do you like action movies?"}) + >>> conversation = chatbot(conversation) + >>> conversation.messages[-1]["content"] + " I think it's just because they're so fast-paced and action-fantastic." + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"conversational"`. + + This pipeline can be used with any model that has a [chat + template](https://huggingface.co/docs/transformers/chat_templating) set. + """ + + def __init__(self, *args, **kwargs): + warnings.warn( + "`ConversationalPipeline` is now deprecated, and the functionality has been moved to the standard `text-generation` pipeline, which now accepts lists of message dicts as well as strings. This class will be removed in v4.42.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + if self.tokenizer.pad_token_id is None: + self.tokenizer.pad_token = self.tokenizer.eos_token + + def _sanitize_parameters(self, min_length_for_response=None, clean_up_tokenization_spaces=None, **generate_kwargs): + preprocess_params = {} + forward_params = {} + postprocess_params = {} + + if min_length_for_response is not None: + preprocess_params["min_length_for_response"] = min_length_for_response + + if "max_length" in generate_kwargs: + forward_params["max_length"] = generate_kwargs["max_length"] + # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) + if clean_up_tokenization_spaces is not None: + postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces + + if generate_kwargs: + forward_params.update(generate_kwargs) + return preprocess_params, forward_params, postprocess_params + + def __call__(self, conversations: Union[List[Dict], Conversation, List[Conversation]], num_workers=0, **kwargs): + r""" + Generate responses for the conversation(s) given as inputs. + + Args: + conversations (a [`Conversation`] or a list of [`Conversation`]): + Conversation to generate responses for. Inputs can also be passed as a list of dictionaries with `role` + and `content` keys - in this case, they will be converted to `Conversation` objects automatically. + Multiple conversations in either format may be passed as a list. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): + Whether or not to clean up the potential extra spaces in the text output. + generate_kwargs: + Additional keyword arguments to pass along to the generate method of the model (see the generate method + corresponding to your framework [here](./model#generative-models)). + + Returns: + [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those + containing a new user input. + """ + # XXX: num_workers==0 is required to be backward compatible + # Otherwise the threads will require a Conversation copy. + # This will definitely hinder performance on GPU, but has to be opted + # in because of this BC change. 
+ if isinstance(conversations, list) and isinstance(conversations[0], dict): + conversations = Conversation(conversations) + elif isinstance(conversations, list) and isinstance(conversations[0], list): + conversations = [Conversation(conv) for conv in conversations] + outputs = super().__call__(conversations, num_workers=num_workers, **kwargs) + if isinstance(outputs, list) and len(outputs) == 1: + return outputs[0] + return outputs + + def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]: + input_ids = self.tokenizer.apply_chat_template(conversation, add_generation_prompt=True) + + if self.framework == "pt": + input_ids = torch.LongTensor([input_ids]) + elif self.framework == "tf": + input_ids = tf.constant([input_ids]) + return {"input_ids": input_ids, "conversation": conversation} + + def _forward(self, model_inputs, **generate_kwargs): + n = model_inputs["input_ids"].shape[1] + conversation = model_inputs.pop("conversation") + if "max_length" not in generate_kwargs and "max_new_tokens" not in generate_kwargs: + generate_kwargs["max_new_tokens"] = 256 + output_ids = self.model.generate(**model_inputs, **generate_kwargs) + if self.model.config.is_encoder_decoder: + start_position = 1 + else: + start_position = n + return {"output_ids": output_ids[:, start_position:], "conversation": conversation} + + def postprocess(self, model_outputs, clean_up_tokenization_spaces=True): + output_ids = model_outputs["output_ids"] + answer = self.tokenizer.decode( + output_ids[0], + skip_special_tokens=True, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + ) + conversation = model_outputs["conversation"] + conversation.add_message({"role": "assistant", "content": answer}) + return conversation diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/feature_extraction.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/feature_extraction.py new file mode 100644 index 0000000000000000000000000000000000000000..e8adb11b687da6a11932298d8d32b2f31f4053b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/feature_extraction.py @@ -0,0 +1,86 @@ +from typing import Dict + +from ..utils import add_end_docstrings +from .base import GenericTensor, Pipeline, build_pipeline_init_args + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False), + r""" + tokenize_kwargs (`dict`, *optional*): + Additional dictionary of keyword arguments passed along to the tokenizer. + return_tensors (`bool`, *optional*): + If `True`, returns a tensor according to the specified framework, otherwise returns a list.""", +) +class FeatureExtractionPipeline(Pipeline): + """ + Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base + transformer, which can be used as features in downstream tasks. + + Example: + + ```python + >>> from transformers import pipeline + + >>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction") + >>> result = extractor("This is a simple test.", return_tensors=True) + >>> result.shape # This is a tensor of shape [1, sequence_lenth, hidden_dimension] representing the input string. + torch.Size([1, 8, 768]) + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier: + `"feature-extraction"`. 
+ + All models may be used for this pipeline. See a list of all models, including community-contributed models on + [huggingface.co/models](https://huggingface.co/models). + """ + + def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs): + if tokenize_kwargs is None: + tokenize_kwargs = {} + + if truncation is not None: + if "truncation" in tokenize_kwargs: + raise ValueError( + "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" + ) + tokenize_kwargs["truncation"] = truncation + + preprocess_params = tokenize_kwargs + + postprocess_params = {} + if return_tensors is not None: + postprocess_params["return_tensors"] = return_tensors + + return preprocess_params, {}, postprocess_params + + def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]: + model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs) + return model_inputs + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, return_tensors=False): + # [0] is the first available tensor, logits or last_hidden_state. + if return_tensors: + return model_outputs[0] + if self.framework == "pt": + return model_outputs[0].tolist() + elif self.framework == "tf": + return model_outputs[0].numpy().tolist() + + def __call__(self, *args, **kwargs): + """ + Extract the features of the input(s). + + Args: + args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of. + + Return: + A nested list of `float`: The features computed by the model. + """ + return super().__call__(*args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_to_text.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_to_text.py new file mode 100644 index 0000000000000000000000000000000000000000..4a9a3744d841a05da26b978b7432248fb8f4313e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_to_text.py @@ -0,0 +1,194 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List, Union + +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True)) +class ImageToTextPipeline(Pipeline): + """ + Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image. + + Example: + + ```python + >>> from transformers import pipeline + + >>> captioner = pipeline(model="ydshieh/vit-gpt2-coco-en") + >>> captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") + [{'generated_text': 'two birds are standing next to each other '}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This image to text pipeline can currently be loaded from pipeline() using the following task identifier: + "image-to-text". + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + requires_backends(self, "vision") + self.check_model_type( + TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES + ) + + def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None, timeout=None): + forward_params = {} + preprocess_params = {} + + if prompt is not None: + preprocess_params["prompt"] = prompt + if timeout is not None: + preprocess_params["timeout"] = timeout + + if max_new_tokens is not None: + forward_params["max_new_tokens"] = max_new_tokens + if generate_kwargs is not None: + if max_new_tokens is not None and "max_new_tokens" in generate_kwargs: + raise ValueError( + "`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use" + " only 1 version" + ) + forward_params.update(generate_kwargs) + + return preprocess_params, forward_params, {} + + def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): + """ + Assign labels to the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a HTTP(s) link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. + + max_new_tokens (`int`, *optional*): + The amount of maximum tokens to generate. By default it will use `generate` default. + + generate_kwargs (`Dict`, *optional*): + Pass it to send all of these arguments directly to `generate` allowing full control of this function. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. 
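For checkpoints that support conditional generation (GIT and Pix2Struct in the `preprocess` logic below), the `prompt`, `max_new_tokens` and `generate_kwargs` arguments combine as in this hedged sketch; the checkpoint name is illustrative, and vision-encoder-decoder models raise an error if a prompt is passed:

```python
from transformers import pipeline

# Illustrative GIT checkpoint; plain captioning models ignore these extra arguments.
captioner = pipeline(task="image-to-text", model="microsoft/git-base-coco")

outputs = captioner(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    prompt="a photography of",  # prepended text for conditional captioning
    max_new_tokens=20,          # forwarded to `generate`
)
print(outputs[0]["generated_text"])
```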
+ + Return: + A list or a list of list of `dict`: Each result comes as a dictionary with the following key: + + - **generated_text** (`str`) -- The generated text. + """ + return super().__call__(images, **kwargs) + + def preprocess(self, image, prompt=None, timeout=None): + image = load_image(image, timeout=timeout) + + if prompt is not None: + if not isinstance(prompt, str): + raise ValueError( + f"Received an invalid text input, got - {type(prompt)} - but expected a single string. " + "Note also that one single text can be provided for conditional image to text generation." + ) + + model_type = self.model.config.model_type + + if model_type == "git": + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids + input_ids = [self.tokenizer.cls_token_id] + input_ids + input_ids = torch.tensor(input_ids).unsqueeze(0) + model_inputs.update({"input_ids": input_ids}) + + elif model_type == "pix2struct": + model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework) + + elif model_type != "vision-encoder-decoder": + # vision-encoder-decoder does not support conditional generation + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + text_inputs = self.tokenizer(prompt, return_tensors=self.framework) + model_inputs.update(text_inputs) + + else: + raise ValueError(f"Model type {model_type} does not support conditional text generation") + + else: + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + + if self.model.config.model_type == "git" and prompt is None: + model_inputs["input_ids"] = None + + return model_inputs + + def _forward(self, model_inputs, **generate_kwargs): + # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the + # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. + if ( + "input_ids" in model_inputs + and isinstance(model_inputs["input_ids"], list) + and all(x is None for x in model_inputs["input_ids"]) + ): + model_inputs["input_ids"] = None + + # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` + # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas + # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` + # in the `_prepare_model_inputs` method. 
+ inputs = model_inputs.pop(self.model.main_input_name) + model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs) + return model_outputs + + def postprocess(self, model_outputs): + records = [] + for output_ids in model_outputs: + record = { + "generated_text": self.tokenizer.decode( + output_ids, + skip_special_tokens=True, + ) + } + records.append(record) + return records diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/pt_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/pt_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c39f906f641ea6347437005ae056207087655118 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/pt_utils.py @@ -0,0 +1,318 @@ +import numpy as np +import torch +from torch.utils.data import Dataset, IterableDataset + +from ..utils.generic import ModelOutput + + +class PipelineDataset(Dataset): + def __init__(self, dataset, process, params): + self.dataset = dataset + self.process = process + self.params = params + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + item = self.dataset[i] + processed = self.process(item, **self.params) + return processed + + +class PipelineIterator(IterableDataset): + def __init__(self, loader, infer, params, loader_batch_size=None): + """ + Roughly equivalent to + + ``` + for item in loader: + yield infer(item, **params) + ``` + + Arguments: + loader (`torch.utils.data.DataLoader` or any iterator): + The iterator that will be used to apply `infer` on. + infer (any function): + The function to apply of each element of `loader`. + params (`dict`): + The parameters passed to `infer` along with every item + loader_batch_size (`int`, *optional*): + If specified, the items of `loader` are supposed to come as batch, and are loader_batched here + making it roughly behave as + + + ``` + for items in loader: + for i in loader_batch_size: + item = items[i] + yield infer(item, **params) + ```""" + self.loader = loader + self.infer = infer + self.params = params + if loader_batch_size == 1: + # Let's spare some time by deactivating altogether + loader_batch_size = None + self.loader_batch_size = loader_batch_size + + # Internal bookkeeping + self._loader_batch_index = None + self._loader_batch_data = None + + def __len__(self): + return len(self.loader) + + def __iter__(self): + self.iterator = iter(self.loader) + return self + + def loader_batch_item(self): + """ + Return item located at `loader_batch_index` within the current `loader_batch_data`. + """ + if isinstance(self._loader_batch_data, torch.Tensor): + # Batch data is simple tensor, just fetch the slice + result = self._loader_batch_data[self._loader_batch_index].unsqueeze(0) + else: + # Batch data is assumed to be BaseModelOutput (or dict) + loader_batched = {} + for k, element in self._loader_batch_data.items(): + if isinstance(element, ModelOutput): + # Convert ModelOutput to tuple first + element = element.to_tuple() + if isinstance(element[0], torch.Tensor): + loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) + elif isinstance(element[0], np.ndarray): + loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element) + continue + if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple): + # Those are stored as lists of tensors so need specific unbatching. 
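The unbatching performed by `loader_batch_item` is easiest to see with a toy loader. A small sketch, noting that `transformers.pipelines.pt_utils` is an internal module whose layout may change between versions:

```python
import torch
from transformers.pipelines.pt_utils import PipelineIterator

# One "loader" batch of size 2; `infer` just scales the whole batch.
loader = [torch.tensor([[1.0, 2.0], [3.0, 4.0]])]
iterator = PipelineIterator(loader, infer=lambda batch: batch * 10, params={}, loader_batch_size=2)

for item in iterator:
    # Each yielded item is re-wrapped to look like batch_size=1, i.e. shape [1, 2].
    print(item)
```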
+ if isinstance(element[0], torch.Tensor): + loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) + elif isinstance(element[0], np.ndarray): + loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element) + continue + if element is None: + # This can happen for optional data that get passed around + loader_batched[k] = None + elif isinstance(element[self._loader_batch_index], torch.Tensor): + # Take correct batch data, but make it looked like batch_size=1 + # For compatibility with other methods within transformers + + loader_batched[k] = element[self._loader_batch_index].unsqueeze(0) + elif isinstance(element[self._loader_batch_index], np.ndarray): + # Take correct batch data, but make it looked like batch_size=1 + # For compatibility with other methods within transformers + loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0) + else: + # This is typically a list, so no need to `unsqueeze`. + loader_batched[k] = element[self._loader_batch_index] + # Recreate the element by reusing the original class to make it look + # batch_size=1 + result = self._loader_batch_data.__class__(loader_batched) + self._loader_batch_index += 1 + return result + + def __next__(self): + if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: + # We are currently unrolling a batch so we just need to return + # the current item within a batch + return self.loader_batch_item() + + # We're out of items within a batch + item = next(self.iterator) + processed = self.infer(item, **self.params) + # We now have a batch of "inferred things". + if self.loader_batch_size is not None: + # Try to infer the size of the batch + if isinstance(processed, torch.Tensor): + first_tensor = processed + else: + key = list(processed.keys())[0] + first_tensor = processed[key] + if isinstance(first_tensor, list): + observed_batch_size = len(first_tensor) + else: + observed_batch_size = first_tensor.shape[0] + if 0 < observed_batch_size < self.loader_batch_size: + # could be last batch so we can't unroll as many + # elements. + self.loader_batch_size = observed_batch_size + # Setting internal index to unwrap the batch + self._loader_batch_data = processed + self._loader_batch_index = 0 + return self.loader_batch_item() + else: + # We're not unrolling batches + return processed + + +class PipelineChunkIterator(PipelineIterator): + def __init__(self, loader, infer, params, loader_batch_size=None): + """ + Roughly equivalent to + + ``` + for iterator in loader: + for item in iterator: + yield infer(item, **params) + ``` + + Arguments: + loader (`torch.utils.data.DataLoader` or any iterator): + The iterator that will be used to apply `infer` on. + infer (any function): + The function to apply of each element of `loader`. + params (`dict`): + The parameters passed to `infer` along with every item + """ + super().__init__(loader, infer, params) + + def __iter__(self): + self.iterator = iter(self.loader) + self.subiterator = None + return self + + def __next__(self): + if self.subiterator is None: + "Subiterator None means we haven't started a `preprocess` iterator. 
so start it" + self.subiterator = self.infer(next(self.iterator), **self.params) + try: + # Try to return next item + processed = next(self.subiterator) + except StopIteration: + # When a preprocess iterator ends, we can start lookig at the next item + # ChunkIterator will keep feeding until ALL elements of iterator + # all have created their subiterator and have been iterating against. + # + # Another way to look at it, is we're basically flattening lists of lists + # into a single list, but with generators + self.subiterator = self.infer(next(self.iterator), **self.params) + processed = next(self.subiterator) + return processed + + +class PipelinePackIterator(PipelineIterator): + """ + Roughly equivalent to + + ``` + packed = [] + for item in loader: + packed.append(item) + if item["is_last"]: + yield packed + packed = [] + ``` + + but it also handles cases where `item` are batched (meaning it's a dict of Tensor with first dimension > 1. In + that case it does + + ``` + packed = [] + for batch in loader: + # item is batched + for item in batch: + packed.append(item) + if item["is_last"]: + yield packed + packed = [] + ``` + + Arguments: + loader (`torch.utils.data.DataLoader` or any iterator): + The iterator that will be used to apply `infer` on. + infer (any function): + The function to apply of each element of `loader`. + params (`dict`): + The parameters passed to `infer` along with every item + loader_batch_size (`int`, *optional*): + If specified, the items of `loader` are supposed to come as batch, and are loader_batched here making + it roughly behave as + + + ``` + for items in loader: + for i in loader_batch_size: + item = items[i] + yield infer(item, **params) + ```""" + + def __iter__(self): + self.iterator = iter(self.loader) + return self + + def __next__(self): + # Extremely similar to PipelineIterator in its unpacking mechanism + # BUT, we have an extra required item which is the presence of `is_last` + # That is because everything is flattened by `PipelineChunkIterator` we + # need to keep track of how to regroup here in the original `process` + # boundaries so that `process` and `postprocess` see the same data. + + # This iterator accumulates items (possibly while unbatching) until it + # its a `is_last` and then just passes it on to the caller. + is_last = False + accumulator = [] + if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: + while self._loader_batch_index < self.loader_batch_size: + item = self.loader_batch_item() + is_last = item.pop("is_last") + accumulator.append(item) + if is_last: + return accumulator + + while not is_last: + processed = self.infer(next(self.iterator), **self.params) + if self.loader_batch_size is not None: + if isinstance(processed, torch.Tensor): + first_tensor = processed + else: + key = list(processed.keys())[0] + first_tensor = processed[key] + if isinstance(first_tensor, list): + observed_batch_size = len(first_tensor) + else: + observed_batch_size = first_tensor.shape[0] + if 0 < observed_batch_size < self.loader_batch_size: + # could be last batch so we can't unroll as many + # elements. 
+ self.loader_batch_size = observed_batch_size + self._loader_batch_data = processed + self._loader_batch_index = 0 + while self._loader_batch_index < self.loader_batch_size: + item = self.loader_batch_item() + is_last = item.pop("is_last") + accumulator.append(item) + if is_last: + return accumulator + else: + item = processed + is_last = item.pop("is_last") + accumulator.append(item) + return accumulator + + +class KeyDataset(Dataset): + def __init__(self, dataset: Dataset, key: str): + self.dataset = dataset + self.key = key + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + return self.dataset[i][self.key] + + +class KeyPairDataset(Dataset): + def __init__(self, dataset: Dataset, key1: str, key2: str): + self.dataset = dataset + self.key1 = key1 + self.key2 = key2 + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]} diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..702a47b7c3cbeddadebd8a98d5ad644f7608ec6d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py @@ -0,0 +1,432 @@ +import collections +import types + +import numpy as np + +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + requires_backends, +) +from .base import ArgumentHandler, Dataset, Pipeline, PipelineException, build_pipeline_init_args + + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import ( + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, + ) + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import ( + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, + ) + + +class TableQuestionAnsweringArgumentHandler(ArgumentHandler): + """ + Handles arguments for the TableQuestionAnsweringPipeline + """ + + def __call__(self, table=None, query=None, **kwargs): + # Returns tqa_pipeline_inputs of shape: + # [ + # {"table": pd.DataFrame, "query": List[str]}, + # ..., + # {"table": pd.DataFrame, "query" : List[str]} + # ] + requires_backends(self, "pandas") + import pandas as pd + + if table is None: + raise ValueError("Keyword argument `table` cannot be None.") + elif query is None: + if isinstance(table, dict) and table.get("query") is not None and table.get("table") is not None: + tqa_pipeline_inputs = [table] + elif isinstance(table, list) and len(table) > 0: + if not all(isinstance(d, dict) for d in table): + raise ValueError( + f"Keyword argument `table` should be a list of dict, but is {(type(d) for d in table)}" + ) + + if table[0].get("query") is not None and table[0].get("table") is not None: + tqa_pipeline_inputs = table + else: + raise ValueError( + "If keyword argument `table` is a list of dictionaries, each dictionary should have a `table`" + f" and `query` key, but only dictionary has keys {table[0].keys()} `table` and `query` keys." + ) + elif Dataset is not None and isinstance(table, Dataset) or isinstance(table, types.GeneratorType): + return table + else: + raise ValueError( + "Invalid input. 
Keyword argument `table` should be either of type `dict` or `list`, but " + f"is {type(table)})" + ) + else: + tqa_pipeline_inputs = [{"table": table, "query": query}] + + for tqa_pipeline_input in tqa_pipeline_inputs: + if not isinstance(tqa_pipeline_input["table"], pd.DataFrame): + if tqa_pipeline_input["table"] is None: + raise ValueError("Table cannot be None.") + + tqa_pipeline_input["table"] = pd.DataFrame(tqa_pipeline_input["table"]) + + return tqa_pipeline_inputs + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) +class TableQuestionAnsweringPipeline(Pipeline): + """ + Table Question Answering pipeline using a `ModelForTableQuestionAnswering`. This pipeline is only available in + PyTorch. + + Example: + + ```python + >>> from transformers import pipeline + + >>> oracle = pipeline(model="google/tapas-base-finetuned-wtq") + >>> table = { + ... "Repository": ["Transformers", "Datasets", "Tokenizers"], + ... "Stars": ["36542", "4512", "3934"], + ... "Contributors": ["651", "77", "34"], + ... "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], + ... } + >>> oracle(query="How many stars does the transformers repository have?", table=table) + {'answer': 'AVERAGE > 36542', 'coordinates': [(0, 1)], 'cells': ['36542'], 'aggregator': 'AVERAGE'} + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This tabular question answering pipeline can currently be loaded from [`pipeline`] using the following task + identifier: `"table-question-answering"`. + + The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. + See the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=table-question-answering). + """ + + default_input_names = "table,query" + + def __init__(self, args_parser=TableQuestionAnsweringArgumentHandler(), *args, **kwargs): + super().__init__(*args, **kwargs) + self._args_parser = args_parser + + if self.framework == "tf": + mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() + mapping.update(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) + else: + mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() + mapping.update(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) + self.check_model_type(mapping) + + self.aggregate = bool(getattr(self.model.config, "aggregation_labels", None)) and bool( + getattr(self.model.config, "num_aggregation_labels", None) + ) + self.type = "tapas" if hasattr(self.model.config, "aggregation_labels") else None + + def batch_inference(self, **inputs): + return self.model(**inputs) + + def sequential_inference(self, **inputs): + """ + Inference used for models that need to process sequences in a sequential fashion, like the SQA models which + handle conversational query related to a table. + """ + if self.framework == "pt": + all_logits = [] + all_aggregations = [] + prev_answers = None + batch_size = inputs["input_ids"].shape[0] + + input_ids = inputs["input_ids"].to(self.device) + attention_mask = inputs["attention_mask"].to(self.device) + token_type_ids = inputs["token_type_ids"].to(self.device) + token_type_ids_example = None + + for index in range(batch_size): + # If sequences have already been processed, the token type IDs will be created according to the previous + # answer. 
+ if prev_answers is not None: + prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) + model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) # shape (seq_len,) + + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + for i in range(model_labels.shape[0]): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col_id = token_type_ids_example[:, 1].tolist()[i] - 1 + row_id = token_type_ids_example[:, 2].tolist()[i] - 1 + + if row_id >= 0 and col_id >= 0 and segment_id == 1: + model_labels[i] = int(prev_answers[(col_id, row_id)]) + + token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device) + + input_ids_example = input_ids[index] + attention_mask_example = attention_mask[index] # shape (seq_len,) + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + outputs = self.model( + input_ids=input_ids_example.unsqueeze(0), + attention_mask=attention_mask_example.unsqueeze(0), + token_type_ids=token_type_ids_example.unsqueeze(0), + ) + logits = outputs.logits + + if self.aggregate: + all_aggregations.append(outputs.logits_aggregation) + + all_logits.append(logits) + + dist_per_token = torch.distributions.Bernoulli(logits=logits) + probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to( + dist_per_token.probs.device + ) + + coords_to_probs = collections.defaultdict(list) + for i, p in enumerate(probabilities.squeeze().tolist()): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col = token_type_ids_example[:, 1].tolist()[i] - 1 + row = token_type_ids_example[:, 2].tolist()[i] - 1 + if col >= 0 and row >= 0 and segment_id == 1: + coords_to_probs[(col, row)].append(p) + + prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} + + logits_batch = torch.cat(tuple(all_logits), 0) + + return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0)) + else: + all_logits = [] + all_aggregations = [] + prev_answers = None + batch_size = inputs["input_ids"].shape[0] + + input_ids = inputs["input_ids"] + attention_mask = inputs["attention_mask"] + token_type_ids = inputs["token_type_ids"].numpy() + token_type_ids_example = None + + for index in range(batch_size): + # If sequences have already been processed, the token type IDs will be created according to the previous + # answer. 
+ if prev_answers is not None: + prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) + model_labels = np.zeros_like(prev_labels_example, dtype=np.int32) # shape (seq_len,) + + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + for i in range(model_labels.shape[0]): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col_id = token_type_ids_example[:, 1].tolist()[i] - 1 + row_id = token_type_ids_example[:, 2].tolist()[i] - 1 + + if row_id >= 0 and col_id >= 0 and segment_id == 1: + model_labels[i] = int(prev_answers[(col_id, row_id)]) + + token_type_ids_example[:, 3] = model_labels + + input_ids_example = input_ids[index] + attention_mask_example = attention_mask[index] # shape (seq_len,) + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + outputs = self.model( + input_ids=np.expand_dims(input_ids_example, axis=0), + attention_mask=np.expand_dims(attention_mask_example, axis=0), + token_type_ids=np.expand_dims(token_type_ids_example, axis=0), + ) + logits = outputs.logits + + if self.aggregate: + all_aggregations.append(outputs.logits_aggregation) + + all_logits.append(logits) + + probabilities = tf.math.sigmoid(tf.cast(logits, tf.float32)) * tf.cast( + attention_mask_example, tf.float32 + ) + + coords_to_probs = collections.defaultdict(list) + token_type_ids_example = token_type_ids_example + for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col = token_type_ids_example[:, 1].tolist()[i] - 1 + row = token_type_ids_example[:, 2].tolist()[i] - 1 + if col >= 0 and row >= 0 and segment_id == 1: + coords_to_probs[(col, row)].append(p) + + prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} + + logits_batch = tf.concat(tuple(all_logits), 0) + + return (logits_batch,) if not self.aggregate else (logits_batch, tf.concat(tuple(all_aggregations), 0)) + + def __call__(self, *args, **kwargs): + r""" + Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below: + + - `pipeline(table, query)` + - `pipeline(table, [query])` + - `pipeline(table=table, query=query)` + - `pipeline(table=table, query=[query])` + - `pipeline({"table": table, "query": query})` + - `pipeline({"table": table, "query": [query]})` + - `pipeline([{"table": table, "query": query}, {"table": table, "query": query}])` + + The `table` argument should be a dict or a DataFrame built from that dict, containing the whole table: + + Example: + + ```python + data = { + "actors": ["brad pitt", "leonardo di caprio", "george clooney"], + "age": ["56", "45", "59"], + "number of movies": ["87", "53", "69"], + "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], + } + ``` + + This dictionary can be passed in as such, or can be converted to a pandas DataFrame: + + Example: + + ```python + import pandas as pd + + table = pd.DataFrame.from_dict(data) + ``` + + Args: + table (`pd.DataFrame` or `Dict`): + Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. + See above for an example of dictionary. + query (`str` or `List[str]`): + Query or list of queries that will be sent to the model alongside the table. + sequential (`bool`, *optional*, defaults to `False`): + Whether to do inference sequentially or as a batch. 
Batching is faster, but models like SQA require the + inference to be done sequentially to extract relations within sequences, given their conversational + nature. + padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): + Activates and controls padding. Accepts the following values: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence if provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). + + truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`): + Activates and controls truncation. Accepts the following values: + + - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` + or to the maximum acceptable input length for the model if that argument is not provided. This will + truncate row by row, removing rows from the table. + - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths + greater than the model maximum admissible input size). + + + Return: + A dictionary or a list of dictionaries containing results: Each result is a dictionary with the following + keys: + + - **answer** (`str`) -- The answer of the query given the table. If there is an aggregator, the answer will + be preceded by `AGGREGATOR >`. + - **coordinates** (`List[Tuple[int, int]]`) -- Coordinates of the cells of the answers. + - **cells** (`List[str]`) -- List of strings made up of the answer cell values. + - **aggregator** (`str`) -- If the model has an aggregator, this returns the aggregator. 
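For SQA-style conversational checkpoints, a hedged sketch of sequential inference (the checkpoint name is illustrative; the WTQ model shown above works the same way without `sequential`):

```python
from transformers import pipeline

oracle = pipeline(task="table-question-answering", model="google/tapas-base-finetuned-sqa")

table = {
    "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
    "number of movies": ["87", "53", "69"],
}

# Follow-up questions depend on earlier answers, so inference runs sequentially;
# one result dictionary is returned per query.
answers = oracle(
    table=table,
    query=["How many movies has george clooney played in?", "And brad pitt?"],
    sequential=True,
)
```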
+ """ + pipeline_inputs = self._args_parser(*args, **kwargs) + + results = super().__call__(pipeline_inputs, **kwargs) + if len(results) == 1: + return results[0] + return results + + def _sanitize_parameters(self, sequential=None, padding=None, truncation=None, **kwargs): + preprocess_params = {} + if padding is not None: + preprocess_params["padding"] = padding + if truncation is not None: + preprocess_params["truncation"] = truncation + + forward_params = {} + if sequential is not None: + forward_params["sequential"] = sequential + return preprocess_params, forward_params, {} + + def preprocess(self, pipeline_input, sequential=None, padding=True, truncation=None): + if truncation is None: + if self.type == "tapas": + truncation = "drop_rows_to_fit" + else: + truncation = "do_not_truncate" + + table, query = pipeline_input["table"], pipeline_input["query"] + if table.empty: + raise ValueError("table is empty") + if query is None or query == "": + raise ValueError("query is empty") + inputs = self.tokenizer(table, query, return_tensors=self.framework, truncation=truncation, padding=padding) + inputs["table"] = table + return inputs + + def _forward(self, model_inputs, sequential=False, **generate_kwargs): + table = model_inputs.pop("table") + + if self.type == "tapas": + if sequential: + outputs = self.sequential_inference(**model_inputs) + else: + outputs = self.batch_inference(**model_inputs) + else: + outputs = self.model.generate(**model_inputs, **generate_kwargs) + model_outputs = {"model_inputs": model_inputs, "table": table, "outputs": outputs} + return model_outputs + + def postprocess(self, model_outputs): + inputs = model_outputs["model_inputs"] + table = model_outputs["table"] + outputs = model_outputs["outputs"] + if self.type == "tapas": + if self.aggregate: + logits, logits_agg = outputs[:2] + predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits, logits_agg) + answer_coordinates_batch, agg_predictions = predictions + aggregators = {i: self.model.config.aggregation_labels[pred] for i, pred in enumerate(agg_predictions)} + + no_agg_label_index = self.model.config.no_aggregation_label_index + aggregators_prefix = { + i: aggregators[i] + " > " for i, pred in enumerate(agg_predictions) if pred != no_agg_label_index + } + else: + logits = outputs[0] + predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits) + answer_coordinates_batch = predictions[0] + aggregators = {} + aggregators_prefix = {} + answers = [] + for index, coordinates in enumerate(answer_coordinates_batch): + cells = [table.iat[coordinate] for coordinate in coordinates] + aggregator = aggregators.get(index, "") + aggregator_prefix = aggregators_prefix.get(index, "") + answer = { + "answer": aggregator_prefix + ", ".join(cells), + "coordinates": coordinates, + "cells": [table.iat[coordinate] for coordinate in coordinates], + } + if aggregator: + answer["aggregator"] = aggregator + + answers.append(answer) + if len(answer) == 0: + raise PipelineException("Empty answer") + else: + answers = [{"answer": answer} for answer in self.tokenizer.batch_decode(outputs, skip_special_tokens=True)] + + return answers if len(answers) > 1 else answers[0] diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_classification.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..6521da098d4cdf379d6c338041d33cb21f44907a --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_classification.py @@ -0,0 +1,226 @@ +import inspect +import warnings +from typing import Dict + +import numpy as np + +from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available +from .base import GenericTensor, Pipeline, build_pipeline_init_args + + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + + +def sigmoid(_outputs): + return 1.0 / (1.0 + np.exp(-_outputs)) + + +def softmax(_outputs): + maxes = np.max(_outputs, axis=-1, keepdims=True) + shifted_exp = np.exp(_outputs - maxes) + return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) + + +class ClassificationFunction(ExplicitEnum): + SIGMOID = "sigmoid" + SOFTMAX = "softmax" + NONE = "none" + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True), + r""" + return_all_scores (`bool`, *optional*, defaults to `False`): + Whether to return all prediction scores or just the one of the predicted class. + function_to_apply (`str`, *optional*, defaults to `"default"`): + The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: + + - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model + has several labels, will apply the softmax function on the output. + - `"sigmoid"`: Applies the sigmoid function on the output. + - `"softmax"`: Applies the softmax function on the output. + - `"none"`: Does not apply any function on the output.""", +) +class TextClassificationPipeline(Pipeline): + """ + Text classification pipeline using any `ModelForSequenceClassification`. See the [sequence classification + examples](../task_summary#sequence-classification) for more information. + + Example: + + ```python + >>> from transformers import pipeline + + >>> classifier = pipeline(model="distilbert/distilbert-base-uncased-finetuned-sst-2-english") + >>> classifier("This movie is disgustingly good !") + [{'label': 'POSITIVE', 'score': 1.0}] + + >>> classifier("Director tried too much.") + [{'label': 'NEGATIVE', 'score': 0.996}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This text classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"sentiment-analysis"` (for classifying sequences according to positive or negative sentiments). + + If multiple classification labels are available (`model.config.num_labels >= 2`), the pipeline will run a softmax + over the results. If there is a single label, the pipeline will run a sigmoid over the result. + + The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See + the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=text-classification). 
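As a hedged illustration of the `top_k` behaviour described below and of the dictionary input format for text pairs (the second call only demonstrates the input shape; pair inputs are really meant for checkpoints trained on sentence pairs):

```python
from transformers import pipeline

classifier = pipeline(model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")

# top_k=None returns every label with its score instead of only the best one;
# this replaces the deprecated return_all_scores=True.
all_scores = classifier("This movie is disgustingly good !", top_k=None)

# Text pairs are sent as a dict with "text" and "text_pair" keys.
pair_result = classifier({"text": "A man is eating.", "text_pair": "Someone is having a meal."})
```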
+ """ + + return_all_scores = False + function_to_apply = ClassificationFunction.NONE + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + self.check_model_type( + TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + if self.framework == "tf" + else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + ) + + def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs): + # Using "" as default argument because we're going to use `top_k=None` in user code to declare + # "No top_k" + preprocess_params = tokenizer_kwargs + + postprocess_params = {} + if hasattr(self.model.config, "return_all_scores") and return_all_scores is None: + return_all_scores = self.model.config.return_all_scores + + if isinstance(top_k, int) or top_k is None: + postprocess_params["top_k"] = top_k + postprocess_params["_legacy"] = False + elif return_all_scores is not None: + warnings.warn( + "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" + " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", + UserWarning, + ) + if return_all_scores: + postprocess_params["top_k"] = None + else: + postprocess_params["top_k"] = 1 + + if isinstance(function_to_apply, str): + function_to_apply = ClassificationFunction[function_to_apply.upper()] + + if function_to_apply is not None: + postprocess_params["function_to_apply"] = function_to_apply + return preprocess_params, {}, postprocess_params + + def __call__(self, inputs, **kwargs): + """ + Classify the text(s) given as inputs. + + Args: + inputs (`str` or `List[str]` or `Dict[str]`, or `List[Dict[str]]`): + One or several texts to classify. In order to use text pairs for your classification, you can send a + dictionary containing `{"text", "text_pair"}` keys, or a list of those. + top_k (`int`, *optional*, defaults to `1`): + How many results to return. + function_to_apply (`str`, *optional*, defaults to `"default"`): + The function to apply to the model outputs in order to retrieve the scores. Accepts four different + values: + + If this argument is not specified, then it will apply the following functions according to the number + of labels: + + - If the model has a single label, will apply the sigmoid function on the output. + - If the model has several labels, will apply the softmax function on the output. + + Possible values are: + + - `"sigmoid"`: Applies the sigmoid function on the output. + - `"softmax"`: Applies the softmax function on the output. + - `"none"`: Does not apply any function on the output. + + Return: + A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys: + + - **label** (`str`) -- The label predicted. + - **score** (`float`) -- The corresponding probability. + + If `top_k` is used, one such dictionary is returned per label. + """ + inputs = (inputs,) + result = super().__call__(*inputs, **kwargs) + # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
+ _legacy = "top_k" not in kwargs + if isinstance(inputs[0], str) and _legacy: + # This pipeline is odd, and return a list when single item is run + return [result] + else: + return result + + def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]: + return_tensors = self.framework + if isinstance(inputs, dict): + return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs) + elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2: + # It used to be valid to use a list of list of list for text pairs, keeping this path for BC + return self.tokenizer( + text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs + ) + elif isinstance(inputs, list): + # This is likely an invalid usage of the pipeline attempting to pass text pairs. + raise ValueError( + "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" + ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' + ) + return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs) + + def _forward(self, model_inputs): + # `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported + model_forward = self.model.forward if self.framework == "pt" else self.model.call + if "use_cache" in inspect.signature(model_forward).parameters.keys(): + model_inputs["use_cache"] = False + return self.model(**model_inputs) + + def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True): + # `_legacy` is used to determine if we're running the naked pipeline and in backward + # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running + # the more natural result containing the list. 
+ # Default value before `set_parameters` + if function_to_apply is None: + if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: + function_to_apply = ClassificationFunction.SIGMOID + elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: + function_to_apply = ClassificationFunction.SOFTMAX + elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: + function_to_apply = self.model.config.function_to_apply + else: + function_to_apply = ClassificationFunction.NONE + + outputs = model_outputs["logits"][0] + outputs = outputs.numpy() + + if function_to_apply == ClassificationFunction.SIGMOID: + scores = sigmoid(outputs) + elif function_to_apply == ClassificationFunction.SOFTMAX: + scores = softmax(outputs) + elif function_to_apply == ClassificationFunction.NONE: + scores = outputs + else: + raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") + + if top_k == 1 and _legacy: + return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()} + + dict_scores = [ + {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) + ] + if not _legacy: + dict_scores.sort(key=lambda x: x["score"], reverse=True) + if top_k is not None: + dict_scores = dict_scores[:top_k] + return dict_scores diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_audio_classification.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_audio_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..ca9f5e4fcfd4955a6335f574fa3b406257b1f111 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_audio_classification.py @@ -0,0 +1,155 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import UserDict +from typing import Union + +import numpy as np +import requests + +from ..utils import ( + add_end_docstrings, + logging, +) +from .audio_classification import ffmpeg_read +from .base import Pipeline, build_pipeline_init_args + + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_feature_extractor=True, has_tokenizer=True)) +class ZeroShotAudioClassificationPipeline(Pipeline): + """ + Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you + provide an audio and a set of `candidate_labels`. 
+ + Example: + ```python + >>> from transformers import pipeline + >>> from datasets import load_dataset + + >>> dataset = load_dataset("ashraq/esc50") + >>> audio = next(iter(dataset["train"]["audio"]))["array"] + >>> classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused") + >>> classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) + [{'score': 0.9996, 'label': 'Sound of a dog'}, {'score': 0.0004, 'label': 'Sound of vaccum cleaner'}] + ``` + + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This audio + classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-audio-classification"`. See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-audio-classification). + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + if self.framework != "pt": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + # No specific FOR_XXX available yet + + def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs): + """ + Assign labels to the audio(s) passed as inputs. + + Args: + audios (`str`, `List[str]`, `np.array` or `List[np.array]`): + The pipeline handles three types of inputs: + - A string containing a http link pointing to an audio + - A string containing a local path to an audio + - An audio loaded in numpy + candidate_labels (`List[str]`): + The candidate labels for this audio + hypothesis_template (`str`, *optional*, defaults to `"This is a sound of {}"`): + The sentence used in cunjunction with *candidate_labels* to attempt the audio classification by + replacing the placeholder with the candidate_labels. Then likelihood is estimated by using + logits_per_audio + Return: + A list of dictionaries containing result, one dictionary per proposed label. The dictionaries contain the + following keys: + - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`. + - **score** (`float`) -- The score attributed by the model for that label (between 0 and 1). 
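A hedged sketch of a custom `hypothesis_template`, reusing the checkpoint from the class example; the audio URL and labels are illustrative, and decoding remote audio requires `ffmpeg`:

```python
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")

# `audio` may be a URL, a local path, or a 1-D numpy array sampled at the
# feature extractor's sampling rate; the template wraps every candidate label.
predictions = classifier(
    "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
    candidate_labels=["speech", "music"],
    hypothesis_template="This is a recording of {}.",
)
```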
+ """ + return super().__call__(audios, **kwargs) + + def _sanitize_parameters(self, **kwargs): + preprocess_params = {} + if "candidate_labels" in kwargs: + preprocess_params["candidate_labels"] = kwargs["candidate_labels"] + if "hypothesis_template" in kwargs: + preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] + + return preprocess_params, {}, {} + + def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."): + if isinstance(audio, str): + if audio.startswith("http://") or audio.startswith("https://"): + # We need to actually check for a real protocol, otherwise it's impossible to use a local file + # like http_huggingface_co.png + audio = requests.get(audio).content + else: + with open(audio, "rb") as f: + audio = f.read() + + if isinstance(audio, bytes): + audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate) + + if not isinstance(audio, np.ndarray): + raise ValueError("We expect a numpy ndarray as input") + if len(audio.shape) != 1: + raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline") + + inputs = self.feature_extractor( + [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" + ) + inputs["candidate_labels"] = candidate_labels + sequences = [hypothesis_template.format(x) for x in candidate_labels] + text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True) + inputs["text_inputs"] = [text_inputs] + return inputs + + def _forward(self, model_inputs): + candidate_labels = model_inputs.pop("candidate_labels") + text_inputs = model_inputs.pop("text_inputs") + if isinstance(text_inputs[0], UserDict): + text_inputs = text_inputs[0] + else: + # Batching case. + text_inputs = text_inputs[0][0] + + outputs = self.model(**text_inputs, **model_inputs) + + model_outputs = { + "candidate_labels": candidate_labels, + "logits": outputs.logits_per_audio, + } + return model_outputs + + def postprocess(self, model_outputs): + candidate_labels = model_outputs.pop("candidate_labels") + logits = model_outputs["logits"][0] + + if self.framework == "pt": + probs = logits.softmax(dim=0) + scores = probs.tolist() + else: + raise ValueError("`tf` framework not supported.") + + result = [ + {"score": score, "label": candidate_label} + for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) + ] + return result diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_image_classification.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..8e40d0e6a5cbfa4bb5938dfc9d7883071485aa77 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_image_classification.py @@ -0,0 +1,170 @@ +from collections import UserDict +from typing import List, Union + +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES + from ..tf_utils import 
stable_softmax + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class ZeroShotImageClassificationPipeline(Pipeline): + """ + Zero shot image classification pipeline using `CLIPModel`. This pipeline predicts the class of an image when you + provide an image and a set of `candidate_labels`. + + Example: + + ```python + >>> from transformers import pipeline + + >>> classifier = pipeline(model="google/siglip-so400m-patch14-384") + >>> classifier( + ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ... candidate_labels=["animals", "humans", "landscape"], + ... ) + [{'score': 0.965, 'label': 'animals'}, {'score': 0.03, 'label': 'humans'}, {'score': 0.005, 'label': 'landscape'}] + + >>> classifier( + ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ... candidate_labels=["black and white", "photorealist", "painting"], + ... ) + [{'score': 0.996, 'label': 'black and white'}, {'score': 0.003, 'label': 'photorealist'}, {'score': 0.0, 'label': 'painting'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-image-classification"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-image-classification). + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + requires_backends(self, "vision") + self.check_model_type( + TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES + if self.framework == "tf" + else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES + ) + + def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs): + """ + Assign labels to the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + candidate_labels (`List[str]`): + The candidate labels for this image + + hypothesis_template (`str`, *optional*, defaults to `"This is a photo of {}"`): + The sentence used in cunjunction with *candidate_labels* to attempt the image classification by + replacing the placeholder with the candidate_labels. Then likelihood is estimated by using + logits_per_image + + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + Return: + A list of dictionaries containing result, one dictionary per proposed label. The dictionaries contain the + following keys: + + - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`. + - **score** (`float`) -- The score attributed by the model for that label (between 0 and 1). 
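A hedged sketch of the `hypothesis_template` and `timeout` arguments, reusing the checkpoint and image from the class example; the candidate labels and template are illustrative:

```python
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="google/siglip-so400m-patch14-384")

predictions = classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["two parrots", "a cat", "a mountain"],
    hypothesis_template="A photo of {}.",  # wraps each candidate label
    timeout=10.0,                          # give up fetching the image after 10 seconds
)
```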
+ """ + return super().__call__(images, **kwargs) + + def _sanitize_parameters(self, **kwargs): + preprocess_params = {} + if "candidate_labels" in kwargs: + preprocess_params["candidate_labels"] = kwargs["candidate_labels"] + if "timeout" in kwargs: + preprocess_params["timeout"] = kwargs["timeout"] + if "hypothesis_template" in kwargs: + preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] + + return preprocess_params, {}, {} + + def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}.", timeout=None): + image = load_image(image, timeout=timeout) + inputs = self.image_processor(images=[image], return_tensors=self.framework) + inputs["candidate_labels"] = candidate_labels + sequences = [hypothesis_template.format(x) for x in candidate_labels] + padding = "max_length" if self.model.config.model_type == "siglip" else True + text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=padding) + inputs["text_inputs"] = [text_inputs] + return inputs + + def _forward(self, model_inputs): + candidate_labels = model_inputs.pop("candidate_labels") + text_inputs = model_inputs.pop("text_inputs") + if isinstance(text_inputs[0], UserDict): + text_inputs = text_inputs[0] + else: + # Batching case. + text_inputs = text_inputs[0][0] + + outputs = self.model(**text_inputs, **model_inputs) + + model_outputs = { + "candidate_labels": candidate_labels, + "logits": outputs.logits_per_image, + } + return model_outputs + + def postprocess(self, model_outputs): + candidate_labels = model_outputs.pop("candidate_labels") + logits = model_outputs["logits"][0] + if self.framework == "pt" and self.model.config.model_type == "siglip": + probs = torch.sigmoid(logits).squeeze(-1) + scores = probs.tolist() + if not isinstance(scores, list): + scores = [scores] + elif self.framework == "pt": + probs = logits.softmax(dim=-1).squeeze(-1) + scores = probs.tolist() + if not isinstance(scores, list): + scores = [scores] + elif self.framework == "tf": + probs = stable_softmax(logits, axis=-1) + scores = probs.numpy().tolist() + else: + raise ValueError(f"Unsupported framework: {self.framework}") + + result = [ + {"score": score, "label": candidate_label} + for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) + ] + return result diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_object_detection.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..5be89332cbd910fe182600c11a58d5f1994dd46b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_object_detection.py @@ -0,0 +1,218 @@ +from typing import Any, Dict, List, Union + +from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends +from .base import ChunkPipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from transformers.modeling_outputs import BaseModelOutput + + from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class ZeroShotObjectDetectionPipeline(ChunkPipeline): + """ + Zero shot object detection pipeline using `OwlViTForObjectDetection`. 
This pipeline predicts bounding boxes of + objects when you provide an image and a set of `candidate_labels`. + + Example: + + ```python + >>> from transformers import pipeline + + >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") + >>> detector( + ... "http://images.cocodataset.org/val2017/000000039769.jpg", + ... candidate_labels=["cat", "couch"], + ... ) + [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}] + + >>> detector( + ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ... candidate_labels=["head", "bird"], + ... ) + [{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). + + This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-object-detection"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection). + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + if self.framework == "tf": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + + requires_backends(self, "vision") + self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES) + + def __call__( + self, + image: Union[str, "Image.Image", List[Dict[str, Any]]], + candidate_labels: Union[str, List[str]] = None, + **kwargs, + ): + """ + Detect objects (bounding boxes & classes) in the image(s) passed as inputs. + + Args: + image (`str`, `PIL.Image` or `List[Dict[str, Any]]`): + The pipeline handles three types of images: + + - A string containing an http url pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + You can use this parameter to send a list of images, a dataset, or a generator directly, like so: + + ```python + >>> from transformers import pipeline + + >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") + >>> detector( + ... [ + ... { + ... "image": "http://images.cocodataset.org/val2017/000000039769.jpg", + ... "candidate_labels": ["cat", "couch"], + ... }, + ... { + ... "image": "http://images.cocodataset.org/val2017/000000039769.jpg", + ... "candidate_labels": ["cat", "couch"], + ... }, + ... ] + ... ) + [[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]] + ``` + + + candidate_labels (`str` or `List[str]` or `List[List[str]]`): + What the model should recognize in the image. + + threshold (`float`, *optional*, defaults to 0.1): + The score threshold used to filter out low-confidence detections. 
+ + top_k (`int`, *optional*, defaults to None): + The number of top predictions that will be returned by the pipeline. If the provided number is `None` + or higher than the number of predictions available, it will default to the number of predictions. + + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + + Return: + A list of lists containing prediction results, one list per input image. Each list contains dictionaries + with the following keys: + + - **label** (`str`) -- Text query corresponding to the found object. + - **score** (`float`) -- Score corresponding to the object (between 0 and 1). + - **box** (`Dict[str,int]`) -- Bounding box of the detected object in the image's original size. It is a + dictionary with the `xmin`, `ymin`, `xmax`, `ymax` keys. + """ + if "text_queries" in kwargs: + candidate_labels = kwargs.pop("text_queries") + + if isinstance(image, (str, Image.Image)): + inputs = {"image": image, "candidate_labels": candidate_labels} + else: + inputs = image + results = super().__call__(inputs, **kwargs) + return results + + def _sanitize_parameters(self, **kwargs): + preprocess_params = {} + if "timeout" in kwargs: + preprocess_params["timeout"] = kwargs["timeout"] + postprocess_params = {} + if "threshold" in kwargs: + postprocess_params["threshold"] = kwargs["threshold"] + if "top_k" in kwargs: + postprocess_params["top_k"] = kwargs["top_k"] + return preprocess_params, {}, postprocess_params + + def preprocess(self, inputs, timeout=None): + image = load_image(inputs["image"], timeout=timeout) + candidate_labels = inputs["candidate_labels"] + if isinstance(candidate_labels, str): + candidate_labels = candidate_labels.split(",") + + target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32) + for i, candidate_label in enumerate(candidate_labels): + text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework) + image_features = self.image_processor(image, return_tensors=self.framework) + yield { + "is_last": i == len(candidate_labels) - 1, + "target_size": target_size, + "candidate_label": candidate_label, + **text_inputs, + **image_features, + } + + def _forward(self, model_inputs): + target_size = model_inputs.pop("target_size") + candidate_label = model_inputs.pop("candidate_label") + is_last = model_inputs.pop("is_last") + + outputs = self.model(**model_inputs) + + model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} + return model_outputs + + def postprocess(self, model_outputs, threshold=0.1, top_k=None): + results = [] + for model_output in model_outputs: + label = model_output["candidate_label"] + model_output = BaseModelOutput(model_output) + outputs = self.image_processor.post_process_object_detection( + outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"] + )[0] + + for index in outputs["scores"].nonzero(): + score = outputs["scores"][index].item() + box = self._get_bounding_box(outputs["boxes"][index][0]) + + result = {"score": score, "label": label, "box": box} + results.append(result) + + results = sorted(results, key=lambda x: x["score"], reverse=True) + if top_k: + results = results[:top_k] + + return results + + def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]: + """ + Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... 
} + + Args: + box (`torch.Tensor`): Tensor containing the coordinates in corners format. + + Returns: + bbox (`Dict[str, int]`): Dict containing the coordinates in corners format. + """ + if self.framework != "pt": + raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") + xmin, ymin, xmax, ymax = box.int().tolist() + bbox = { + "xmin": xmin, + "ymin": ymin, + "xmax": xmax, + "ymax": ymax, + } + return bbox
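
For reference, here is a minimal usage sketch of the two pipelines added in this diff. The task identifiers, example checkpoints, and parameter names (`candidate_labels`, `hypothesis_template`, `timeout`, `threshold`, `top_k`) come from the docstrings above; the concrete values passed below are illustrative assumptions rather than recommended settings.

```python
from transformers import pipeline

# Zero-shot image classification: hypothesis_template and timeout are routed
# to preprocess() through _sanitize_parameters().
classifier = pipeline(
    task="zero-shot-image-classification",
    model="google/siglip-so400m-patch14-384",
)
print(
    classifier(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        candidate_labels=["animals", "humans", "landscape"],
        hypothesis_template="This is a photo of {}.",
        timeout=30.0,  # assumed value; None (the default) disables the timeout
    )
)

# Zero-shot object detection: threshold and top_k are routed to postprocess()
# through _sanitize_parameters().
detector = pipeline(
    task="zero-shot-object-detection",
    model="google/owlvit-base-patch32",
)
print(
    detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "couch"],
        threshold=0.2,  # assumed value; default is 0.1
        top_k=3,  # assumed value; by default all detections above threshold are returned
    )
)
```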