Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/activations.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/activations_tf.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/audio_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_graph_to_onnx.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_slow_tokenizer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_slow_tokenizers_checkpoints_to_fast.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_tf_hub_seq_to_seq_bert_to_pytorch.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/deepspeed.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/dependency_versions_check.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/dependency_versions_table.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/dynamic_module_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_sequence_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/file_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_flax_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/hf_argparser.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_transforms.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modelcard.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_outputs.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/optimization_tf.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/pytorch_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tf_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/time_series_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_callback.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_pt_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args_seq2seq.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__init__.py +18 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/glue.py +643 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/squad.py +845 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/utils.py +349 -0
- env-llmeval/lib/python3.10/site-packages/transformers/data/processors/xnli.py +97 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__init__.py +1107 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (181 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/activations.cpython-310.pyc
ADDED
Binary file (9.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/activations_tf.cpython-310.pyc
ADDED
Binary file (4.53 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/audio_utils.cpython-310.pyc
ADDED
Binary file (30.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_graph_to_onnx.cpython-310.pyc
ADDED
Binary file (16.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_slow_tokenizer.cpython-310.pyc
ADDED
Binary file (38.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_slow_tokenizers_checkpoints_to_fast.cpython-310.pyc
ADDED
Binary file (2.98 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_tf_hub_seq_to_seq_bert_to_pytorch.cpython-310.pyc
ADDED
Binary file (1.91 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/deepspeed.cpython-310.pyc
ADDED
Binary file (999 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/dependency_versions_check.cpython-310.pyc
ADDED
Binary file (941 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/dependency_versions_table.cpython-310.pyc
ADDED
Binary file (3.04 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/dynamic_module_utils.cpython-310.pyc
ADDED
Binary file (19.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_sequence_utils.cpython-310.pyc
ADDED
Binary file (13.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/file_utils.cpython-310.pyc
ADDED
Binary file (3.74 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_flax_utils.cpython-310.pyc
ADDED
Binary file (667 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_utils.cpython-310.pyc
ADDED
Binary file (645 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/hf_argparser.cpython-310.pyc
ADDED
Binary file (15.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_transforms.cpython-310.pyc
ADDED
Binary file (24.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc
ADDED
Binary file (23.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modelcard.cpython-310.pyc
ADDED
Binary file (25.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_outputs.cpython-310.pyc
ADDED
Binary file (55.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/optimization_tf.cpython-310.pyc
ADDED
Binary file (14.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/pytorch_utils.cpython-310.pyc
ADDED
Binary file (10.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tf_utils.cpython-310.pyc
ADDED
Binary file (8.16 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/time_series_utils.cpython-310.pyc
ADDED
Binary file (8.94 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils.cpython-310.pyc
ADDED
Binary file (29.1 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-310.pyc
ADDED
Binary file (147 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc
ADDED
Binary file (25.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_callback.cpython-310.pyc
ADDED
Binary file (23.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_pt_utils.cpython-310.pyc
ADDED
Binary file (50.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args.cpython-310.pyc
ADDED
Binary file (114 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args_seq2seq.cpython-310.pyc
ADDED
Binary file (3.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.54 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__init__.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
|
16 |
+
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
|
17 |
+
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
|
18 |
+
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
|
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc
ADDED
Binary file (17.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc
ADDED
Binary file (20 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (12.1 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc
ADDED
Binary file (2.53 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/glue.py
ADDED
@@ -0,0 +1,643 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
|
3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
""" GLUE processors and helpers"""
|
17 |
+
|
18 |
+
import os
|
19 |
+
import warnings
|
20 |
+
from dataclasses import asdict
|
21 |
+
from enum import Enum
|
22 |
+
from typing import List, Optional, Union
|
23 |
+
|
24 |
+
from ...tokenization_utils import PreTrainedTokenizer
|
25 |
+
from ...utils import is_tf_available, logging
|
26 |
+
from .utils import DataProcessor, InputExample, InputFeatures
|
27 |
+
|
28 |
+
|
29 |
+
if is_tf_available():
|
30 |
+
import tensorflow as tf
|
31 |
+
|
32 |
+
logger = logging.get_logger(__name__)
|
33 |
+
|
34 |
+
DEPRECATION_WARNING = (
|
35 |
+
"This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
|
36 |
+
"library. You can have a look at this example script for pointers: "
|
37 |
+
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
|
38 |
+
)
|
39 |
+
|
40 |
+
|
41 |
+
def glue_convert_examples_to_features(
|
42 |
+
examples: Union[List[InputExample], "tf.data.Dataset"],
|
43 |
+
tokenizer: PreTrainedTokenizer,
|
44 |
+
max_length: Optional[int] = None,
|
45 |
+
task=None,
|
46 |
+
label_list=None,
|
47 |
+
output_mode=None,
|
48 |
+
):
|
49 |
+
"""
|
50 |
+
Loads a data file into a list of `InputFeatures`
|
51 |
+
|
52 |
+
Args:
|
53 |
+
examples: List of `InputExamples` or `tf.data.Dataset` containing the examples.
|
54 |
+
tokenizer: Instance of a tokenizer that will tokenize the examples
|
55 |
+
max_length: Maximum example length. Defaults to the tokenizer's max_len
|
56 |
+
task: GLUE task
|
57 |
+
label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method
|
58 |
+
output_mode: String indicating the output mode. Either `regression` or `classification`
|
59 |
+
|
60 |
+
Returns:
|
61 |
+
If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific
|
62 |
+
features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which
|
63 |
+
can be fed to the model.
|
64 |
+
|
65 |
+
"""
|
66 |
+
warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning)
|
67 |
+
if is_tf_available() and isinstance(examples, tf.data.Dataset):
|
68 |
+
if task is None:
|
69 |
+
raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.")
|
70 |
+
return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
|
71 |
+
return _glue_convert_examples_to_features(
|
72 |
+
examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode
|
73 |
+
)
|
74 |
+
|
75 |
+
|
76 |
+
if is_tf_available():
|
77 |
+
|
78 |
+
def _tf_glue_convert_examples_to_features(
|
79 |
+
examples: tf.data.Dataset,
|
80 |
+
tokenizer: PreTrainedTokenizer,
|
81 |
+
task=str,
|
82 |
+
max_length: Optional[int] = None,
|
83 |
+
) -> tf.data.Dataset:
|
84 |
+
"""
|
85 |
+
Returns:
|
86 |
+
A `tf.data.Dataset` containing the task-specific features.
|
87 |
+
|
88 |
+
"""
|
89 |
+
processor = glue_processors[task]()
|
90 |
+
examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples]
|
91 |
+
features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
|
92 |
+
label_type = tf.float32 if task == "sts-b" else tf.int64
|
93 |
+
|
94 |
+
def gen():
|
95 |
+
for ex in features:
|
96 |
+
d = {k: v for k, v in asdict(ex).items() if v is not None}
|
97 |
+
label = d.pop("label")
|
98 |
+
yield (d, label)
|
99 |
+
|
100 |
+
input_names = tokenizer.model_input_names
|
101 |
+
|
102 |
+
return tf.data.Dataset.from_generator(
|
103 |
+
gen,
|
104 |
+
({k: tf.int32 for k in input_names}, label_type),
|
105 |
+
({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
|
106 |
+
)
|
107 |
+
|
108 |
+
|
109 |
+
def _glue_convert_examples_to_features(
|
110 |
+
examples: List[InputExample],
|
111 |
+
tokenizer: PreTrainedTokenizer,
|
112 |
+
max_length: Optional[int] = None,
|
113 |
+
task=None,
|
114 |
+
label_list=None,
|
115 |
+
output_mode=None,
|
116 |
+
):
|
117 |
+
if max_length is None:
|
118 |
+
max_length = tokenizer.model_max_length
|
119 |
+
|
120 |
+
if task is not None:
|
121 |
+
processor = glue_processors[task]()
|
122 |
+
if label_list is None:
|
123 |
+
label_list = processor.get_labels()
|
124 |
+
logger.info(f"Using label list {label_list} for task {task}")
|
125 |
+
if output_mode is None:
|
126 |
+
output_mode = glue_output_modes[task]
|
127 |
+
logger.info(f"Using output mode {output_mode} for task {task}")
|
128 |
+
|
129 |
+
label_map = {label: i for i, label in enumerate(label_list)}
|
130 |
+
|
131 |
+
def label_from_example(example: InputExample) -> Union[int, float, None]:
|
132 |
+
if example.label is None:
|
133 |
+
return None
|
134 |
+
if output_mode == "classification":
|
135 |
+
return label_map[example.label]
|
136 |
+
elif output_mode == "regression":
|
137 |
+
return float(example.label)
|
138 |
+
raise KeyError(output_mode)
|
139 |
+
|
140 |
+
labels = [label_from_example(example) for example in examples]
|
141 |
+
|
142 |
+
batch_encoding = tokenizer(
|
143 |
+
[(example.text_a, example.text_b) for example in examples],
|
144 |
+
max_length=max_length,
|
145 |
+
padding="max_length",
|
146 |
+
truncation=True,
|
147 |
+
)
|
148 |
+
|
149 |
+
features = []
|
150 |
+
for i in range(len(examples)):
|
151 |
+
inputs = {k: batch_encoding[k][i] for k in batch_encoding}
|
152 |
+
|
153 |
+
feature = InputFeatures(**inputs, label=labels[i])
|
154 |
+
features.append(feature)
|
155 |
+
|
156 |
+
for i, example in enumerate(examples[:5]):
|
157 |
+
logger.info("*** Example ***")
|
158 |
+
logger.info(f"guid: {example.guid}")
|
159 |
+
logger.info(f"features: {features[i]}")
|
160 |
+
|
161 |
+
return features
|
162 |
+
|
163 |
+
|
164 |
+
class OutputMode(Enum):
|
165 |
+
classification = "classification"
|
166 |
+
regression = "regression"
|
167 |
+
|
168 |
+
|
169 |
+
class MrpcProcessor(DataProcessor):
|
170 |
+
"""Processor for the MRPC data set (GLUE version)."""
|
171 |
+
|
172 |
+
def __init__(self, *args, **kwargs):
|
173 |
+
super().__init__(*args, **kwargs)
|
174 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
175 |
+
|
176 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
177 |
+
"""See base class."""
|
178 |
+
return InputExample(
|
179 |
+
tensor_dict["idx"].numpy(),
|
180 |
+
tensor_dict["sentence1"].numpy().decode("utf-8"),
|
181 |
+
tensor_dict["sentence2"].numpy().decode("utf-8"),
|
182 |
+
str(tensor_dict["label"].numpy()),
|
183 |
+
)
|
184 |
+
|
185 |
+
def get_train_examples(self, data_dir):
|
186 |
+
"""See base class."""
|
187 |
+
logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
|
188 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
189 |
+
|
190 |
+
def get_dev_examples(self, data_dir):
|
191 |
+
"""See base class."""
|
192 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
193 |
+
|
194 |
+
def get_test_examples(self, data_dir):
|
195 |
+
"""See base class."""
|
196 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
197 |
+
|
198 |
+
def get_labels(self):
|
199 |
+
"""See base class."""
|
200 |
+
return ["0", "1"]
|
201 |
+
|
202 |
+
def _create_examples(self, lines, set_type):
|
203 |
+
"""Creates examples for the training, dev and test sets."""
|
204 |
+
examples = []
|
205 |
+
for i, line in enumerate(lines):
|
206 |
+
if i == 0:
|
207 |
+
continue
|
208 |
+
guid = f"{set_type}-{i}"
|
209 |
+
text_a = line[3]
|
210 |
+
text_b = line[4]
|
211 |
+
label = None if set_type == "test" else line[0]
|
212 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
|
213 |
+
return examples
|
214 |
+
|
215 |
+
|
216 |
+
class MnliProcessor(DataProcessor):
|
217 |
+
"""Processor for the MultiNLI data set (GLUE version)."""
|
218 |
+
|
219 |
+
def __init__(self, *args, **kwargs):
|
220 |
+
super().__init__(*args, **kwargs)
|
221 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
222 |
+
|
223 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
224 |
+
"""See base class."""
|
225 |
+
return InputExample(
|
226 |
+
tensor_dict["idx"].numpy(),
|
227 |
+
tensor_dict["premise"].numpy().decode("utf-8"),
|
228 |
+
tensor_dict["hypothesis"].numpy().decode("utf-8"),
|
229 |
+
str(tensor_dict["label"].numpy()),
|
230 |
+
)
|
231 |
+
|
232 |
+
def get_train_examples(self, data_dir):
|
233 |
+
"""See base class."""
|
234 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
235 |
+
|
236 |
+
def get_dev_examples(self, data_dir):
|
237 |
+
"""See base class."""
|
238 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
|
239 |
+
|
240 |
+
def get_test_examples(self, data_dir):
|
241 |
+
"""See base class."""
|
242 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched")
|
243 |
+
|
244 |
+
def get_labels(self):
|
245 |
+
"""See base class."""
|
246 |
+
return ["contradiction", "entailment", "neutral"]
|
247 |
+
|
248 |
+
def _create_examples(self, lines, set_type):
|
249 |
+
"""Creates examples for the training, dev and test sets."""
|
250 |
+
examples = []
|
251 |
+
for i, line in enumerate(lines):
|
252 |
+
if i == 0:
|
253 |
+
continue
|
254 |
+
guid = f"{set_type}-{line[0]}"
|
255 |
+
text_a = line[8]
|
256 |
+
text_b = line[9]
|
257 |
+
label = None if set_type.startswith("test") else line[-1]
|
258 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
|
259 |
+
return examples
|
260 |
+
|
261 |
+
|
262 |
+
class MnliMismatchedProcessor(MnliProcessor):
|
263 |
+
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
|
264 |
+
|
265 |
+
def __init__(self, *args, **kwargs):
|
266 |
+
super().__init__(*args, **kwargs)
|
267 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
268 |
+
|
269 |
+
def get_dev_examples(self, data_dir):
|
270 |
+
"""See base class."""
|
271 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched")
|
272 |
+
|
273 |
+
def get_test_examples(self, data_dir):
|
274 |
+
"""See base class."""
|
275 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched")
|
276 |
+
|
277 |
+
|
278 |
+
class ColaProcessor(DataProcessor):
|
279 |
+
"""Processor for the CoLA data set (GLUE version)."""
|
280 |
+
|
281 |
+
def __init__(self, *args, **kwargs):
|
282 |
+
super().__init__(*args, **kwargs)
|
283 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
284 |
+
|
285 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
286 |
+
"""See base class."""
|
287 |
+
return InputExample(
|
288 |
+
tensor_dict["idx"].numpy(),
|
289 |
+
tensor_dict["sentence"].numpy().decode("utf-8"),
|
290 |
+
None,
|
291 |
+
str(tensor_dict["label"].numpy()),
|
292 |
+
)
|
293 |
+
|
294 |
+
def get_train_examples(self, data_dir):
|
295 |
+
"""See base class."""
|
296 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
297 |
+
|
298 |
+
def get_dev_examples(self, data_dir):
|
299 |
+
"""See base class."""
|
300 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
301 |
+
|
302 |
+
def get_test_examples(self, data_dir):
|
303 |
+
"""See base class."""
|
304 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
305 |
+
|
306 |
+
def get_labels(self):
|
307 |
+
"""See base class."""
|
308 |
+
return ["0", "1"]
|
309 |
+
|
310 |
+
def _create_examples(self, lines, set_type):
|
311 |
+
"""Creates examples for the training, dev and test sets."""
|
312 |
+
test_mode = set_type == "test"
|
313 |
+
if test_mode:
|
314 |
+
lines = lines[1:]
|
315 |
+
text_index = 1 if test_mode else 3
|
316 |
+
examples = []
|
317 |
+
for i, line in enumerate(lines):
|
318 |
+
guid = f"{set_type}-{i}"
|
319 |
+
text_a = line[text_index]
|
320 |
+
label = None if test_mode else line[1]
|
321 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
|
322 |
+
return examples
|
323 |
+
|
324 |
+
|
325 |
+
class Sst2Processor(DataProcessor):
|
326 |
+
"""Processor for the SST-2 data set (GLUE version)."""
|
327 |
+
|
328 |
+
def __init__(self, *args, **kwargs):
|
329 |
+
super().__init__(*args, **kwargs)
|
330 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
331 |
+
|
332 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
333 |
+
"""See base class."""
|
334 |
+
return InputExample(
|
335 |
+
tensor_dict["idx"].numpy(),
|
336 |
+
tensor_dict["sentence"].numpy().decode("utf-8"),
|
337 |
+
None,
|
338 |
+
str(tensor_dict["label"].numpy()),
|
339 |
+
)
|
340 |
+
|
341 |
+
def get_train_examples(self, data_dir):
|
342 |
+
"""See base class."""
|
343 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
344 |
+
|
345 |
+
def get_dev_examples(self, data_dir):
|
346 |
+
"""See base class."""
|
347 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
348 |
+
|
349 |
+
def get_test_examples(self, data_dir):
|
350 |
+
"""See base class."""
|
351 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
352 |
+
|
353 |
+
def get_labels(self):
|
354 |
+
"""See base class."""
|
355 |
+
return ["0", "1"]
|
356 |
+
|
357 |
+
def _create_examples(self, lines, set_type):
|
358 |
+
"""Creates examples for the training, dev and test sets."""
|
359 |
+
examples = []
|
360 |
+
text_index = 1 if set_type == "test" else 0
|
361 |
+
for i, line in enumerate(lines):
|
362 |
+
if i == 0:
|
363 |
+
continue
|
364 |
+
guid = f"{set_type}-{i}"
|
365 |
+
text_a = line[text_index]
|
366 |
+
label = None if set_type == "test" else line[1]
|
367 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
|
368 |
+
return examples
|
369 |
+
|
370 |
+
|
371 |
+
class StsbProcessor(DataProcessor):
|
372 |
+
"""Processor for the STS-B data set (GLUE version)."""
|
373 |
+
|
374 |
+
def __init__(self, *args, **kwargs):
|
375 |
+
super().__init__(*args, **kwargs)
|
376 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
377 |
+
|
378 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
379 |
+
"""See base class."""
|
380 |
+
return InputExample(
|
381 |
+
tensor_dict["idx"].numpy(),
|
382 |
+
tensor_dict["sentence1"].numpy().decode("utf-8"),
|
383 |
+
tensor_dict["sentence2"].numpy().decode("utf-8"),
|
384 |
+
str(tensor_dict["label"].numpy()),
|
385 |
+
)
|
386 |
+
|
387 |
+
def get_train_examples(self, data_dir):
|
388 |
+
"""See base class."""
|
389 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
390 |
+
|
391 |
+
def get_dev_examples(self, data_dir):
|
392 |
+
"""See base class."""
|
393 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
394 |
+
|
395 |
+
def get_test_examples(self, data_dir):
|
396 |
+
"""See base class."""
|
397 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
398 |
+
|
399 |
+
def get_labels(self):
|
400 |
+
"""See base class."""
|
401 |
+
return [None]
|
402 |
+
|
403 |
+
def _create_examples(self, lines, set_type):
|
404 |
+
"""Creates examples for the training, dev and test sets."""
|
405 |
+
examples = []
|
406 |
+
for i, line in enumerate(lines):
|
407 |
+
if i == 0:
|
408 |
+
continue
|
409 |
+
guid = f"{set_type}-{line[0]}"
|
410 |
+
text_a = line[7]
|
411 |
+
text_b = line[8]
|
412 |
+
label = None if set_type == "test" else line[-1]
|
413 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
|
414 |
+
return examples
|
415 |
+
|
416 |
+
|
417 |
+
class QqpProcessor(DataProcessor):
|
418 |
+
"""Processor for the QQP data set (GLUE version)."""
|
419 |
+
|
420 |
+
def __init__(self, *args, **kwargs):
|
421 |
+
super().__init__(*args, **kwargs)
|
422 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
423 |
+
|
424 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
425 |
+
"""See base class."""
|
426 |
+
return InputExample(
|
427 |
+
tensor_dict["idx"].numpy(),
|
428 |
+
tensor_dict["question1"].numpy().decode("utf-8"),
|
429 |
+
tensor_dict["question2"].numpy().decode("utf-8"),
|
430 |
+
str(tensor_dict["label"].numpy()),
|
431 |
+
)
|
432 |
+
|
433 |
+
def get_train_examples(self, data_dir):
|
434 |
+
"""See base class."""
|
435 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
436 |
+
|
437 |
+
def get_dev_examples(self, data_dir):
|
438 |
+
"""See base class."""
|
439 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
440 |
+
|
441 |
+
def get_test_examples(self, data_dir):
|
442 |
+
"""See base class."""
|
443 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
444 |
+
|
445 |
+
def get_labels(self):
|
446 |
+
"""See base class."""
|
447 |
+
return ["0", "1"]
|
448 |
+
|
449 |
+
def _create_examples(self, lines, set_type):
|
450 |
+
"""Creates examples for the training, dev and test sets."""
|
451 |
+
test_mode = set_type == "test"
|
452 |
+
q1_index = 1 if test_mode else 3
|
453 |
+
q2_index = 2 if test_mode else 4
|
454 |
+
examples = []
|
455 |
+
for i, line in enumerate(lines):
|
456 |
+
if i == 0:
|
457 |
+
continue
|
458 |
+
guid = f"{set_type}-{line[0]}"
|
459 |
+
try:
|
460 |
+
text_a = line[q1_index]
|
461 |
+
text_b = line[q2_index]
|
462 |
+
label = None if test_mode else line[5]
|
463 |
+
except IndexError:
|
464 |
+
continue
|
465 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
|
466 |
+
return examples
|
467 |
+
|
468 |
+
|
469 |
+
class QnliProcessor(DataProcessor):
|
470 |
+
"""Processor for the QNLI data set (GLUE version)."""
|
471 |
+
|
472 |
+
def __init__(self, *args, **kwargs):
|
473 |
+
super().__init__(*args, **kwargs)
|
474 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
475 |
+
|
476 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
477 |
+
"""See base class."""
|
478 |
+
return InputExample(
|
479 |
+
tensor_dict["idx"].numpy(),
|
480 |
+
tensor_dict["question"].numpy().decode("utf-8"),
|
481 |
+
tensor_dict["sentence"].numpy().decode("utf-8"),
|
482 |
+
str(tensor_dict["label"].numpy()),
|
483 |
+
)
|
484 |
+
|
485 |
+
def get_train_examples(self, data_dir):
|
486 |
+
"""See base class."""
|
487 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
488 |
+
|
489 |
+
def get_dev_examples(self, data_dir):
|
490 |
+
"""See base class."""
|
491 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
492 |
+
|
493 |
+
def get_test_examples(self, data_dir):
|
494 |
+
"""See base class."""
|
495 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
496 |
+
|
497 |
+
def get_labels(self):
|
498 |
+
"""See base class."""
|
499 |
+
return ["entailment", "not_entailment"]
|
500 |
+
|
501 |
+
def _create_examples(self, lines, set_type):
|
502 |
+
"""Creates examples for the training, dev and test sets."""
|
503 |
+
examples = []
|
504 |
+
for i, line in enumerate(lines):
|
505 |
+
if i == 0:
|
506 |
+
continue
|
507 |
+
guid = f"{set_type}-{line[0]}"
|
508 |
+
text_a = line[1]
|
509 |
+
text_b = line[2]
|
510 |
+
label = None if set_type == "test" else line[-1]
|
511 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
|
512 |
+
return examples
|
513 |
+
|
514 |
+
|
515 |
+
class RteProcessor(DataProcessor):
|
516 |
+
"""Processor for the RTE data set (GLUE version)."""
|
517 |
+
|
518 |
+
def __init__(self, *args, **kwargs):
|
519 |
+
super().__init__(*args, **kwargs)
|
520 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
521 |
+
|
522 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
523 |
+
"""See base class."""
|
524 |
+
return InputExample(
|
525 |
+
tensor_dict["idx"].numpy(),
|
526 |
+
tensor_dict["sentence1"].numpy().decode("utf-8"),
|
527 |
+
tensor_dict["sentence2"].numpy().decode("utf-8"),
|
528 |
+
str(tensor_dict["label"].numpy()),
|
529 |
+
)
|
530 |
+
|
531 |
+
def get_train_examples(self, data_dir):
|
532 |
+
"""See base class."""
|
533 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
534 |
+
|
535 |
+
def get_dev_examples(self, data_dir):
|
536 |
+
"""See base class."""
|
537 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
538 |
+
|
539 |
+
def get_test_examples(self, data_dir):
|
540 |
+
"""See base class."""
|
541 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
542 |
+
|
543 |
+
def get_labels(self):
|
544 |
+
"""See base class."""
|
545 |
+
return ["entailment", "not_entailment"]
|
546 |
+
|
547 |
+
def _create_examples(self, lines, set_type):
|
548 |
+
"""Creates examples for the training, dev and test sets."""
|
549 |
+
examples = []
|
550 |
+
for i, line in enumerate(lines):
|
551 |
+
if i == 0:
|
552 |
+
continue
|
553 |
+
guid = f"{set_type}-{line[0]}"
|
554 |
+
text_a = line[1]
|
555 |
+
text_b = line[2]
|
556 |
+
label = None if set_type == "test" else line[-1]
|
557 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
|
558 |
+
return examples
|
559 |
+
|
560 |
+
|
561 |
+
class WnliProcessor(DataProcessor):
|
562 |
+
"""Processor for the WNLI data set (GLUE version)."""
|
563 |
+
|
564 |
+
def __init__(self, *args, **kwargs):
|
565 |
+
super().__init__(*args, **kwargs)
|
566 |
+
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
|
567 |
+
|
568 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
569 |
+
"""See base class."""
|
570 |
+
return InputExample(
|
571 |
+
tensor_dict["idx"].numpy(),
|
572 |
+
tensor_dict["sentence1"].numpy().decode("utf-8"),
|
573 |
+
tensor_dict["sentence2"].numpy().decode("utf-8"),
|
574 |
+
str(tensor_dict["label"].numpy()),
|
575 |
+
)
|
576 |
+
|
577 |
+
def get_train_examples(self, data_dir):
|
578 |
+
"""See base class."""
|
579 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
|
580 |
+
|
581 |
+
def get_dev_examples(self, data_dir):
|
582 |
+
"""See base class."""
|
583 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
|
584 |
+
|
585 |
+
def get_test_examples(self, data_dir):
|
586 |
+
"""See base class."""
|
587 |
+
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
588 |
+
|
589 |
+
def get_labels(self):
|
590 |
+
"""See base class."""
|
591 |
+
return ["0", "1"]
|
592 |
+
|
593 |
+
def _create_examples(self, lines, set_type):
|
594 |
+
"""Creates examples for the training, dev and test sets."""
|
595 |
+
examples = []
|
596 |
+
for i, line in enumerate(lines):
|
597 |
+
if i == 0:
|
598 |
+
continue
|
599 |
+
guid = f"{set_type}-{line[0]}"
|
600 |
+
text_a = line[1]
|
601 |
+
text_b = line[2]
|
602 |
+
label = None if set_type == "test" else line[-1]
|
603 |
+
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
|
604 |
+
return examples
|
605 |
+
|
606 |
+
|
607 |
+
glue_tasks_num_labels = {
|
608 |
+
"cola": 2,
|
609 |
+
"mnli": 3,
|
610 |
+
"mrpc": 2,
|
611 |
+
"sst-2": 2,
|
612 |
+
"sts-b": 1,
|
613 |
+
"qqp": 2,
|
614 |
+
"qnli": 2,
|
615 |
+
"rte": 2,
|
616 |
+
"wnli": 2,
|
617 |
+
}
|
618 |
+
|
619 |
+
glue_processors = {
|
620 |
+
"cola": ColaProcessor,
|
621 |
+
"mnli": MnliProcessor,
|
622 |
+
"mnli-mm": MnliMismatchedProcessor,
|
623 |
+
"mrpc": MrpcProcessor,
|
624 |
+
"sst-2": Sst2Processor,
|
625 |
+
"sts-b": StsbProcessor,
|
626 |
+
"qqp": QqpProcessor,
|
627 |
+
"qnli": QnliProcessor,
|
628 |
+
"rte": RteProcessor,
|
629 |
+
"wnli": WnliProcessor,
|
630 |
+
}
|
631 |
+
|
632 |
+
glue_output_modes = {
|
633 |
+
"cola": "classification",
|
634 |
+
"mnli": "classification",
|
635 |
+
"mnli-mm": "classification",
|
636 |
+
"mrpc": "classification",
|
637 |
+
"sst-2": "classification",
|
638 |
+
"sts-b": "regression",
|
639 |
+
"qqp": "classification",
|
640 |
+
"qnli": "classification",
|
641 |
+
"rte": "classification",
|
642 |
+
"wnli": "classification",
|
643 |
+
}
|
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/squad.py
ADDED
@@ -0,0 +1,845 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import json
|
16 |
+
import os
|
17 |
+
from functools import partial
|
18 |
+
from multiprocessing import Pool, cpu_count
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
from tqdm import tqdm
|
22 |
+
|
23 |
+
from ...models.bert.tokenization_bert import whitespace_tokenize
|
24 |
+
from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
|
25 |
+
from ...utils import is_tf_available, is_torch_available, logging
|
26 |
+
from .utils import DataProcessor
|
27 |
+
|
28 |
+
|
29 |
+
# Store the tokenizers which insert 2 separators tokens
|
30 |
+
MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"}
|
31 |
+
|
32 |
+
|
33 |
+
if is_torch_available():
|
34 |
+
import torch
|
35 |
+
from torch.utils.data import TensorDataset
|
36 |
+
|
37 |
+
if is_tf_available():
|
38 |
+
import tensorflow as tf
|
39 |
+
|
40 |
+
logger = logging.get_logger(__name__)
|
41 |
+
|
42 |
+
|
43 |
+
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
|
44 |
+
"""Returns tokenized answer spans that better match the annotated answer."""
|
45 |
+
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
|
46 |
+
|
47 |
+
for new_start in range(input_start, input_end + 1):
|
48 |
+
for new_end in range(input_end, new_start - 1, -1):
|
49 |
+
text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
|
50 |
+
if text_span == tok_answer_text:
|
51 |
+
return (new_start, new_end)
|
52 |
+
|
53 |
+
return (input_start, input_end)
|
54 |
+
|
55 |
+
|
56 |
+
def _check_is_max_context(doc_spans, cur_span_index, position):
|
57 |
+
"""Check if this is the 'max context' doc span for the token."""
|
58 |
+
best_score = None
|
59 |
+
best_span_index = None
|
60 |
+
for span_index, doc_span in enumerate(doc_spans):
|
61 |
+
end = doc_span.start + doc_span.length - 1
|
62 |
+
if position < doc_span.start:
|
63 |
+
continue
|
64 |
+
if position > end:
|
65 |
+
continue
|
66 |
+
num_left_context = position - doc_span.start
|
67 |
+
num_right_context = end - position
|
68 |
+
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
|
69 |
+
if best_score is None or score > best_score:
|
70 |
+
best_score = score
|
71 |
+
best_span_index = span_index
|
72 |
+
|
73 |
+
return cur_span_index == best_span_index
|
74 |
+
|
75 |
+
|
76 |
+
def _new_check_is_max_context(doc_spans, cur_span_index, position):
|
77 |
+
"""Check if this is the 'max context' doc span for the token."""
|
78 |
+
# if len(doc_spans) == 1:
|
79 |
+
# return True
|
80 |
+
best_score = None
|
81 |
+
best_span_index = None
|
82 |
+
for span_index, doc_span in enumerate(doc_spans):
|
83 |
+
end = doc_span["start"] + doc_span["length"] - 1
|
84 |
+
if position < doc_span["start"]:
|
85 |
+
continue
|
86 |
+
if position > end:
|
87 |
+
continue
|
88 |
+
num_left_context = position - doc_span["start"]
|
89 |
+
num_right_context = end - position
|
90 |
+
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
|
91 |
+
if best_score is None or score > best_score:
|
92 |
+
best_score = score
|
93 |
+
best_span_index = span_index
|
94 |
+
|
95 |
+
return cur_span_index == best_span_index
|
96 |
+
|
97 |
+
|
98 |
+
def _is_whitespace(c):
|
99 |
+
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
|
100 |
+
return True
|
101 |
+
return False
|
102 |
+
|
103 |
+
|
104 |
+
def squad_convert_example_to_features(
|
105 |
+
example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training
|
106 |
+
):
|
107 |
+
features = []
|
108 |
+
if is_training and not example.is_impossible:
|
109 |
+
# Get start and end position
|
110 |
+
start_position = example.start_position
|
111 |
+
end_position = example.end_position
|
112 |
+
|
113 |
+
# If the answer cannot be found in the text, then skip this example.
|
114 |
+
actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
|
115 |
+
cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
|
116 |
+
if actual_text.find(cleaned_answer_text) == -1:
|
117 |
+
logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'")
|
118 |
+
return []
|
119 |
+
|
120 |
+
tok_to_orig_index = []
|
121 |
+
orig_to_tok_index = []
|
122 |
+
all_doc_tokens = []
|
123 |
+
for i, token in enumerate(example.doc_tokens):
|
124 |
+
orig_to_tok_index.append(len(all_doc_tokens))
|
125 |
+
if tokenizer.__class__.__name__ in [
|
126 |
+
"RobertaTokenizer",
|
127 |
+
"LongformerTokenizer",
|
128 |
+
"BartTokenizer",
|
129 |
+
"RobertaTokenizerFast",
|
130 |
+
"LongformerTokenizerFast",
|
131 |
+
"BartTokenizerFast",
|
132 |
+
]:
|
133 |
+
sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
|
134 |
+
else:
|
135 |
+
sub_tokens = tokenizer.tokenize(token)
|
136 |
+
for sub_token in sub_tokens:
|
137 |
+
tok_to_orig_index.append(i)
|
138 |
+
all_doc_tokens.append(sub_token)
|
139 |
+
|
140 |
+
if is_training and not example.is_impossible:
|
141 |
+
tok_start_position = orig_to_tok_index[example.start_position]
|
142 |
+
if example.end_position < len(example.doc_tokens) - 1:
|
143 |
+
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
|
144 |
+
else:
|
145 |
+
tok_end_position = len(all_doc_tokens) - 1
|
146 |
+
|
147 |
+
(tok_start_position, tok_end_position) = _improve_answer_span(
|
148 |
+
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
|
149 |
+
)
|
150 |
+
|
151 |
+
spans = []
|
152 |
+
|
153 |
+
truncated_query = tokenizer.encode(
|
154 |
+
example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length
|
155 |
+
)
|
156 |
+
|
157 |
+
# Tokenizers who insert 2 SEP tokens in-between <context> & <question> need to have special handling
|
158 |
+
# in the way they compute mask of added tokens.
|
159 |
+
tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower()
|
160 |
+
sequence_added_tokens = (
|
161 |
+
tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
|
162 |
+
if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET
|
163 |
+
else tokenizer.model_max_length - tokenizer.max_len_single_sentence
|
164 |
+
)
|
165 |
+
sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair
|
166 |
+
|
167 |
+
span_doc_tokens = all_doc_tokens
|
168 |
+
while len(spans) * doc_stride < len(all_doc_tokens):
|
169 |
+
# Define the side we want to truncate / pad and the text/pair sorting
|
170 |
+
if tokenizer.padding_side == "right":
|
171 |
+
texts = truncated_query
|
172 |
+
pairs = span_doc_tokens
|
173 |
+
truncation = TruncationStrategy.ONLY_SECOND.value
|
174 |
+
else:
|
175 |
+
texts = span_doc_tokens
|
176 |
+
pairs = truncated_query
|
177 |
+
truncation = TruncationStrategy.ONLY_FIRST.value
|
178 |
+
|
179 |
+
encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic
|
180 |
+
texts,
|
181 |
+
pairs,
|
182 |
+
truncation=truncation,
|
183 |
+
padding=padding_strategy,
|
184 |
+
max_length=max_seq_length,
|
185 |
+
return_overflowing_tokens=True,
|
186 |
+
stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
|
187 |
+
return_token_type_ids=True,
|
188 |
+
)
|
189 |
+
|
190 |
+
paragraph_len = min(
|
191 |
+
len(all_doc_tokens) - len(spans) * doc_stride,
|
192 |
+
max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
|
193 |
+
)
|
194 |
+
|
195 |
+
if tokenizer.pad_token_id in encoded_dict["input_ids"]:
|
196 |
+
if tokenizer.padding_side == "right":
|
197 |
+
non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
|
198 |
+
else:
|
199 |
+
last_padding_id_position = (
|
200 |
+
len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
|
201 |
+
)
|
202 |
+
non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]
|
203 |
+
|
204 |
+
else:
|
205 |
+
non_padded_ids = encoded_dict["input_ids"]
|
206 |
+
|
207 |
+
tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
|
208 |
+
|
209 |
+
token_to_orig_map = {}
|
210 |
+
for i in range(paragraph_len):
|
211 |
+
index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
|
212 |
+
token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
|
213 |
+
|
214 |
+
encoded_dict["paragraph_len"] = paragraph_len
|
215 |
+
encoded_dict["tokens"] = tokens
|
216 |
+
encoded_dict["token_to_orig_map"] = token_to_orig_map
|
217 |
+
encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
|
218 |
+
encoded_dict["token_is_max_context"] = {}
|
219 |
+
encoded_dict["start"] = len(spans) * doc_stride
|
220 |
+
encoded_dict["length"] = paragraph_len
|
221 |
+
|
222 |
+
spans.append(encoded_dict)
|
223 |
+
|
224 |
+
if "overflowing_tokens" not in encoded_dict or (
|
225 |
+
"overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
|
226 |
+
):
|
227 |
+
break
|
228 |
+
span_doc_tokens = encoded_dict["overflowing_tokens"]
|
229 |
+
|
230 |
+
for doc_span_index in range(len(spans)):
|
231 |
+
for j in range(spans[doc_span_index]["paragraph_len"]):
|
232 |
+
is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
|
233 |
+
index = (
|
234 |
+
j
|
235 |
+
if tokenizer.padding_side == "left"
|
236 |
+
else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
|
237 |
+
)
|
238 |
+
spans[doc_span_index]["token_is_max_context"][index] = is_max_context
|
239 |
+
|
240 |
+
for span in spans:
|
241 |
+
# Identify the position of the CLS token
|
242 |
+
cls_index = span["input_ids"].index(tokenizer.cls_token_id)
|
243 |
+
|
244 |
+
# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
|
245 |
+
# Original TF implementation also keep the classification token (set to 0)
|
246 |
+
p_mask = np.ones_like(span["token_type_ids"])
|
247 |
+
if tokenizer.padding_side == "right":
|
248 |
+
p_mask[len(truncated_query) + sequence_added_tokens :] = 0
|
249 |
+
else:
|
250 |
+
p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0
|
251 |
+
|
252 |
+
pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id)
|
253 |
+
special_token_indices = np.asarray(
|
254 |
+
tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True)
|
255 |
+
).nonzero()
|
256 |
+
|
257 |
+
p_mask[pad_token_indices] = 1
|
258 |
+
p_mask[special_token_indices] = 1
|
259 |
+
|
260 |
+
# Set the cls index to 0: the CLS index can be used for impossible answers
|
261 |
+
p_mask[cls_index] = 0
|
262 |
+
|
263 |
+
span_is_impossible = example.is_impossible
|
264 |
+
start_position = 0
|
265 |
+
end_position = 0
|
266 |
+
if is_training and not span_is_impossible:
|
267 |
+
# For training, if our document chunk does not contain an annotation
|
268 |
+
# we throw it out, since there is nothing to predict.
|
269 |
+
doc_start = span["start"]
|
270 |
+
doc_end = span["start"] + span["length"] - 1
|
271 |
+
out_of_span = False
|
272 |
+
|
273 |
+
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
|
274 |
+
out_of_span = True
|
275 |
+
|
276 |
+
if out_of_span:
|
277 |
+
start_position = cls_index
|
278 |
+
end_position = cls_index
|
279 |
+
span_is_impossible = True
|
280 |
+
else:
|
281 |
+
if tokenizer.padding_side == "left":
|
282 |
+
doc_offset = 0
|
283 |
+
else:
|
284 |
+
doc_offset = len(truncated_query) + sequence_added_tokens
|
285 |
+
|
286 |
+
start_position = tok_start_position - doc_start + doc_offset
|
287 |
+
end_position = tok_end_position - doc_start + doc_offset
|
288 |
+
|
289 |
+
features.append(
|
290 |
+
SquadFeatures(
|
291 |
+
span["input_ids"],
|
292 |
+
span["attention_mask"],
|
293 |
+
span["token_type_ids"],
|
294 |
+
cls_index,
|
295 |
+
p_mask.tolist(),
|
296 |
+
example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing.
|
297 |
+
unique_id=0,
|
298 |
+
paragraph_len=span["paragraph_len"],
|
299 |
+
token_is_max_context=span["token_is_max_context"],
|
300 |
+
tokens=span["tokens"],
|
301 |
+
token_to_orig_map=span["token_to_orig_map"],
|
302 |
+
start_position=start_position,
|
303 |
+
end_position=end_position,
|
304 |
+
is_impossible=span_is_impossible,
|
305 |
+
qas_id=example.qas_id,
|
306 |
+
)
|
307 |
+
)
|
308 |
+
return features
|
309 |
+
|
310 |
+
|
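The sliding-window bookkeeping above records, for every token, whether the current doc span gives it its maximum context. As a simplified, self-contained stand-in for that rule (not the library's `_new_check_is_max_context` helper itself; the spans here are plain dicts with "start" and "length", mirroring the fields stored in `encoded_dict` above), the span that gives a token the most context can be picked roughly like this:

```python
# Simplified sketch of the "max context" rule: a token appearing in several
# overlapping doc spans is assigned to the span where it has the most
# surrounding context, scored as min(left, right) + 0.01 * span_length.
def pick_max_context_span(spans, position):
    best_score, best_index = None, None
    for span_index, span in enumerate(spans):
        end = span["start"] + span["length"] - 1
        if position < span["start"] or position > end:
            continue
        num_left_context = position - span["start"]
        num_right_context = end - position
        score = min(num_left_context, num_right_context) + 0.01 * span["length"]
        if best_score is None or score > best_score:
            best_score, best_index = score, span_index
    return best_index


# With a stride of 2 and spans of length 4, token 3 gets its best context
# from the second span, where it sits nearer the middle.
spans = [{"start": 0, "length": 4}, {"start": 2, "length": 4}]
print(pick_max_context_span(spans, 3))  # -> 1
```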
311 |
+
def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase):
|
312 |
+
global tokenizer
|
313 |
+
tokenizer = tokenizer_for_convert
|
314 |
+
|
315 |
+
|
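`squad_convert_example_to_features_init` exists so that each worker process in the multiprocessing pool receives the tokenizer once, as a process-global, instead of pickling it with every task. A minimal sketch of the same `Pool(initializer=...)` pattern, using a hypothetical stand-in resource (a plain dict) rather than a real tokenizer:

```python
# Minimal sketch of the Pool(initializer=...) pattern used here.
from functools import partial
from multiprocessing import Pool

_shared = None  # set once per worker process by the initializer


def _init(shared_resource):
    global _shared
    _shared = shared_resource


def _work(item, scale=1):
    # Each worker reads the process-global resource set by _init.
    return _shared["offset"] + item * scale


if __name__ == "__main__":
    resource = {"offset": 100}
    with Pool(2, initializer=_init, initargs=(resource,)) as pool:
        annotate = partial(_work, scale=10)
        print(list(pool.imap(annotate, [1, 2, 3], chunksize=1)))  # [110, 120, 130]
```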
316 |
+
def squad_convert_examples_to_features(
|
317 |
+
examples,
|
318 |
+
tokenizer,
|
319 |
+
max_seq_length,
|
320 |
+
doc_stride,
|
321 |
+
max_query_length,
|
322 |
+
is_training,
|
323 |
+
padding_strategy="max_length",
|
324 |
+
return_dataset=False,
|
325 |
+
threads=1,
|
326 |
+
tqdm_enabled=True,
|
327 |
+
):
|
328 |
+
"""
|
329 |
+
Converts a list of examples into a list of features that can be directly given as input to a model. It is
|
330 |
+
model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.
|
331 |
+
|
332 |
+
Args:
|
333 |
+
examples: list of [`~data.processors.squad.SquadExample`]
|
334 |
+
tokenizer: an instance of a child of [`PreTrainedTokenizer`]
|
335 |
+
max_seq_length: The maximum sequence length of the inputs.
|
336 |
+
doc_stride: The stride used when the context is too large and is split across several features.
|
337 |
+
max_query_length: The maximum length of the query.
|
338 |
+
is_training: whether to create features for model evaluation or model training.
|
339 |
+
padding_strategy: Defaults to "max_length". The padding strategy to use.
|
340 |
+
return_dataset: Defaults to False. Either 'pt' or 'tf'.
|
341 |
+
if 'pt': returns a torch.utils.data.TensorDataset, if 'tf': returns a tf.data.Dataset
|
342 |
+
threads: the number of processes used for the multiprocessing pool.
|
343 |
+
|
344 |
+
|
345 |
+
Returns:
|
346 |
+
list of [`~data.processors.squad.SquadFeatures`]
|
347 |
+
|
348 |
+
Example:
|
349 |
+
|
350 |
+
```python
|
351 |
+
processor = SquadV2Processor()
|
352 |
+
examples = processor.get_dev_examples(data_dir)
|
353 |
+
|
354 |
+
features = squad_convert_examples_to_features(
|
355 |
+
examples=examples,
|
356 |
+
tokenizer=tokenizer,
|
357 |
+
max_seq_length=args.max_seq_length,
|
358 |
+
doc_stride=args.doc_stride,
|
359 |
+
max_query_length=args.max_query_length,
|
360 |
+
is_training=not evaluate,
|
361 |
+
)
|
362 |
+
```"""
|
363 |
+
# Defining helper methods
|
364 |
+
features = []
|
365 |
+
|
366 |
+
threads = min(threads, cpu_count())
|
367 |
+
with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
|
368 |
+
annotate_ = partial(
|
369 |
+
squad_convert_example_to_features,
|
370 |
+
max_seq_length=max_seq_length,
|
371 |
+
doc_stride=doc_stride,
|
372 |
+
max_query_length=max_query_length,
|
373 |
+
padding_strategy=padding_strategy,
|
374 |
+
is_training=is_training,
|
375 |
+
)
|
376 |
+
features = list(
|
377 |
+
tqdm(
|
378 |
+
p.imap(annotate_, examples, chunksize=32),
|
379 |
+
total=len(examples),
|
380 |
+
desc="convert squad examples to features",
|
381 |
+
disable=not tqdm_enabled,
|
382 |
+
)
|
383 |
+
)
|
384 |
+
|
385 |
+
new_features = []
|
386 |
+
unique_id = 1000000000
|
387 |
+
example_index = 0
|
388 |
+
for example_features in tqdm(
|
389 |
+
features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled
|
390 |
+
):
|
391 |
+
if not example_features:
|
392 |
+
continue
|
393 |
+
for example_feature in example_features:
|
394 |
+
example_feature.example_index = example_index
|
395 |
+
example_feature.unique_id = unique_id
|
396 |
+
new_features.append(example_feature)
|
397 |
+
unique_id += 1
|
398 |
+
example_index += 1
|
399 |
+
features = new_features
|
400 |
+
del new_features
|
401 |
+
if return_dataset == "pt":
|
402 |
+
if not is_torch_available():
|
403 |
+
raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
|
404 |
+
|
405 |
+
# Convert to Tensors and build dataset
|
406 |
+
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
|
407 |
+
all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
|
408 |
+
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
|
409 |
+
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
|
410 |
+
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
|
411 |
+
all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
|
412 |
+
|
413 |
+
if not is_training:
|
414 |
+
all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
|
415 |
+
dataset = TensorDataset(
|
416 |
+
all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask
|
417 |
+
)
|
418 |
+
else:
|
419 |
+
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
|
420 |
+
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
|
421 |
+
dataset = TensorDataset(
|
422 |
+
all_input_ids,
|
423 |
+
all_attention_masks,
|
424 |
+
all_token_type_ids,
|
425 |
+
all_start_positions,
|
426 |
+
all_end_positions,
|
427 |
+
all_cls_index,
|
428 |
+
all_p_mask,
|
429 |
+
all_is_impossible,
|
430 |
+
)
|
431 |
+
|
432 |
+
return features, dataset
|
433 |
+
elif return_dataset == "tf":
|
434 |
+
if not is_tf_available():
|
435 |
+
raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")
|
436 |
+
|
437 |
+
def gen():
|
438 |
+
for i, ex in enumerate(features):
|
439 |
+
if ex.token_type_ids is None:
|
440 |
+
yield (
|
441 |
+
{
|
442 |
+
"input_ids": ex.input_ids,
|
443 |
+
"attention_mask": ex.attention_mask,
|
444 |
+
"feature_index": i,
|
445 |
+
"qas_id": ex.qas_id,
|
446 |
+
},
|
447 |
+
{
|
448 |
+
"start_positions": ex.start_position,
|
449 |
+
"end_positions": ex.end_position,
|
450 |
+
"cls_index": ex.cls_index,
|
451 |
+
"p_mask": ex.p_mask,
|
452 |
+
"is_impossible": ex.is_impossible,
|
453 |
+
},
|
454 |
+
)
|
455 |
+
else:
|
456 |
+
yield (
|
457 |
+
{
|
458 |
+
"input_ids": ex.input_ids,
|
459 |
+
"attention_mask": ex.attention_mask,
|
460 |
+
"token_type_ids": ex.token_type_ids,
|
461 |
+
"feature_index": i,
|
462 |
+
"qas_id": ex.qas_id,
|
463 |
+
},
|
464 |
+
{
|
465 |
+
"start_positions": ex.start_position,
|
466 |
+
"end_positions": ex.end_position,
|
467 |
+
"cls_index": ex.cls_index,
|
468 |
+
"p_mask": ex.p_mask,
|
469 |
+
"is_impossible": ex.is_impossible,
|
470 |
+
},
|
471 |
+
)
|
472 |
+
|
473 |
+
# Why have we split the batch into a tuple? PyTorch just has a list of tensors.
|
474 |
+
if "token_type_ids" in tokenizer.model_input_names:
|
475 |
+
train_types = (
|
476 |
+
{
|
477 |
+
"input_ids": tf.int32,
|
478 |
+
"attention_mask": tf.int32,
|
479 |
+
"token_type_ids": tf.int32,
|
480 |
+
"feature_index": tf.int64,
|
481 |
+
"qas_id": tf.string,
|
482 |
+
},
|
483 |
+
{
|
484 |
+
"start_positions": tf.int64,
|
485 |
+
"end_positions": tf.int64,
|
486 |
+
"cls_index": tf.int64,
|
487 |
+
"p_mask": tf.int32,
|
488 |
+
"is_impossible": tf.int32,
|
489 |
+
},
|
490 |
+
)
|
491 |
+
|
492 |
+
train_shapes = (
|
493 |
+
{
|
494 |
+
"input_ids": tf.TensorShape([None]),
|
495 |
+
"attention_mask": tf.TensorShape([None]),
|
496 |
+
"token_type_ids": tf.TensorShape([None]),
|
497 |
+
"feature_index": tf.TensorShape([]),
|
498 |
+
"qas_id": tf.TensorShape([]),
|
499 |
+
},
|
500 |
+
{
|
501 |
+
"start_positions": tf.TensorShape([]),
|
502 |
+
"end_positions": tf.TensorShape([]),
|
503 |
+
"cls_index": tf.TensorShape([]),
|
504 |
+
"p_mask": tf.TensorShape([None]),
|
505 |
+
"is_impossible": tf.TensorShape([]),
|
506 |
+
},
|
507 |
+
)
|
508 |
+
else:
|
509 |
+
train_types = (
|
510 |
+
{"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string},
|
511 |
+
{
|
512 |
+
"start_positions": tf.int64,
|
513 |
+
"end_positions": tf.int64,
|
514 |
+
"cls_index": tf.int64,
|
515 |
+
"p_mask": tf.int32,
|
516 |
+
"is_impossible": tf.int32,
|
517 |
+
},
|
518 |
+
)
|
519 |
+
|
520 |
+
train_shapes = (
|
521 |
+
{
|
522 |
+
"input_ids": tf.TensorShape([None]),
|
523 |
+
"attention_mask": tf.TensorShape([None]),
|
524 |
+
"feature_index": tf.TensorShape([]),
|
525 |
+
"qas_id": tf.TensorShape([]),
|
526 |
+
},
|
527 |
+
{
|
528 |
+
"start_positions": tf.TensorShape([]),
|
529 |
+
"end_positions": tf.TensorShape([]),
|
530 |
+
"cls_index": tf.TensorShape([]),
|
531 |
+
"p_mask": tf.TensorShape([None]),
|
532 |
+
"is_impossible": tf.TensorShape([]),
|
533 |
+
},
|
534 |
+
)
|
535 |
+
|
536 |
+
return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
|
537 |
+
else:
|
538 |
+
return features
|
539 |
+
|
540 |
+
|
541 |
+
class SquadProcessor(DataProcessor):
|
542 |
+
"""
|
543 |
+
Processor for the SQuAD data set. Overridden by SquadV1Processor and SquadV2Processor, used for version 1.1 and
|
544 |
+
version 2.0 of SQuAD, respectively.
|
545 |
+
"""
|
546 |
+
|
547 |
+
train_file = None
|
548 |
+
dev_file = None
|
549 |
+
|
550 |
+
def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
|
551 |
+
if not evaluate:
|
552 |
+
answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
|
553 |
+
answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
|
554 |
+
answers = []
|
555 |
+
else:
|
556 |
+
answers = [
|
557 |
+
{"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
|
558 |
+
for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
|
559 |
+
]
|
560 |
+
|
561 |
+
answer = None
|
562 |
+
answer_start = None
|
563 |
+
|
564 |
+
return SquadExample(
|
565 |
+
qas_id=tensor_dict["id"].numpy().decode("utf-8"),
|
566 |
+
question_text=tensor_dict["question"].numpy().decode("utf-8"),
|
567 |
+
context_text=tensor_dict["context"].numpy().decode("utf-8"),
|
568 |
+
answer_text=answer,
|
569 |
+
start_position_character=answer_start,
|
570 |
+
title=tensor_dict["title"].numpy().decode("utf-8"),
|
571 |
+
answers=answers,
|
572 |
+
)
|
573 |
+
|
574 |
+
def get_examples_from_dataset(self, dataset, evaluate=False):
|
575 |
+
"""
|
576 |
+
Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.
|
577 |
+
|
578 |
+
Args:
|
579 |
+
dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
|
580 |
+
evaluate: Boolean specifying if in evaluation mode or in training mode
|
581 |
+
|
582 |
+
Returns:
|
583 |
+
List of SquadExample
|
584 |
+
|
585 |
+
Examples:
|
586 |
+
|
587 |
+
```python
|
588 |
+
>>> import tensorflow_datasets as tfds
|
589 |
+
|
590 |
+
>>> dataset = tfds.load("squad")
|
591 |
+
|
592 |
+
>>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
|
593 |
+
>>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
|
594 |
+
```"""
|
595 |
+
|
596 |
+
if evaluate:
|
597 |
+
dataset = dataset["validation"]
|
598 |
+
else:
|
599 |
+
dataset = dataset["train"]
|
600 |
+
|
601 |
+
examples = []
|
602 |
+
for tensor_dict in tqdm(dataset):
|
603 |
+
examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
|
604 |
+
|
605 |
+
return examples
|
606 |
+
|
607 |
+
def get_train_examples(self, data_dir, filename=None):
|
608 |
+
"""
|
609 |
+
Returns the training examples from the data directory.
|
610 |
+
|
611 |
+
Args:
|
612 |
+
data_dir: Directory containing the data files used for training and evaluating.
|
613 |
+
filename: None by default, specify this if the training file has a different name than the original one
|
614 |
+
which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
|
615 |
+
|
616 |
+
"""
|
617 |
+
if data_dir is None:
|
618 |
+
data_dir = ""
|
619 |
+
|
620 |
+
if self.train_file is None:
|
621 |
+
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
|
622 |
+
|
623 |
+
with open(
|
624 |
+
os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
|
625 |
+
) as reader:
|
626 |
+
input_data = json.load(reader)["data"]
|
627 |
+
return self._create_examples(input_data, "train")
|
628 |
+
|
629 |
+
def get_dev_examples(self, data_dir, filename=None):
|
630 |
+
"""
|
631 |
+
Returns the evaluation examples from the data directory.
|
632 |
+
|
633 |
+
Args:
|
634 |
+
data_dir: Directory containing the data files used for training and evaluating.
|
635 |
+
filename: None by default, specify this if the evaluation file has a different name than the original one
|
636 |
+
which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
|
637 |
+
"""
|
638 |
+
if data_dir is None:
|
639 |
+
data_dir = ""
|
640 |
+
|
641 |
+
if self.dev_file is None:
|
642 |
+
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
|
643 |
+
|
644 |
+
with open(
|
645 |
+
os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
|
646 |
+
) as reader:
|
647 |
+
input_data = json.load(reader)["data"]
|
648 |
+
return self._create_examples(input_data, "dev")
|
649 |
+
|
650 |
+
def _create_examples(self, input_data, set_type):
|
651 |
+
is_training = set_type == "train"
|
652 |
+
examples = []
|
653 |
+
for entry in tqdm(input_data):
|
654 |
+
title = entry["title"]
|
655 |
+
for paragraph in entry["paragraphs"]:
|
656 |
+
context_text = paragraph["context"]
|
657 |
+
for qa in paragraph["qas"]:
|
658 |
+
qas_id = qa["id"]
|
659 |
+
question_text = qa["question"]
|
660 |
+
start_position_character = None
|
661 |
+
answer_text = None
|
662 |
+
answers = []
|
663 |
+
|
664 |
+
is_impossible = qa.get("is_impossible", False)
|
665 |
+
if not is_impossible:
|
666 |
+
if is_training:
|
667 |
+
answer = qa["answers"][0]
|
668 |
+
answer_text = answer["text"]
|
669 |
+
start_position_character = answer["answer_start"]
|
670 |
+
else:
|
671 |
+
answers = qa["answers"]
|
672 |
+
|
673 |
+
example = SquadExample(
|
674 |
+
qas_id=qas_id,
|
675 |
+
question_text=question_text,
|
676 |
+
context_text=context_text,
|
677 |
+
answer_text=answer_text,
|
678 |
+
start_position_character=start_position_character,
|
679 |
+
title=title,
|
680 |
+
is_impossible=is_impossible,
|
681 |
+
answers=answers,
|
682 |
+
)
|
683 |
+
examples.append(example)
|
684 |
+
return examples
|
685 |
+
|
686 |
+
|
687 |
+
class SquadV1Processor(SquadProcessor):
|
688 |
+
train_file = "train-v1.1.json"
|
689 |
+
dev_file = "dev-v1.1.json"
|
690 |
+
|
691 |
+
|
692 |
+
class SquadV2Processor(SquadProcessor):
|
693 |
+
train_file = "train-v2.0.json"
|
694 |
+
dev_file = "dev-v2.0.json"
|
695 |
+
|
696 |
+
|
697 |
+
class SquadExample:
|
698 |
+
"""
|
699 |
+
A single training/test example for the Squad dataset, as loaded from disk.
|
700 |
+
|
701 |
+
Args:
|
702 |
+
qas_id: The example's unique identifier
|
703 |
+
question_text: The question string
|
704 |
+
context_text: The context string
|
705 |
+
answer_text: The answer string
|
706 |
+
start_position_character: The character position of the start of the answer
|
707 |
+
title: The title of the example
|
708 |
+
answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
|
709 |
+
is_impossible: False by default, set to True if the example has no possible answer.
|
710 |
+
"""
|
711 |
+
|
712 |
+
def __init__(
|
713 |
+
self,
|
714 |
+
qas_id,
|
715 |
+
question_text,
|
716 |
+
context_text,
|
717 |
+
answer_text,
|
718 |
+
start_position_character,
|
719 |
+
title,
|
720 |
+
answers=[],
|
721 |
+
is_impossible=False,
|
722 |
+
):
|
723 |
+
self.qas_id = qas_id
|
724 |
+
self.question_text = question_text
|
725 |
+
self.context_text = context_text
|
726 |
+
self.answer_text = answer_text
|
727 |
+
self.title = title
|
728 |
+
self.is_impossible = is_impossible
|
729 |
+
self.answers = answers
|
730 |
+
|
731 |
+
self.start_position, self.end_position = 0, 0
|
732 |
+
|
733 |
+
doc_tokens = []
|
734 |
+
char_to_word_offset = []
|
735 |
+
prev_is_whitespace = True
|
736 |
+
|
737 |
+
# Split on whitespace so that different tokens may be attributed to their original position.
|
738 |
+
for c in self.context_text:
|
739 |
+
if _is_whitespace(c):
|
740 |
+
prev_is_whitespace = True
|
741 |
+
else:
|
742 |
+
if prev_is_whitespace:
|
743 |
+
doc_tokens.append(c)
|
744 |
+
else:
|
745 |
+
doc_tokens[-1] += c
|
746 |
+
prev_is_whitespace = False
|
747 |
+
char_to_word_offset.append(len(doc_tokens) - 1)
|
748 |
+
|
749 |
+
self.doc_tokens = doc_tokens
|
750 |
+
self.char_to_word_offset = char_to_word_offset
|
751 |
+
|
752 |
+
# Start and end positions only have a value during evaluation.
|
753 |
+
if start_position_character is not None and not is_impossible:
|
754 |
+
self.start_position = char_to_word_offset[start_position_character]
|
755 |
+
self.end_position = char_to_word_offset[
|
756 |
+
min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
|
757 |
+
]
|
758 |
+
|
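The whitespace split above builds `doc_tokens` together with `char_to_word_offset`, which maps every character of the context back to the word it belongs to; that mapping is what turns a character-level `answer_start` into word-level start/end positions. A small standalone sketch of the same construction, using `str.isspace()` in place of the module's `_is_whitespace` helper:

```python
# Standalone sketch of the doc_tokens / char_to_word_offset construction.
def split_with_offsets(context_text):
    doc_tokens, char_to_word_offset = [], []
    prev_is_whitespace = True
    for c in context_text:
        if c.isspace():
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)
            else:
                doc_tokens[-1] += c
            prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)
    return doc_tokens, char_to_word_offset


tokens, offsets = split_with_offsets("New York City")
print(tokens)       # ['New', 'York', 'City']
print(offsets[4])   # 1 -> character 'Y' belongs to word index 1

answer_start, answer_text = 4, "York"
start_word = offsets[answer_start]
end_word = offsets[answer_start + len(answer_text) - 1]
print(start_word, end_word)  # 1 1
```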
759 |
+
|
760 |
+
class SquadFeatures:
|
761 |
+
"""
|
762 |
+
Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
|
763 |
+
[`~data.processors.squad.SquadExample`] using the
|
764 |
+
[`~transformers.data.processors.squad.squad_convert_examples_to_features`] method.
|
765 |
+
|
766 |
+
Args:
|
767 |
+
input_ids: Indices of input sequence tokens in the vocabulary.
|
768 |
+
attention_mask: Mask to avoid performing attention on padding token indices.
|
769 |
+
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
|
770 |
+
cls_index: the index of the CLS token.
|
771 |
+
p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
|
772 |
+
Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer
|
773 |
+
example_index: the index of the example
|
774 |
+
unique_id: The unique Feature identifier
|
775 |
+
paragraph_len: The length of the context
|
776 |
+
token_is_max_context:
|
777 |
+
List of booleans identifying which tokens have their maximum context in this feature object. If a token
|
778 |
+
does not have its maximum context in this feature object, it means that another feature object has more
|
779 |
+
information related to that token and should be prioritized over this feature for that token.
|
780 |
+
tokens: list of tokens corresponding to the input ids
|
781 |
+
token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
|
782 |
+
start_position: start of the answer token index
|
783 |
+
end_position: end of the answer token index
|
784 |
+
encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
|
785 |
+
"""
|
786 |
+
|
787 |
+
def __init__(
|
788 |
+
self,
|
789 |
+
input_ids,
|
790 |
+
attention_mask,
|
791 |
+
token_type_ids,
|
792 |
+
cls_index,
|
793 |
+
p_mask,
|
794 |
+
example_index,
|
795 |
+
unique_id,
|
796 |
+
paragraph_len,
|
797 |
+
token_is_max_context,
|
798 |
+
tokens,
|
799 |
+
token_to_orig_map,
|
800 |
+
start_position,
|
801 |
+
end_position,
|
802 |
+
is_impossible,
|
803 |
+
qas_id: str = None,
|
804 |
+
encoding: BatchEncoding = None,
|
805 |
+
):
|
806 |
+
self.input_ids = input_ids
|
807 |
+
self.attention_mask = attention_mask
|
808 |
+
self.token_type_ids = token_type_ids
|
809 |
+
self.cls_index = cls_index
|
810 |
+
self.p_mask = p_mask
|
811 |
+
|
812 |
+
self.example_index = example_index
|
813 |
+
self.unique_id = unique_id
|
814 |
+
self.paragraph_len = paragraph_len
|
815 |
+
self.token_is_max_context = token_is_max_context
|
816 |
+
self.tokens = tokens
|
817 |
+
self.token_to_orig_map = token_to_orig_map
|
818 |
+
|
819 |
+
self.start_position = start_position
|
820 |
+
self.end_position = end_position
|
821 |
+
self.is_impossible = is_impossible
|
822 |
+
self.qas_id = qas_id
|
823 |
+
|
824 |
+
self.encoding = encoding
|
825 |
+
|
826 |
+
|
827 |
+
class SquadResult:
|
828 |
+
"""
|
829 |
+
Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
|
830 |
+
|
831 |
+
Args:
|
832 |
+
unique_id: The unique identifier corresponding to that example.
|
833 |
+
start_logits: The logits corresponding to the start of the answer
|
834 |
+
end_logits: The logits corresponding to the end of the answer
|
835 |
+
"""
|
836 |
+
|
837 |
+
def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
|
838 |
+
self.start_logits = start_logits
|
839 |
+
self.end_logits = end_logits
|
840 |
+
self.unique_id = unique_id
|
841 |
+
|
842 |
+
if start_top_index:
|
843 |
+
self.start_top_index = start_top_index
|
844 |
+
self.end_top_index = end_top_index
|
845 |
+
self.cls_logits = cls_logits
|
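Putting the pieces of this file together, a typical evaluation-time call looks roughly like the following. This is a sketch, not the library's own script: the `data/` directory, the `dev-v2.0.json` file name, and the `bert-base-uncased` checkpoint are placeholders, and PyTorch is assumed to be installed so that `return_dataset="pt"` is available.

```python
# Sketch of end-to-end use of the SQuAD processors above; paths and the
# checkpoint name are placeholders.
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.data.processors.squad import SquadV2Processor, squad_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
processor = SquadV2Processor()
examples = processor.get_dev_examples("data", filename="dev-v2.0.json")

features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",
)

# For evaluation, each batch holds input_ids, attention_mask, token_type_ids,
# feature_index, cls_index and p_mask, in that order (see the TensorDataset
# built above).
loader = DataLoader(dataset, batch_size=8)
batch = next(iter(loader))
print([t.shape for t in batch])
```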
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/utils.py
ADDED
@@ -0,0 +1,349 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
|
3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
|
17 |
+
import csv
|
18 |
+
import dataclasses
|
19 |
+
import json
|
20 |
+
from dataclasses import dataclass
|
21 |
+
from typing import List, Optional, Union
|
22 |
+
|
23 |
+
from ...utils import is_tf_available, is_torch_available, logging
|
24 |
+
|
25 |
+
|
26 |
+
logger = logging.get_logger(__name__)
|
27 |
+
|
28 |
+
|
29 |
+
@dataclass
|
30 |
+
class InputExample:
|
31 |
+
"""
|
32 |
+
A single training/test example for simple sequence classification.
|
33 |
+
|
34 |
+
Args:
|
35 |
+
guid: Unique id for the example.
|
36 |
+
text_a: string. The untokenized text of the first sequence. For single
|
37 |
+
sequence tasks, only this sequence must be specified.
|
38 |
+
text_b: (Optional) string. The untokenized text of the second sequence.
|
39 |
+
Must only be specified for sequence pair tasks.
|
40 |
+
label: (Optional) string. The label of the example. This should be
|
41 |
+
specified for train and dev examples, but not for test examples.
|
42 |
+
"""
|
43 |
+
|
44 |
+
guid: str
|
45 |
+
text_a: str
|
46 |
+
text_b: Optional[str] = None
|
47 |
+
label: Optional[str] = None
|
48 |
+
|
49 |
+
def to_json_string(self):
|
50 |
+
"""Serializes this instance to a JSON string."""
|
51 |
+
return json.dumps(dataclasses.asdict(self), indent=2) + "\n"
|
52 |
+
|
53 |
+
|
54 |
+
@dataclass(frozen=True)
|
55 |
+
class InputFeatures:
|
56 |
+
"""
|
57 |
+
A single set of features of data. Property names are the same names as the corresponding inputs to a model.
|
58 |
+
|
59 |
+
Args:
|
60 |
+
input_ids: Indices of input sequence tokens in the vocabulary.
|
61 |
+
attention_mask: Mask to avoid performing attention on padding token indices.
|
62 |
+
Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
|
63 |
+
tokens.
|
64 |
+
token_type_ids: (Optional) Segment token indices to indicate first and second
|
65 |
+
portions of the inputs. Only some models use them.
|
66 |
+
label: (Optional) Label corresponding to the input. Int for classification problems,
|
67 |
+
float for regression problems.
|
68 |
+
"""
|
69 |
+
|
70 |
+
input_ids: List[int]
|
71 |
+
attention_mask: Optional[List[int]] = None
|
72 |
+
token_type_ids: Optional[List[int]] = None
|
73 |
+
label: Optional[Union[int, float]] = None
|
74 |
+
|
75 |
+
def to_json_string(self):
|
76 |
+
"""Serializes this instance to a JSON string."""
|
77 |
+
return json.dumps(dataclasses.asdict(self)) + "\n"
|
78 |
+
|
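Both dataclasses serialize to JSON, which is convenient for caching or inspecting preprocessed data. A quick illustration (the example text, label, and ids below are made up):

```python
# Quick illustration of the two dataclasses defined above.
from transformers.data.processors.utils import InputExample, InputFeatures

example = InputExample(guid="train-0", text_a="a cat sat", label="positive")
features = InputFeatures(input_ids=[101, 1037, 4937, 2938, 102], attention_mask=[1] * 5, label=1)

print(example.to_json_string())   # pretty-printed JSON for one example
print(features.to_json_string())  # compact JSON line, ready to append to a cache file
```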
79 |
+
|
80 |
+
class DataProcessor:
|
81 |
+
"""Base class for data converters for sequence classification data sets."""
|
82 |
+
|
83 |
+
def get_example_from_tensor_dict(self, tensor_dict):
|
84 |
+
"""
|
85 |
+
Gets an example from a dict with tensorflow tensors.
|
86 |
+
|
87 |
+
Args:
|
88 |
+
tensor_dict: Keys and values should match the corresponding Glue
|
89 |
+
tensorflow_dataset examples.
|
90 |
+
"""
|
91 |
+
raise NotImplementedError()
|
92 |
+
|
93 |
+
def get_train_examples(self, data_dir):
|
94 |
+
"""Gets a collection of [`InputExample`] for the train set."""
|
95 |
+
raise NotImplementedError()
|
96 |
+
|
97 |
+
def get_dev_examples(self, data_dir):
|
98 |
+
"""Gets a collection of [`InputExample`] for the dev set."""
|
99 |
+
raise NotImplementedError()
|
100 |
+
|
101 |
+
def get_test_examples(self, data_dir):
|
102 |
+
"""Gets a collection of [`InputExample`] for the test set."""
|
103 |
+
raise NotImplementedError()
|
104 |
+
|
105 |
+
def get_labels(self):
|
106 |
+
"""Gets the list of labels for this data set."""
|
107 |
+
raise NotImplementedError()
|
108 |
+
|
109 |
+
def tfds_map(self, example):
|
110 |
+
"""
|
111 |
+
Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
|
112 |
+
examples to the correct format.
|
113 |
+
"""
|
114 |
+
if len(self.get_labels()) > 1:
|
115 |
+
example.label = self.get_labels()[int(example.label)]
|
116 |
+
return example
|
117 |
+
|
118 |
+
@classmethod
|
119 |
+
def _read_tsv(cls, input_file, quotechar=None):
|
120 |
+
"""Reads a tab separated value file."""
|
121 |
+
with open(input_file, "r", encoding="utf-8-sig") as f:
|
122 |
+
return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
|
123 |
+
|
124 |
+
|
125 |
+
class SingleSentenceClassificationProcessor(DataProcessor):
|
126 |
+
"""Generic processor for a single sentence classification data set."""
|
127 |
+
|
128 |
+
def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
|
129 |
+
self.labels = [] if labels is None else labels
|
130 |
+
self.examples = [] if examples is None else examples
|
131 |
+
self.mode = mode
|
132 |
+
self.verbose = verbose
|
133 |
+
|
134 |
+
def __len__(self):
|
135 |
+
return len(self.examples)
|
136 |
+
|
137 |
+
def __getitem__(self, idx):
|
138 |
+
if isinstance(idx, slice):
|
139 |
+
return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
|
140 |
+
return self.examples[idx]
|
141 |
+
|
142 |
+
@classmethod
|
143 |
+
def create_from_csv(
|
144 |
+
cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
|
145 |
+
):
|
146 |
+
processor = cls(**kwargs)
|
147 |
+
processor.add_examples_from_csv(
|
148 |
+
file_name,
|
149 |
+
split_name=split_name,
|
150 |
+
column_label=column_label,
|
151 |
+
column_text=column_text,
|
152 |
+
column_id=column_id,
|
153 |
+
skip_first_row=skip_first_row,
|
154 |
+
overwrite_labels=True,
|
155 |
+
overwrite_examples=True,
|
156 |
+
)
|
157 |
+
return processor
|
158 |
+
|
159 |
+
@classmethod
|
160 |
+
def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
|
161 |
+
processor = cls(**kwargs)
|
162 |
+
processor.add_examples(texts_or_text_and_labels, labels=labels)
|
163 |
+
return processor
|
164 |
+
|
165 |
+
def add_examples_from_csv(
|
166 |
+
self,
|
167 |
+
file_name,
|
168 |
+
split_name="",
|
169 |
+
column_label=0,
|
170 |
+
column_text=1,
|
171 |
+
column_id=None,
|
172 |
+
skip_first_row=False,
|
173 |
+
overwrite_labels=False,
|
174 |
+
overwrite_examples=False,
|
175 |
+
):
|
176 |
+
lines = self._read_tsv(file_name)
|
177 |
+
if skip_first_row:
|
178 |
+
lines = lines[1:]
|
179 |
+
texts = []
|
180 |
+
labels = []
|
181 |
+
ids = []
|
182 |
+
for i, line in enumerate(lines):
|
183 |
+
texts.append(line[column_text])
|
184 |
+
labels.append(line[column_label])
|
185 |
+
if column_id is not None:
|
186 |
+
ids.append(line[column_id])
|
187 |
+
else:
|
188 |
+
guid = f"{split_name}-{i}" if split_name else str(i)
|
189 |
+
ids.append(guid)
|
190 |
+
|
191 |
+
return self.add_examples(
|
192 |
+
texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
|
193 |
+
)
|
194 |
+
|
195 |
+
def add_examples(
|
196 |
+
self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
|
197 |
+
):
|
198 |
+
if labels is not None and len(texts_or_text_and_labels) != len(labels):
|
199 |
+
raise ValueError(
|
200 |
+
f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
|
201 |
+
)
|
202 |
+
if ids is not None and len(texts_or_text_and_labels) != len(ids):
|
203 |
+
raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
|
204 |
+
if ids is None:
|
205 |
+
ids = [None] * len(texts_or_text_and_labels)
|
206 |
+
if labels is None:
|
207 |
+
labels = [None] * len(texts_or_text_and_labels)
|
208 |
+
examples = []
|
209 |
+
added_labels = set()
|
210 |
+
for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
|
211 |
+
if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
|
212 |
+
text, label = text_or_text_and_label
|
213 |
+
else:
|
214 |
+
text = text_or_text_and_label
|
215 |
+
added_labels.add(label)
|
216 |
+
examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
|
217 |
+
|
218 |
+
# Update examples
|
219 |
+
if overwrite_examples:
|
220 |
+
self.examples = examples
|
221 |
+
else:
|
222 |
+
self.examples.extend(examples)
|
223 |
+
|
224 |
+
# Update labels
|
225 |
+
if overwrite_labels:
|
226 |
+
self.labels = list(added_labels)
|
227 |
+
else:
|
228 |
+
self.labels = list(set(self.labels).union(added_labels))
|
229 |
+
|
230 |
+
return self.examples
|
231 |
+
|
232 |
+
def get_features(
|
233 |
+
self,
|
234 |
+
tokenizer,
|
235 |
+
max_length=None,
|
236 |
+
pad_on_left=False,
|
237 |
+
pad_token=0,
|
238 |
+
mask_padding_with_zero=True,
|
239 |
+
return_tensors=None,
|
240 |
+
):
|
241 |
+
"""
|
242 |
+
Convert examples into a list of `InputFeatures`
|
243 |
+
|
244 |
+
Args:
|
245 |
+
tokenizer: Instance of a tokenizer that will tokenize the examples
|
246 |
+
max_length: Maximum example length
|
247 |
+
pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
|
248 |
+
pad_token: Padding token
|
249 |
+
mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
|
250 |
+
and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
|
251 |
+
values)
|
252 |
+
|
253 |
+
Returns:
|
254 |
+
If `return_tensors` is None, a list of task-specific `InputFeatures` which can be fed to the model. If
`return_tensors` is 'tf' or 'pt', a `tf.data.Dataset` or `torch.utils.data.TensorDataset` holding the
task-specific features.
|
257 |
+
|
258 |
+
"""
|
259 |
+
if max_length is None:
|
260 |
+
max_length = tokenizer.max_len
|
261 |
+
|
262 |
+
label_map = {label: i for i, label in enumerate(self.labels)}
|
263 |
+
|
264 |
+
all_input_ids = []
|
265 |
+
for ex_index, example in enumerate(self.examples):
|
266 |
+
if ex_index % 10000 == 0:
|
267 |
+
logger.info(f"Tokenizing example {ex_index}")
|
268 |
+
|
269 |
+
input_ids = tokenizer.encode(
|
270 |
+
example.text_a,
|
271 |
+
add_special_tokens=True,
|
272 |
+
max_length=min(max_length, tokenizer.max_len),
|
273 |
+
)
|
274 |
+
all_input_ids.append(input_ids)
|
275 |
+
|
276 |
+
batch_length = max(len(input_ids) for input_ids in all_input_ids)
|
277 |
+
|
278 |
+
features = []
|
279 |
+
for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
|
280 |
+
if ex_index % 10000 == 0:
|
281 |
+
logger.info(f"Writing example {ex_index}/{len(self.examples)}")
|
282 |
+
# The mask has 1 for real tokens and 0 for padding tokens. Only real
|
283 |
+
# tokens are attended to.
|
284 |
+
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
|
285 |
+
|
286 |
+
# Zero-pad up to the sequence length.
|
287 |
+
padding_length = batch_length - len(input_ids)
|
288 |
+
if pad_on_left:
|
289 |
+
input_ids = ([pad_token] * padding_length) + input_ids
|
290 |
+
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
|
291 |
+
else:
|
292 |
+
input_ids = input_ids + ([pad_token] * padding_length)
|
293 |
+
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
|
294 |
+
|
295 |
+
if len(input_ids) != batch_length:
|
296 |
+
raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
|
297 |
+
if len(attention_mask) != batch_length:
|
298 |
+
raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")
|
299 |
+
|
300 |
+
if self.mode == "classification":
|
301 |
+
label = label_map[example.label]
|
302 |
+
elif self.mode == "regression":
|
303 |
+
label = float(example.label)
|
304 |
+
else:
|
305 |
+
raise ValueError(self.mode)
|
306 |
+
|
307 |
+
if ex_index < 5 and self.verbose:
|
308 |
+
logger.info("*** Example ***")
|
309 |
+
logger.info(f"guid: {example.guid}")
|
310 |
+
logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
|
311 |
+
logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
|
312 |
+
logger.info(f"label: {example.label} (id = {label})")
|
313 |
+
|
314 |
+
features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
|
315 |
+
|
316 |
+
if return_tensors is None:
|
317 |
+
return features
|
318 |
+
elif return_tensors == "tf":
|
319 |
+
if not is_tf_available():
|
320 |
+
raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
|
321 |
+
import tensorflow as tf
|
322 |
+
|
323 |
+
def gen():
|
324 |
+
for ex in features:
|
325 |
+
yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
|
326 |
+
|
327 |
+
dataset = tf.data.Dataset.from_generator(
|
328 |
+
gen,
|
329 |
+
({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
|
330 |
+
({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
|
331 |
+
)
|
332 |
+
return dataset
|
333 |
+
elif return_tensors == "pt":
|
334 |
+
if not is_torch_available():
|
335 |
+
raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
|
336 |
+
import torch
|
337 |
+
from torch.utils.data import TensorDataset
|
338 |
+
|
339 |
+
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
|
340 |
+
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
|
341 |
+
if self.mode == "classification":
|
342 |
+
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
|
343 |
+
elif self.mode == "regression":
|
344 |
+
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
|
345 |
+
|
346 |
+
dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
|
347 |
+
return dataset
|
348 |
+
else:
|
349 |
+
raise ValueError("return_tensors should be one of 'tf' or 'pt'")
|
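A small sketch of how `SingleSentenceClassificationProcessor` collects examples: pairs of `(text, label)` can be passed directly to `create_from_examples`, labels are inferred from the pairs, and the processor supports `len()`, indexing, and slicing. `get_features` additionally needs a tokenizer and is left out here; the texts and labels below are made up.

```python
# Sketch of SingleSentenceClassificationProcessor without a tokenizer.
from transformers.data.processors.utils import SingleSentenceClassificationProcessor

processor = SingleSentenceClassificationProcessor.create_from_examples(
    [("great movie", "pos"), ("terrible plot", "neg"), ("fine, I guess", "pos")]
)

print(len(processor))            # 3
print(sorted(processor.labels))  # ['neg', 'pos']
print(processor[0].text_a)       # 'great movie'

first_two = processor[:2]        # slicing returns a new processor
print(len(first_two))            # 2
```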
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/xnli.py
ADDED
@@ -0,0 +1,97 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XNLI utils (dataset loading and evaluation)"""


import os

from ...utils import logging
from .utils import DataProcessor, InputExample


logger = logging.get_logger(__name__)


class XnliProcessor(DataProcessor):
    """
    Processor for the XNLI dataset. Adapted from
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
    """

    def __init__(self, language, train_language=None):
        self.language = language
        self.train_language = train_language

    def get_train_examples(self, data_dir):
        """See base class."""
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = f"train-{i}"
            text_a = line[0]
            text_b = line[1]
            label = "contradiction" if line[2] == "contradictory" else line[2]
            if not isinstance(text_a, str):
                raise ValueError(f"Training input {text_a} is not a string")
            if not isinstance(text_b, str):
                raise ValueError(f"Training input {text_b} is not a string")
            if not isinstance(label, str):
                raise ValueError(f"Training label {label} is not a string")
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            language = line[0]
            if language != self.language:
                continue
            guid = f"test-{i}"
            text_a = line[6]
            text_b = line[7]
            label = line[1]
            if not isinstance(text_a, str):
                raise ValueError(f"Training input {text_a} is not a string")
            if not isinstance(text_b, str):
                raise ValueError(f"Training input {text_b} is not a string")
            if not isinstance(label, str):
                raise ValueError(f"Training label {label} is not a string")
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]


xnli_processors = {
    "xnli": XnliProcessor,
}

xnli_output_modes = {
    "xnli": "classification",
}

xnli_tasks_num_labels = {
    "xnli": 3,
}
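For completeness, a sketch of how the XNLI processor is typically driven. The `path/to/xnli` directory is a placeholder for a folder containing the extracted `XNLI-MT-1.0` and `XNLI-1.0` archives, which have to be downloaded separately.

```python
# Sketch of driving XnliProcessor; "path/to/xnli" is a placeholder directory.
from transformers.data.processors.xnli import XnliProcessor

processor = XnliProcessor(language="de", train_language="en")
print(processor.get_labels())  # ['contradiction', 'entailment', 'neutral']

train_examples = processor.get_train_examples("path/to/xnli")  # reads multinli.train.en.tsv
test_examples = processor.get_test_examples("path/to/xnli")    # keeps only the German test rows
print(train_examples[0].text_a, "|", train_examples[0].label)
```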
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__init__.py
ADDED
@@ -0,0 +1,1107 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
import json
|
16 |
+
import os
|
17 |
+
import warnings
|
18 |
+
from pathlib import Path
|
19 |
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
20 |
+
|
21 |
+
from huggingface_hub import model_info
|
22 |
+
|
23 |
+
from ..configuration_utils import PretrainedConfig
|
24 |
+
from ..dynamic_module_utils import get_class_from_dynamic_module
|
25 |
+
from ..feature_extraction_utils import PreTrainedFeatureExtractor
|
26 |
+
from ..image_processing_utils import BaseImageProcessor
|
27 |
+
from ..models.auto.configuration_auto import AutoConfig
|
28 |
+
from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
|
29 |
+
from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
|
30 |
+
from ..models.auto.modeling_auto import AutoModelForDepthEstimation, AutoModelForImageToImage
|
31 |
+
from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
|
32 |
+
from ..tokenization_utils import PreTrainedTokenizer
|
33 |
+
from ..utils import (
|
34 |
+
CONFIG_NAME,
|
35 |
+
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
|
36 |
+
cached_file,
|
37 |
+
extract_commit_hash,
|
38 |
+
find_adapter_config_file,
|
39 |
+
is_kenlm_available,
|
40 |
+
is_offline_mode,
|
41 |
+
is_peft_available,
|
42 |
+
is_pyctcdecode_available,
|
43 |
+
is_tf_available,
|
44 |
+
is_torch_available,
|
45 |
+
logging,
|
46 |
+
)
|
47 |
+
from .audio_classification import AudioClassificationPipeline
|
48 |
+
from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline
|
49 |
+
from .base import (
|
50 |
+
ArgumentHandler,
|
51 |
+
CsvPipelineDataFormat,
|
52 |
+
JsonPipelineDataFormat,
|
53 |
+
PipedPipelineDataFormat,
|
54 |
+
Pipeline,
|
55 |
+
PipelineDataFormat,
|
56 |
+
PipelineException,
|
57 |
+
PipelineRegistry,
|
58 |
+
get_default_model_and_revision,
|
59 |
+
infer_framework_load_model,
|
60 |
+
)
|
61 |
+
from .conversational import Conversation, ConversationalPipeline
|
62 |
+
from .depth_estimation import DepthEstimationPipeline
|
63 |
+
from .document_question_answering import DocumentQuestionAnsweringPipeline
|
64 |
+
from .feature_extraction import FeatureExtractionPipeline
|
65 |
+
from .fill_mask import FillMaskPipeline
|
66 |
+
from .image_classification import ImageClassificationPipeline
|
67 |
+
from .image_feature_extraction import ImageFeatureExtractionPipeline
|
68 |
+
from .image_segmentation import ImageSegmentationPipeline
|
69 |
+
from .image_to_image import ImageToImagePipeline
|
70 |
+
from .image_to_text import ImageToTextPipeline
|
71 |
+
from .mask_generation import MaskGenerationPipeline
|
72 |
+
from .object_detection import ObjectDetectionPipeline
|
73 |
+
from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline
|
74 |
+
from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline
|
75 |
+
from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline
|
76 |
+
from .text_classification import TextClassificationPipeline
|
77 |
+
from .text_generation import TextGenerationPipeline
|
78 |
+
from .text_to_audio import TextToAudioPipeline
|
79 |
+
from .token_classification import (
|
80 |
+
AggregationStrategy,
|
81 |
+
NerPipeline,
|
82 |
+
TokenClassificationArgumentHandler,
|
83 |
+
TokenClassificationPipeline,
|
84 |
+
)
|
85 |
+
from .video_classification import VideoClassificationPipeline
|
86 |
+
from .visual_question_answering import VisualQuestionAnsweringPipeline
|
87 |
+
from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline
|
88 |
+
from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline
|
89 |
+
from .zero_shot_image_classification import ZeroShotImageClassificationPipeline
|
90 |
+
from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline
|
91 |
+
|
92 |
+
|
93 |
+
if is_tf_available():
|
94 |
+
import tensorflow as tf
|
95 |
+
|
96 |
+
from ..models.auto.modeling_tf_auto import (
|
97 |
+
TFAutoModel,
|
98 |
+
TFAutoModelForCausalLM,
|
99 |
+
TFAutoModelForImageClassification,
|
100 |
+
TFAutoModelForMaskedLM,
|
101 |
+
TFAutoModelForQuestionAnswering,
|
102 |
+
TFAutoModelForSeq2SeqLM,
|
103 |
+
TFAutoModelForSequenceClassification,
|
104 |
+
TFAutoModelForTableQuestionAnswering,
|
105 |
+
TFAutoModelForTokenClassification,
|
106 |
+
TFAutoModelForVision2Seq,
|
107 |
+
TFAutoModelForZeroShotImageClassification,
|
108 |
+
)
|
109 |
+
|
110 |
+
if is_torch_available():
|
111 |
+
import torch
|
112 |
+
|
113 |
+
from ..models.auto.modeling_auto import (
|
114 |
+
AutoModel,
|
115 |
+
AutoModelForAudioClassification,
|
116 |
+
AutoModelForCausalLM,
|
117 |
+
AutoModelForCTC,
|
118 |
+
AutoModelForDocumentQuestionAnswering,
|
119 |
+
AutoModelForImageClassification,
|
120 |
+
AutoModelForImageSegmentation,
|
121 |
+
AutoModelForMaskedLM,
|
122 |
+
AutoModelForMaskGeneration,
|
123 |
+
AutoModelForObjectDetection,
|
124 |
+
AutoModelForQuestionAnswering,
|
125 |
+
AutoModelForSemanticSegmentation,
|
126 |
+
AutoModelForSeq2SeqLM,
|
127 |
+
AutoModelForSequenceClassification,
|
128 |
+
AutoModelForSpeechSeq2Seq,
|
129 |
+
AutoModelForTableQuestionAnswering,
|
130 |
+
AutoModelForTextToSpectrogram,
|
131 |
+
AutoModelForTextToWaveform,
|
132 |
+
AutoModelForTokenClassification,
|
133 |
+
AutoModelForVideoClassification,
|
134 |
+
AutoModelForVision2Seq,
|
135 |
+
AutoModelForVisualQuestionAnswering,
|
136 |
+
AutoModelForZeroShotImageClassification,
|
137 |
+
AutoModelForZeroShotObjectDetection,
|
138 |
+
)
|
139 |
+
|
140 |
+
|
141 |
+
if TYPE_CHECKING:
|
142 |
+
from ..modeling_tf_utils import TFPreTrainedModel
|
143 |
+
from ..modeling_utils import PreTrainedModel
|
144 |
+
from ..tokenization_utils_fast import PreTrainedTokenizerFast
|
145 |
+
|
146 |
+
|
147 |
+
logger = logging.get_logger(__name__)
|
148 |
+
|
149 |
+
|
150 |
+
# Register all the supported tasks here
|
151 |
+
TASK_ALIASES = {
|
152 |
+
"sentiment-analysis": "text-classification",
|
153 |
+
"ner": "token-classification",
|
154 |
+
"vqa": "visual-question-answering",
|
155 |
+
"text-to-speech": "text-to-audio",
|
156 |
+
}
|
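The alias table above maps legacy task names onto their canonical entries before the `SUPPORTED_TASKS` registry below is consulted; resolving a user-supplied task name amounts to a one-line lookup. A tiny sketch of that normalization step (the function name `resolve_task` is illustrative, not the library's own helper):

```python
# Tiny sketch of the alias normalization implied by TASK_ALIASES.
TASK_ALIASES = {
    "sentiment-analysis": "text-classification",
    "ner": "token-classification",
    "vqa": "visual-question-answering",
    "text-to-speech": "text-to-audio",
}


def resolve_task(task: str) -> str:
    return TASK_ALIASES.get(task, task)


print(resolve_task("sentiment-analysis"))  # text-classification
print(resolve_task("fill-mask"))           # fill-mask (already canonical)
```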
157 |
+
SUPPORTED_TASKS = {
|
158 |
+
"audio-classification": {
|
159 |
+
"impl": AudioClassificationPipeline,
|
160 |
+
"tf": (),
|
161 |
+
"pt": (AutoModelForAudioClassification,) if is_torch_available() else (),
|
162 |
+
"default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}},
|
163 |
+
"type": "audio",
|
164 |
+
},
|
165 |
+
"automatic-speech-recognition": {
|
166 |
+
"impl": AutomaticSpeechRecognitionPipeline,
|
167 |
+
"tf": (),
|
168 |
+
"pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (),
|
169 |
+
"default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "55bb623")}},
|
170 |
+
"type": "multimodal",
|
171 |
+
},
|
172 |
+
"text-to-audio": {
|
173 |
+
"impl": TextToAudioPipeline,
|
174 |
+
"tf": (),
|
175 |
+
"pt": (AutoModelForTextToWaveform, AutoModelForTextToSpectrogram) if is_torch_available() else (),
|
176 |
+
"default": {"model": {"pt": ("suno/bark-small", "645cfba")}},
|
177 |
+
"type": "text",
|
178 |
+
},
|
179 |
+
"feature-extraction": {
|
180 |
+
"impl": FeatureExtractionPipeline,
|
181 |
+
"tf": (TFAutoModel,) if is_tf_available() else (),
|
182 |
+
"pt": (AutoModel,) if is_torch_available() else (),
|
183 |
+
"default": {
|
184 |
+
"model": {
|
185 |
+
"pt": ("distilbert/distilbert-base-cased", "935ac13"),
|
186 |
+
"tf": ("distilbert/distilbert-base-cased", "935ac13"),
|
187 |
+
}
|
188 |
+
},
|
189 |
+
"type": "multimodal",
|
190 |
+
},
|
191 |
+
"text-classification": {
|
192 |
+
"impl": TextClassificationPipeline,
|
193 |
+
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
|
194 |
+
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
|
195 |
+
"default": {
|
196 |
+
"model": {
|
197 |
+
"pt": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"),
|
198 |
+
"tf": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"),
|
199 |
+
},
|
200 |
+
},
|
201 |
+
"type": "text",
|
202 |
+
},
|
203 |
+
"token-classification": {
|
204 |
+
"impl": TokenClassificationPipeline,
|
205 |
+
"tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (),
|
206 |
+
"pt": (AutoModelForTokenClassification,) if is_torch_available() else (),
|
207 |
+
"default": {
|
208 |
+
"model": {
|
209 |
+
"pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"),
|
210 |
+
"tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"),
|
211 |
+
},
|
212 |
+
},
|
213 |
+
"type": "text",
|
214 |
+
},
|
215 |
+
"question-answering": {
|
216 |
+
"impl": QuestionAnsweringPipeline,
|
217 |
+
"tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (),
|
218 |
+
"pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (),
|
219 |
+
"default": {
|
220 |
+
"model": {
|
221 |
+
"pt": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"),
|
222 |
+
"tf": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"),
|
223 |
+
},
|
224 |
+
},
|
225 |
+
"type": "text",
|
226 |
+
},
|
227 |
+
"table-question-answering": {
|
228 |
+
"impl": TableQuestionAnsweringPipeline,
|
229 |
+
"pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (),
|
230 |
+
"tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (),
|
231 |
+
"default": {
|
232 |
+
"model": {
|
233 |
+
"pt": ("google/tapas-base-finetuned-wtq", "69ceee2"),
|
234 |
+
"tf": ("google/tapas-base-finetuned-wtq", "69ceee2"),
|
235 |
+
},
|
236 |
+
},
|
237 |
+
"type": "text",
|
238 |
+
},
|
239 |
+
"visual-question-answering": {
|
240 |
+
"impl": VisualQuestionAnsweringPipeline,
|
241 |
+
"pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (),
|
242 |
+
"tf": (),
|
243 |
+
"default": {
|
244 |
+
"model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")},
|
245 |
+
},
|
246 |
+
"type": "multimodal",
|
247 |
+
},
|
248 |
+
"document-question-answering": {
|
249 |
+
"impl": DocumentQuestionAnsweringPipeline,
|
250 |
+
"pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (),
|
251 |
+
"tf": (),
|
252 |
+
"default": {
|
253 |
+
"model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")},
|
254 |
+
},
|
255 |
+
"type": "multimodal",
|
256 |
+
},
|
257 |
+
"fill-mask": {
|
258 |
+
"impl": FillMaskPipeline,
|
259 |
+
"tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (),
|
260 |
+
"pt": (AutoModelForMaskedLM,) if is_torch_available() else (),
|
261 |
+
"default": {
|
262 |
+
"model": {
|
263 |
+
"pt": ("distilbert/distilroberta-base", "ec58a5b"),
|
264 |
+
"tf": ("distilbert/distilroberta-base", "ec58a5b"),
|
265 |
+
}
|
266 |
+
},
|
267 |
+
"type": "text",
|
268 |
+
},
|
269 |
+
"summarization": {
|
270 |
+
"impl": SummarizationPipeline,
|
271 |
+
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
|
272 |
+
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
|
273 |
+
"default": {
|
274 |
+
"model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("google-t5/t5-small", "d769bba")}
|
275 |
+
},
|
276 |
+
"type": "text",
|
277 |
+
},
|
278 |
+
# This task is a special case as it's parametrized by SRC, TGT languages.
|
279 |
+
"translation": {
|
280 |
+
"impl": TranslationPipeline,
|
281 |
+
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
|
282 |
+
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
|
283 |
+
"default": {
|
284 |
+
("en", "fr"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}},
|
285 |
+
("en", "de"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}},
|
286 |
+
("en", "ro"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}},
|
287 |
+
},
|
288 |
+
"type": "text",
|
289 |
+
},
|
290 |
+
"text2text-generation": {
|
291 |
+
"impl": Text2TextGenerationPipeline,
|
292 |
+
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
|
293 |
+
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
|
294 |
+
"default": {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}},
|
295 |
+
"type": "text",
|
296 |
+
},
|
297 |
+
"text-generation": {
|
298 |
+
"impl": TextGenerationPipeline,
|
299 |
+
"tf": (TFAutoModelForCausalLM,) if is_tf_available() else (),
|
300 |
+
"pt": (AutoModelForCausalLM,) if is_torch_available() else (),
|
301 |
+
"default": {"model": {"pt": ("openai-community/gpt2", "6c0e608"), "tf": ("openai-community/gpt2", "6c0e608")}},
|
302 |
+
"type": "text",
|
303 |
+
},
|
304 |
+
"zero-shot-classification": {
|
305 |
+
"impl": ZeroShotClassificationPipeline,
|
306 |
+
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
|
307 |
+
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
|
308 |
+
"default": {
|
309 |
+
"model": {
|
310 |
+
"pt": ("facebook/bart-large-mnli", "c626438"),
|
311 |
+
"tf": ("FacebookAI/roberta-large-mnli", "130fb28"),
|
312 |
+
},
|
313 |
+
"config": {
|
314 |
+
"pt": ("facebook/bart-large-mnli", "c626438"),
|
315 |
+
"tf": ("FacebookAI/roberta-large-mnli", "130fb28"),
|
316 |
+
},
|
317 |
+
},
|
318 |
+
"type": "text",
|
319 |
+
},
|
320 |
+
"zero-shot-image-classification": {
|
321 |
+
"impl": ZeroShotImageClassificationPipeline,
|
322 |
+
"tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (),
|
323 |
+
"pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (),
|
324 |
+
"default": {
|
325 |
+
"model": {
|
326 |
+
"pt": ("openai/clip-vit-base-patch32", "f4881ba"),
|
327 |
+
"tf": ("openai/clip-vit-base-patch32", "f4881ba"),
|
328 |
+
}
|
329 |
+
},
|
330 |
+
"type": "multimodal",
|
331 |
+
},
|
332 |
+
"zero-shot-audio-classification": {
|
333 |
+
"impl": ZeroShotAudioClassificationPipeline,
|
334 |
+
"tf": (),
|
335 |
+
"pt": (AutoModel,) if is_torch_available() else (),
|
336 |
+
"default": {
|
337 |
+
"model": {
|
338 |
+
"pt": ("laion/clap-htsat-fused", "973b6e5"),
|
339 |
+
}
|
340 |
+
},
|
341 |
+
"type": "multimodal",
|
342 |
+
},
|
343 |
+
"conversational": {
|
344 |
+
"impl": ConversationalPipeline,
|
345 |
+
"tf": (TFAutoModelForSeq2SeqLM, TFAutoModelForCausalLM) if is_tf_available() else (),
|
346 |
+
"pt": (AutoModelForSeq2SeqLM, AutoModelForCausalLM) if is_torch_available() else (),
|
347 |
+
"default": {
|
348 |
+
"model": {"pt": ("microsoft/DialoGPT-medium", "8bada3b"), "tf": ("microsoft/DialoGPT-medium", "8bada3b")}
|
349 |
+
},
|
350 |
+
"type": "text",
|
351 |
+
},
|
352 |
+
"image-classification": {
|
353 |
+
"impl": ImageClassificationPipeline,
|
354 |
+
"tf": (TFAutoModelForImageClassification,) if is_tf_available() else (),
|
355 |
+
"pt": (AutoModelForImageClassification,) if is_torch_available() else (),
|
356 |
+
"default": {
|
357 |
+
"model": {
|
358 |
+
"pt": ("google/vit-base-patch16-224", "5dca96d"),
|
359 |
+
"tf": ("google/vit-base-patch16-224", "5dca96d"),
|
360 |
+
}
|
361 |
+
},
|
362 |
+
"type": "image",
|
363 |
+
},
|
364 |
+
"image-feature-extraction": {
|
365 |
+
"impl": ImageFeatureExtractionPipeline,
|
366 |
+
"tf": (TFAutoModel,) if is_tf_available() else (),
|
367 |
+
"pt": (AutoModel,) if is_torch_available() else (),
|
368 |
+
"default": {
|
369 |
+
"model": {
|
370 |
+
"pt": ("google/vit-base-patch16-224", "29e7a1e183"),
|
371 |
+
"tf": ("google/vit-base-patch16-224", "29e7a1e183"),
|
372 |
+
}
|
373 |
+
},
|
374 |
+
"type": "image",
|
375 |
+
},
|
376 |
+
"image-segmentation": {
|
377 |
+
"impl": ImageSegmentationPipeline,
|
378 |
+
"tf": (),
|
379 |
+
"pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (),
|
380 |
+
"default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}},
|
381 |
+
"type": "multimodal",
|
382 |
+
},
|
383 |
+
"image-to-text": {
|
384 |
+
"impl": ImageToTextPipeline,
|
385 |
+
"tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (),
|
386 |
+
"pt": (AutoModelForVision2Seq,) if is_torch_available() else (),
|
387 |
+
"default": {
|
388 |
+
"model": {
|
389 |
+
"pt": ("ydshieh/vit-gpt2-coco-en", "65636df"),
|
390 |
+
"tf": ("ydshieh/vit-gpt2-coco-en", "65636df"),
|
391 |
+
}
|
392 |
+
},
|
393 |
+
"type": "multimodal",
|
394 |
+
},
|
395 |
+
"object-detection": {
|
396 |
+
"impl": ObjectDetectionPipeline,
|
397 |
+
"tf": (),
|
398 |
+
"pt": (AutoModelForObjectDetection,) if is_torch_available() else (),
|
399 |
+
"default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}},
|
400 |
+
"type": "multimodal",
|
401 |
+
},
|
402 |
+
"zero-shot-object-detection": {
|
403 |
+
"impl": ZeroShotObjectDetectionPipeline,
|
404 |
+
"tf": (),
|
405 |
+
"pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (),
|
406 |
+
"default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}},
|
407 |
+
"type": "multimodal",
|
408 |
+
},
|
409 |
+
"depth-estimation": {
|
410 |
+
"impl": DepthEstimationPipeline,
|
411 |
+
"tf": (),
|
412 |
+
"pt": (AutoModelForDepthEstimation,) if is_torch_available() else (),
|
413 |
+
"default": {"model": {"pt": ("Intel/dpt-large", "e93beec")}},
|
414 |
+
"type": "image",
|
415 |
+
},
|
416 |
+
"video-classification": {
|
417 |
+
"impl": VideoClassificationPipeline,
|
418 |
+
"tf": (),
|
419 |
+
"pt": (AutoModelForVideoClassification,) if is_torch_available() else (),
|
420 |
+
"default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "4800870")}},
|
421 |
+
"type": "video",
|
422 |
+
},
|
423 |
+
"mask-generation": {
|
424 |
+
"impl": MaskGenerationPipeline,
|
425 |
+
"tf": (),
|
426 |
+
"pt": (AutoModelForMaskGeneration,) if is_torch_available() else (),
|
427 |
+
"default": {"model": {"pt": ("facebook/sam-vit-huge", "997b15")}},
|
428 |
+
"type": "multimodal",
|
429 |
+
},
|
430 |
+
"image-to-image": {
|
431 |
+
"impl": ImageToImagePipeline,
|
432 |
+
"tf": (),
|
433 |
+
"pt": (AutoModelForImageToImage,) if is_torch_available() else (),
|
434 |
+
"default": {"model": {"pt": ("caidas/swin2SR-classical-sr-x2-64", "4aaedcb")}},
|
435 |
+
"type": "image",
|
436 |
+
},
|
437 |
+
}

NO_FEATURE_EXTRACTOR_TASKS = set()
NO_IMAGE_PROCESSOR_TASKS = set()
NO_TOKENIZER_TASKS = set()

# Those model configs are special: they are generic over their task, meaning any tokenizer/feature_extractor
# might be used for a given model, so we cannot use the statically defined TOKENIZER_MAPPING and
# FEATURE_EXTRACTOR_MAPPING to see if the model defines such objects or not.
MULTI_MODEL_AUDIO_CONFIGS = {"SpeechEncoderDecoderConfig"}
MULTI_MODEL_VISION_CONFIGS = {"VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"}
for task, values in SUPPORTED_TASKS.items():
    if values["type"] == "text":
        NO_FEATURE_EXTRACTOR_TASKS.add(task)
        NO_IMAGE_PROCESSOR_TASKS.add(task)
    elif values["type"] in {"image", "video"}:
        NO_TOKENIZER_TASKS.add(task)
    elif values["type"] in {"audio"}:
        NO_TOKENIZER_TASKS.add(task)
        NO_IMAGE_PROCESSOR_TASKS.add(task)
    elif values["type"] != "multimodal":
        raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}")

PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES)
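

# Illustrative sketch (not part of the upstream module): how a registry entry above resolves to its
# default checkpoint. The helper name `_example_default_checkpoint` is hypothetical and only documents
# the entry layout ("impl" / "tf" / "pt" / "default" / "type") used by SUPPORTED_TASKS.
def _example_default_checkpoint(task: str = "text-classification", framework: str = "pt") -> Tuple[str, str]:
    entry = SUPPORTED_TASKS[task]
    # Non-parametrized tasks store their default checkpoint as (model_id, revision) per framework.
    model_id, revision = entry["default"]["model"][framework]
    return model_id, revision  # e.g. ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b")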


def get_supported_tasks() -> List[str]:
    """
    Returns a list of supported task strings.
    """
    return PIPELINE_REGISTRY.get_supported_tasks()


def get_task(model: str, token: Optional[str] = None, **deprecated_kwargs) -> str:
    use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
            FutureWarning,
        )
        if token is not None:
            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
        token = use_auth_token

    if is_offline_mode():
        raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode")
    try:
        info = model_info(model, token=token)
    except Exception as e:
        raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}")
    if not info.pipeline_tag:
        raise RuntimeError(
            f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically"
        )
    if getattr(info, "library_name", "transformers") != "transformers":
        raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers")
    task = info.pipeline_tag
    return task
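

# Example (illustrative, requires Hub access; the model id is one of the defaults listed above):
#     >>> get_task("distilbert/distilbert-base-uncased-finetuned-sst-2-english")
#     'text-classification'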


def check_task(task: str) -> Tuple[str, Dict, Any]:
    """
    Checks an incoming task string to validate it's correct and returns the default Pipeline and Model classes, and
    default models if they exist.

    Args:
        task (`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:

            - `"audio-classification"`
            - `"automatic-speech-recognition"`
            - `"conversational"`
            - `"depth-estimation"`
            - `"document-question-answering"`
            - `"feature-extraction"`
            - `"fill-mask"`
            - `"image-classification"`
            - `"image-feature-extraction"`
            - `"image-segmentation"`
            - `"image-to-text"`
            - `"image-to-image"`
            - `"object-detection"`
            - `"question-answering"`
            - `"summarization"`
            - `"table-question-answering"`
            - `"text2text-generation"`
            - `"text-classification"` (alias `"sentiment-analysis"` available)
            - `"text-generation"`
            - `"text-to-audio"` (alias `"text-to-speech"` available)
            - `"token-classification"` (alias `"ner"` available)
            - `"translation"`
            - `"translation_xx_to_yy"`
            - `"video-classification"`
            - `"visual-question-answering"` (alias `"vqa"` available)
            - `"zero-shot-classification"`
            - `"zero-shot-image-classification"`
            - `"zero-shot-object-detection"`

    Returns:
        (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name
        (removed alias and options), the actual dictionary required to initialize the pipeline, and some extra task
        options for parametrized tasks like "translation_XX_to_YY".
    """
    return PIPELINE_REGISTRY.check_task(task)
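

# Example (illustrative): aliases are normalized, and parametrized tasks return extra options.
#     >>> check_task("sentiment-analysis")[0]
#     'text-classification'
#     >>> normalized, defaults, options = check_task("translation_en_to_de")
#     >>> normalized, options
#     ('translation', ('en', 'de'))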


def clean_custom_task(task_info):
    import transformers

    if "impl" not in task_info:
        raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.")
    pt_class_names = task_info.get("pt", ())
    if isinstance(pt_class_names, str):
        pt_class_names = [pt_class_names]
    task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names)
    tf_class_names = task_info.get("tf", ())
    if isinstance(tf_class_names, str):
        tf_class_names = [tf_class_names]
    task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names)
    return task_info, None
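

# Example (illustrative; "MyPipeline" is a hypothetical custom class name): string class names coming from a
# repo's `config.custom_pipelines` entry are resolved to the corresponding auto classes, so
#     {"impl": "MyPipeline", "pt": "AutoModelForSequenceClassification", "tf": ()}
# becomes
#     {"impl": "MyPipeline", "pt": (AutoModelForSequenceClassification,), "tf": ()}
# and the second return value is always None (custom tasks carry no extra task options).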


def pipeline(
    task: str = None,
    model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None,
    config: Optional[Union[str, PretrainedConfig]] = None,
    tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None,
    feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
    image_processor: Optional[Union[str, BaseImageProcessor]] = None,
    framework: Optional[str] = None,
    revision: Optional[str] = None,
    use_fast: bool = True,
    token: Optional[Union[str, bool]] = None,
    device: Optional[Union[int, str, "torch.device"]] = None,
    device_map=None,
    torch_dtype=None,
    trust_remote_code: Optional[bool] = None,
    model_kwargs: Dict[str, Any] = None,
    pipeline_class: Optional[Any] = None,
    **kwargs,
) -> Pipeline:
    """
    Utility factory method to build a [`Pipeline`].

    Pipelines are made of:

        - A [tokenizer](tokenizer) in charge of mapping raw textual input to tokens.
        - A [model](model) to make predictions from the inputs.
        - Some (optional) post-processing for enhancing the model's output.

    Args:
        task (`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:

            - `"audio-classification"`: will return a [`AudioClassificationPipeline`].
            - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
            - `"conversational"`: will return a [`ConversationalPipeline`].
            - `"depth-estimation"`: will return a [`DepthEstimationPipeline`].
            - `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`].
            - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
            - `"fill-mask"`: will return a [`FillMaskPipeline`].
            - `"image-classification"`: will return a [`ImageClassificationPipeline`].
            - `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`].
            - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`].
            - `"image-to-image"`: will return a [`ImageToImagePipeline`].
            - `"image-to-text"`: will return a [`ImageToTextPipeline`].
            - `"mask-generation"`: will return a [`MaskGenerationPipeline`].
            - `"object-detection"`: will return a [`ObjectDetectionPipeline`].
            - `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
            - `"summarization"`: will return a [`SummarizationPipeline`].
            - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
            - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
            - `"text-classification"` (alias `"sentiment-analysis"` available): will return a
              [`TextClassificationPipeline`].
            - `"text-generation"`: will return a [`TextGenerationPipeline`].
            - `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`].
            - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
            - `"translation"`: will return a [`TranslationPipeline`].
            - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
            - `"video-classification"`: will return a [`VideoClassificationPipeline`].
            - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
            - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
            - `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`].
            - `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`].
            - `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`].

        model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
            The model that will be used by the pipeline to make predictions. This can be a model identifier or an
            actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
            [`TFPreTrainedModel`] (for TensorFlow).

            If not provided, the default for the `task` will be loaded.
        config (`str` or [`PretrainedConfig`], *optional*):
            The configuration that will be used by the pipeline to instantiate the model. This can be a model
            identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].

            If not provided, the default configuration file for the requested model will be used. That means that if
            `model` is given, its default configuration will be used. However, if `model` is not supplied, this
            `task`'s default model's config is used instead.
        tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
            The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
            identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].

            If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
            is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
            However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
            will be loaded.
        feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
            The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
            identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].

            Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
            models. Multi-modal models will also require a tokenizer to be passed.

            If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
            `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
            is a string). However, if `config` is also not given or not a string, then the default feature extractor
            for the given `task` will be loaded.
        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
            provided.
        revision (`str`, *optional*, defaults to `"main"`):
            When passing a task name or a string model identifier: The specific model version to use. It can be a
            branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
            artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
        use_fast (`bool`, *optional*, defaults to `True`):
            Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
        token (`str` or *bool*, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `huggingface-cli login` (stored in `~/.huggingface`).
        device (`int` or `str` or `torch.device`):
            Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this
            pipeline will be allocated.
        device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*):
            Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set
            `device_map="auto"` to compute the most optimized `device_map` automatically (see
            [here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload)
            for more information).

            <Tip warning={true}>

            Do not use `device_map` AND `device` at the same time as they will conflict

            </Tip>

        torch_dtype (`str` or `torch.dtype`, *optional*):
            Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model
            (`torch.float16`, `torch.bfloat16`, ... or `"auto"`).
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether or not to allow for custom code defined on the Hub in their own modeling, configuration,
            tokenization or even pipeline files. This option should only be set to `True` for repositories you trust
            and in which you have read the code, as it will execute code present on the Hub on your local machine.
        model_kwargs (`Dict[str, Any]`, *optional*):
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
            corresponding pipeline class for possible values).

    Returns:
        [`Pipeline`]: A suitable pipeline for the task.

    Examples:

    ```python
    >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer

    >>> # Sentiment analysis pipeline
    >>> analyzer = pipeline("sentiment-analysis")

    >>> # Question answering pipeline, specifying the checkpoint identifier
    >>> oracle = pipeline(
    ...     "question-answering", model="distilbert/distilbert-base-cased-distilled-squad", tokenizer="google-bert/bert-base-cased"
    ... )

    >>> # Named entity recognition pipeline, passing in a specific model and tokenizer
    >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
    >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
    >>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer)
    ```"""
    if model_kwargs is None:
        model_kwargs = {}
    # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs,
    # this is to keep BC).
    use_auth_token = model_kwargs.pop("use_auth_token", None)
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
            FutureWarning,
        )
        if token is not None:
            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
        token = use_auth_token

    code_revision = kwargs.pop("code_revision", None)
    commit_hash = kwargs.pop("_commit_hash", None)

    hub_kwargs = {
        "revision": revision,
        "token": token,
        "trust_remote_code": trust_remote_code,
        "_commit_hash": commit_hash,
    }

    if task is None and model is None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline without either a task or a model "
            "being specified. "
            "Please provide a task class or a model"
        )

    if model is None and tokenizer is not None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer"
            " may not be compatible with the default model. Please provide a PreTrainedModel class or a"
            " path/identifier to a pretrained model when providing tokenizer."
        )
    if model is None and feature_extractor is not None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided"
            " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class"
            " or a path/identifier to a pretrained model when providing feature_extractor."
        )
    if isinstance(model, Path):
        model = str(model)

    if commit_hash is None:
        pretrained_model_name_or_path = None
        if isinstance(config, str):
            pretrained_model_name_or_path = config
        elif config is None and isinstance(model, str):
            pretrained_model_name_or_path = model

        if not isinstance(config, PretrainedConfig) and pretrained_model_name_or_path is not None:
            # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible
            resolved_config_file = cached_file(
                pretrained_model_name_or_path,
                CONFIG_NAME,
                _raise_exceptions_for_gated_repo=False,
                _raise_exceptions_for_missing_entries=False,
                _raise_exceptions_for_connection_errors=False,
                **hub_kwargs,
            )
            hub_kwargs["_commit_hash"] = extract_commit_hash(resolved_config_file, commit_hash)
        else:
            hub_kwargs["_commit_hash"] = getattr(config, "_commit_hash", None)

    # Config is the primordial information item.
    # Instantiate config if needed
    if isinstance(config, str):
        config = AutoConfig.from_pretrained(
            config, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs
        )
        hub_kwargs["_commit_hash"] = config._commit_hash
    elif config is None and isinstance(model, str):
        # Check for an adapter file in the model path if PEFT is available
        if is_peft_available():
            # `find_adapter_config_file` doesn't accept `trust_remote_code`
            _hub_kwargs = {k: v for k, v in hub_kwargs.items() if k != "trust_remote_code"}
            maybe_adapter_path = find_adapter_config_file(
                model,
                token=hub_kwargs["token"],
                revision=hub_kwargs["revision"],
                _commit_hash=hub_kwargs["_commit_hash"],
            )

            if maybe_adapter_path is not None:
                with open(maybe_adapter_path, "r", encoding="utf-8") as f:
                    adapter_config = json.load(f)
                    model = adapter_config["base_model_name_or_path"]

        config = AutoConfig.from_pretrained(
            model, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs
        )
        hub_kwargs["_commit_hash"] = config._commit_hash

    custom_tasks = {}
    if config is not None and len(getattr(config, "custom_pipelines", {})) > 0:
        custom_tasks = config.custom_pipelines
        if task is None and trust_remote_code is not False:
            if len(custom_tasks) == 1:
                task = list(custom_tasks.keys())[0]
            else:
                raise RuntimeError(
                    "We can't infer the task automatically for this model as there are multiple tasks available. Pick "
                    f"one in {', '.join(custom_tasks.keys())}"
                )

    if task is None and model is not None:
        if not isinstance(model, str):
            raise RuntimeError(
                "Inferring the task automatically requires to check the hub with a model_id defined as a `str`. "
                f"{model} is not a valid model_id."
            )
        task = get_task(model, token)

    # Retrieve the task
    if task in custom_tasks:
        normalized_task = task
        targeted_task, task_options = clean_custom_task(custom_tasks[task])
        if pipeline_class is None:
            if not trust_remote_code:
                raise ValueError(
                    "Loading this pipeline requires you to execute the code in the pipeline file in that"
                    " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
                    " set the option `trust_remote_code=True` to remove this error."
                )
            class_ref = targeted_task["impl"]
            pipeline_class = get_class_from_dynamic_module(
                class_ref,
                model,
                code_revision=code_revision,
                **hub_kwargs,
            )
    else:
        normalized_task, targeted_task, task_options = check_task(task)
        if pipeline_class is None:
            pipeline_class = targeted_task["impl"]

    # Use default model/config/tokenizer for the task if no model is provided
    if model is None:
        # At that point framework might still be undetermined
        model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options)
        revision = revision if revision is not None else default_revision
        logger.warning(
            f"No model was supplied, defaulted to {model} and revision"
            f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n"
            "Using a pipeline without specifying a model name and revision in production is not recommended."
        )
        if config is None and isinstance(model, str):
            config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            hub_kwargs["_commit_hash"] = config._commit_hash

    if device_map is not None:
        if "device_map" in model_kwargs:
            raise ValueError(
                'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those'
                " arguments might conflict, use only one."
            )
        if device is not None:
            logger.warning(
                "Both `device` and `device_map` are specified. `device` will override `device_map`. You"
                " will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`."
            )
        model_kwargs["device_map"] = device_map
    if torch_dtype is not None:
        if "torch_dtype" in model_kwargs:
            raise ValueError(
                'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those'
                " arguments might conflict, use only one."
            )
        if isinstance(torch_dtype, str) and hasattr(torch, torch_dtype):
            torch_dtype = getattr(torch, torch_dtype)
        model_kwargs["torch_dtype"] = torch_dtype

    model_name = model if isinstance(model, str) else None

    # Load the correct model if possible
    # Infer the framework from the model if not already defined
    if isinstance(model, str) or framework is None:
        model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]}
        framework, model = infer_framework_load_model(
            model,
            model_classes=model_classes,
            config=config,
            framework=framework,
            task=task,
            **hub_kwargs,
            **model_kwargs,
        )

    model_config = model.config
    hub_kwargs["_commit_hash"] = model.config._commit_hash
    load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None
    load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None
    load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None

    # If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while
    # `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some
    # vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`.
    # TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue.
    # This block is only temporary, to make CI green.
    if load_image_processor and load_feature_extractor:
        load_feature_extractor = False

    if (
        tokenizer is None
        and not load_tokenizer
        and normalized_task not in NO_TOKENIZER_TASKS
        # Using class name to avoid importing the real class.
        and (
            model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS
            or model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS
        )
    ):
        # This is a special category of models, that are fusions of multiple models
        # so the model_config might not define a tokenizer, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_tokenizer = True
    if (
        image_processor is None
        and not load_image_processor
        and normalized_task not in NO_IMAGE_PROCESSOR_TASKS
        # Using class name to avoid importing the real class.
        and model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS
    ):
        # This is a special category of models, that are fusions of multiple models
        # so the model_config might not define an image processor, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_image_processor = True
    if (
        feature_extractor is None
        and not load_feature_extractor
        and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS
        # Using class name to avoid importing the real class.
        and model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS
    ):
        # This is a special category of models, that are fusions of multiple models
        # so the model_config might not define a feature extractor, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_feature_extractor = True

    if task in NO_TOKENIZER_TASKS:
        # These will never require a tokenizer.
        # The model, on the other hand, might have a tokenizer, but the files could be
        # missing from the hub; instead of failing on such repos, we just force to not load it.
        load_tokenizer = False

    if task in NO_FEATURE_EXTRACTOR_TASKS:
        load_feature_extractor = False
    if task in NO_IMAGE_PROCESSOR_TASKS:
        load_image_processor = False

    if load_tokenizer:
        # Try to infer tokenizer from model or config name (if provided as str)
        if tokenizer is None:
            if isinstance(model_name, str):
                tokenizer = model_name
            elif isinstance(config, str):
                tokenizer = config
            else:
                # Impossible to guess what is the right tokenizer here
                raise Exception(
                    "Impossible to guess which tokenizer to use. "
                    "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
                )

        # Instantiate tokenizer if needed
        if isinstance(tokenizer, (str, tuple)):
            if isinstance(tokenizer, tuple):
                # For tuple we have (tokenizer name, {kwargs})
                use_fast = tokenizer[1].pop("use_fast", use_fast)
                tokenizer_identifier = tokenizer[0]
                tokenizer_kwargs = tokenizer[1]
            else:
                tokenizer_identifier = tokenizer
                tokenizer_kwargs = model_kwargs.copy()
                tokenizer_kwargs.pop("torch_dtype", None)

            tokenizer = AutoTokenizer.from_pretrained(
                tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs
            )

    if load_image_processor:
        # Try to infer image processor from model or config name (if provided as str)
        if image_processor is None:
            if isinstance(model_name, str):
                image_processor = model_name
            elif isinstance(config, str):
                image_processor = config
            # Backward compatibility, as `feature_extractor` used to be the name
            # for `ImageProcessor`.
            elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor):
                image_processor = feature_extractor
            else:
                # Impossible to guess what is the right image_processor here
                raise Exception(
                    "Impossible to guess which image processor to use. "
                    "Please provide a PreTrainedImageProcessor class or a path/identifier "
                    "to a pretrained image processor."
                )

        # Instantiate image_processor if needed
        if isinstance(image_processor, (str, tuple)):
            image_processor = AutoImageProcessor.from_pretrained(
                image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs
            )

    if load_feature_extractor:
        # Try to infer feature extractor from model or config name (if provided as str)
        if feature_extractor is None:
            if isinstance(model_name, str):
                feature_extractor = model_name
            elif isinstance(config, str):
                feature_extractor = config
            else:
                # Impossible to guess what is the right feature_extractor here
                raise Exception(
                    "Impossible to guess which feature extractor to use. "
                    "Please provide a PreTrainedFeatureExtractor class or a path/identifier "
                    "to a pretrained feature extractor."
                )

        # Instantiate feature_extractor if needed
        if isinstance(feature_extractor, (str, tuple)):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs
            )

            if (
                feature_extractor._processor_class
                and feature_extractor._processor_class.endswith("WithLM")
                and isinstance(model_name, str)
            ):
                try:
                    import kenlm  # to trigger `ImportError` if not installed
                    from pyctcdecode import BeamSearchDecoderCTC

                    if os.path.isdir(model_name) or os.path.isfile(model_name):
                        decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
                    else:
                        language_model_glob = os.path.join(
                            BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*"
                        )
                        alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
                        allow_patterns = [language_model_glob, alphabet_filename]
                        decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns)

                    kwargs["decoder"] = decoder
                except ImportError as e:
                    logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}")
                    if not is_kenlm_available():
                        logger.warning("Try to install `kenlm`: `pip install kenlm`")

                    if not is_pyctcdecode_available():
                        logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode`")

    if task == "translation" and model.config.task_specific_params:
        for key in model.config.task_specific_params:
            if key.startswith("translation"):
                task = key
                warnings.warn(
                    f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
                    UserWarning,
                )
                break

    if tokenizer is not None:
        kwargs["tokenizer"] = tokenizer

    if feature_extractor is not None:
        kwargs["feature_extractor"] = feature_extractor

    if torch_dtype is not None:
        kwargs["torch_dtype"] = torch_dtype

    if image_processor is not None:
        kwargs["image_processor"] = image_processor

    if device is not None:
        kwargs["device"] = device

    return pipeline_class(model=model, framework=framework, task=task, **kwargs)
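

# Usage sketch (illustrative, not part of the upstream module): the factory above can be driven either by a
# task name alone (the default checkpoint is then resolved from SUPPORTED_TASKS) or by an explicit model id;
# `device` and `torch_dtype` are forwarded to the model as shown in the body.
#     >>> from transformers import pipeline
#     >>> asr = pipeline("automatic-speech-recognition")  # resolves to facebook/wav2vec2-base-960h by default
#     >>> generator = pipeline("text-generation", model="openai-community/gpt2", device=0, torch_dtype="auto")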
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 32 kB)
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc ADDED (binary file, 7.55 kB)
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc ADDED (binary file, 44.2 kB)
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc ADDED (binary file, 12.7 kB)
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc ADDED (binary file, 17 kB)
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc ADDED (binary file, 9.42 kB)
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc ADDED (binary file, 8.41 kB)