# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from numbers import Number
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union

from datasets import Dataset, load_dataset
from typing_extensions import Literal

from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
from .utils import DatasetColumnPair

if TYPE_CHECKING:
    from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel


TASK_DOCUMENTATION = r"""
    Examples:
    ```python
    >>> from evaluate import evaluator
    >>> from datasets import load_dataset
    >>> task_evaluator = evaluator("text-classification")
    >>> data = load_dataset("imdb", split="test[:2]")
    >>> results = task_evaluator.compute(
    >>>     model_or_pipeline="huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli",
    >>>     data=data,
    >>>     metric="accuracy",
    >>>     label_mapping={"LABEL_0": 0.0, "LABEL_1": 1.0},
    >>>     strategy="bootstrap",
    >>>     n_resamples=10,
    >>>     random_state=0
    >>> )
    ```
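
    A second, illustrative example for a dataset that carries two text columns (paraphrase/NLI style),
    using `input_column` and `second_input_column`. The dataset, split, checkpoint, and label mapping
    below are assumptions for illustration and may need adjusting:
    ```python
    >>> from evaluate import evaluator
    >>> from datasets import load_dataset
    >>> task_evaluator = evaluator("text-classification")
    >>> data = load_dataset("glue", "mrpc", split="validation[:2]")
    >>> results = task_evaluator.compute(
    >>>     model_or_pipeline="textattack/bert-base-uncased-MRPC",
    >>>     data=data,
    >>>     input_column="sentence1",
    >>>     second_input_column="sentence2",
    >>>     metric="accuracy",
    >>>     label_mapping={"LABEL_0": 0, "LABEL_1": 1},
    >>> )
    ```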
"""


class TextClassificationEvaluator(Evaluator):
    """
    Text classification evaluator.
    This text classification evaluator can currently be loaded from [`evaluator`] using the default task name
    `text-classification` or with a `"sentiment-analysis"` alias.
    Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual
    feature as input and a categorical label as output.
    """

    PIPELINE_KWARGS = {"truncation": True}

    def __init__(self, task="text-classification", default_metric_name=None):
        super().__init__(task, default_metric_name=default_metric_name)

    def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str):
        if data is None:
            raise ValueError(
                "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
            )

        self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})

        if second_input_column is not None:
            self.check_required_columns(data, {"second_input_column": second_input_column})

        data = load_dataset(data) if isinstance(data, str) else data

        # The label column provides the references for the metric; the input column(s) are wrapped as a
        # column pair so the pipeline is fed `text` (and optionally `text_pair`) fields.
        return {"references": data[label_column]}, DatasetColumnPair(
            data, input_column, second_input_column, "text", "text_pair"
        )

    def predictions_processor(self, predictions, label_mapping):
        # Map the pipeline's label strings through `label_mapping` when one is provided, e.g.
        # [{"label": "LABEL_1", "score": 0.98}] with {"LABEL_0": 0, "LABEL_1": 1} becomes
        # {"predictions": [1]}; otherwise the raw label strings are kept.
        predictions = [
            label_mapping[element["label"]] if label_mapping is not None else element["label"]
            for element in predictions
        ]
        return {"predictions": predictions}

    @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
    @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
    def compute(
        self,
        model_or_pipeline: Union[
            str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"  # noqa: F821
        ] = None,
        data: Union[str, Dataset] = None,
        subset: Optional[str] = None,
        split: Optional[str] = None,
        metric: Union[str, EvaluationModule] = None,
        tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None,  # noqa: F821
        feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None,  # noqa: F821
        strategy: Literal["simple", "bootstrap"] = "simple",
        confidence_level: float = 0.95,
        n_resamples: int = 9999,
        device: int = None,
        random_state: Optional[int] = None,
        input_column: str = "text",
        second_input_column: Optional[str] = None,
        label_column: str = "label",
        label_mapping: Optional[Dict[str, Number]] = None,
    ) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, *optional*, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column (`str`, *optional*, defaults to `None`):
The name of the second column containing the text features. This may be useful for classification tasks
as MNLI, where two columns are used.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
        result = {}

        self.check_for_mismatch_in_device_setup(device, model_or_pipeline)

        # Prepare inputs
        data = self.load_data(data=data, subset=subset, split=split)
        metric_inputs, pipe_inputs = self.prepare_data(
            data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column
        )
        pipe = self.prepare_pipeline(
            model_or_pipeline=model_or_pipeline,
            tokenizer=tokenizer,
            feature_extractor=feature_extractor,
            device=device,
        )
        metric = self.prepare_metric(metric)

        # Compute predictions
        predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
        predictions = self.predictions_processor(predictions, label_mapping)
        metric_inputs.update(predictions)

        # Compute metrics from references and predictions
        metric_results = self.compute_metric(
            metric=metric,
            metric_inputs=metric_inputs,
            strategy=strategy,
            confidence_level=confidence_level,
            n_resamples=n_resamples,
            random_state=random_state,
        )

        result.update(metric_results)
        result.update(perf_results)

        return result