index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
71,052 |
evaluate.evaluator.base
|
_infer_device
|
Helper function to check if GPU or CPU is available for inference.
|
@staticmethod
def _infer_device() -> int:
"""Helper function to check if GPU or CPU is available for inference."""
# try infer with torch first
try:
import torch
if torch.cuda.is_available():
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
# if not available try TF
try:
import tensorflow as tf
if len(tf.config.list_physical_devices("GPU")) > 0:
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
device = -1
if device == -1:
logger.info("No GPU found. The default device for pipeline inference is set to CPU.")
else:
logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).")
return device
|
() -> int
|
71,053 |
evaluate.evaluator.base
|
call_pipeline
| null |
def call_pipeline(self, pipe, *args, **kwargs):
start_time = perf_counter()
pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
end_time = perf_counter()
return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output))
|
(self, pipe, *args, **kwargs)
|
71,054 |
evaluate.evaluator.base
|
check_for_mismatch_in_device_setup
| null |
@staticmethod
def check_for_mismatch_in_device_setup(device, model_or_pipeline):
if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline):
if model_or_pipeline.device.type == "cpu":
raise ValueError(
"The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an "
"accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during "
"initialization to use an accelerator, or pass `device=None` to `compute`. "
)
elif device != model_or_pipeline.device.index:
raise ValueError(
f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`."
)
|
(device, model_or_pipeline)
|
71,055 |
evaluate.evaluator.base
|
check_required_columns
|
Ensure the columns required for the evaluation are present in the dataset.
Args:
data (`str` or [`Dataset`]):
Specifies the dataset we will run evaluation on.
columns_names (`List[str]`):
List of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method,
while the values are the column names to check.
Example:
```py
>>> from datasets import load_dataset
>>> from evaluate import evaluator
>>> data = load_dataset("rotten_tomatoes', split="train")
>>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
```
|
def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]):
"""
Ensure the columns required for the evaluation are present in the dataset.
Args:
data (`str` or [`Dataset`]):
Specifies the dataset we will run evaluation on.
columns_names (`List[str]`):
List of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method,
while the values are the column names to check.
Example:
```py
>>> from datasets import load_dataset
>>> from evaluate import evaluator
>>> data = load_dataset("rotten_tomatoes', split="train")
>>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
```
"""
for input_name, column_name in columns_names.items():
if column_name not in data.column_names:
raise ValueError(
f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}."
)
|
(self, data: Union[str, datasets.arrow_dataset.Dataset], columns_names: Dict[str, str])
|
71,056 |
evaluate.evaluator.audio_classification
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument in not specified, we initialize the default pipeline for the task (in this case
`text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, defaults to `"file"`):
The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` spefied in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
<Tip>
Remember that, in order to process audio files, you need ffmpeg installed (https://ffmpeg.org/download.html)
</Tip>
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("audio-classification")
>>> data = load_dataset("superb", 'ks', split="test[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline=""superb/wav2vec2-base-superb-ks"",
>>> data=data,
>>> label_column="label",
>>> input_column="file",
>>> metric="accuracy",
>>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"}
>>> )
```
<Tip>
The evaluator supports raw audio data as well, in the form of a numpy array. However, be aware that calling
the audio column automatically decodes and resamples the audio files, which can be slow for large datasets.
</Tip>
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("audio-classification")
>>> data = load_dataset("superb", 'ks', split="test[:40]")
>>> data = data.map(lambda example: {"audio": example["audio"]["array"]})
>>> results = task_evaluator.compute(
>>> model_or_pipeline=""superb/wav2vec2-base-superb-ks"",
>>> data=data,
>>> label_column="label",
>>> input_column="audio",
>>> metric="accuracy",
>>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"}
>>> )
```
|
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "file",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"file"`):
The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
label_mapping=label_mapping,
)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, feature_extractor: Union[str, ForwardRef('FeatureExtractionMixin'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'file', label_column: str = 'label', label_mapping: Optional[Dict[str, numbers.Number]] = None) -> Tuple[Dict[str, float], Any]
|
71,057 |
evaluate.evaluator.base
|
compute_metric
|
Compute and return metrics.
|
def compute_metric(
self,
metric: EvaluationModule,
metric_inputs: Dict,
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
random_state: Optional[int] = None,
):
"""Compute and return metrics."""
result = metric.compute(**metric_inputs, **self.METRIC_KWARGS)
if strategy == "bootstrap":
metric_keys = result.keys()
bootstrap_dict = self._compute_confidence_interval(
metric,
metric_inputs,
metric_keys,
confidence_level,
n_resamples,
random_state,
)
for key in metric_keys:
bootstrap_dict[key]["score"] = result[key]
return bootstrap_dict
return result
|
(self, metric: evaluate.module.EvaluationModule, metric_inputs: Dict, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, random_state: Optional[int] = None)
|
71,058 |
evaluate.evaluator.base
|
get_dataset_split
|
Infers which split to use if `None` is given.
Args:
data (`str`):
Name of dataset.
subset (`str`):
Name of config for datasets with multiple configurations (e.g. 'glue/cola').
split (`str`, defaults to `None`):
Split to use.
Returns:
`split`: `str` containing which split to use
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
'test'
```
|
@staticmethod
def get_dataset_split(data, subset=None, split=None):
"""
Infers which split to use if `None` is given.
Args:
data (`str`):
Name of dataset.
subset (`str`):
Name of config for datasets with multiple configurations (e.g. 'glue/cola').
split (`str`, defaults to `None`):
Split to use.
Returns:
`split`: `str` containing which split to use
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
'test'
```
"""
if split is None:
split = choose_split(data, subset)
logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}")
return split
|
(data, subset=None, split=None)
|
71,059 |
evaluate.evaluator.base
|
load_data
|
Load dataset with given subset and split.
Args:
data ([`Dataset`] or `str`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of
type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Specifies dataset subset to be passed to `name` in `load_dataset`. To be
used with datasets with several configurations (e.g. glue/sst2).
split (`str`, defaults to `None`):
User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
Returns:
data ([`Dataset`]): Loaded dataset which will be used for evaluation.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
|
def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None):
"""
Load dataset with given subset and split.
Args:
data ([`Dataset`] or `str`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of
type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Specifies dataset subset to be passed to `name` in `load_dataset`. To be
used with datasets with several configurations (e.g. glue/sst2).
split (`str`, defaults to `None`):
User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
Returns:
data ([`Dataset`]): Loaded dataset which will be used for evaluation.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
"""
if isinstance(data, str):
split = self.get_dataset_split(data, subset, split)
data = load_dataset(data, name=subset, split=split)
return data
elif isinstance(data, Dataset):
if split is not None or subset is not None:
logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.")
return data
else:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
|
(self, data: Union[str, datasets.arrow_dataset.Dataset], subset: Optional[str] = None, split: Optional[str] = None)
|
71,060 |
evaluate.evaluator.audio_classification
|
predictions_processor
| null |
def predictions_processor(self, predictions, label_mapping):
pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
return {"predictions": pred_label}
|
(self, predictions, label_mapping)
|
71,061 |
evaluate.evaluator.base
|
prepare_data
|
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column(`str`, *optional*):
The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
Example:
```py
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label")
```
|
def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
"""
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column(`str`, *optional*):
The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
Example:
```py
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label")
```
"""
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
return {"references": data[label_column]}, DatasetColumn(data, input_column)
|
(self, data: datasets.arrow_dataset.Dataset, input_column: str, label_column: str, *args, **kwargs)
|
71,062 |
evaluate.evaluator.base
|
prepare_metric
|
Prepare metric.
Args:
metric (`str` or [`EvaluationModule`], defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
Returns:
The loaded metric.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_metric("accuracy")
```
|
def prepare_metric(self, metric: Union[str, EvaluationModule]):
"""
Prepare metric.
Args:
metric (`str` or [`EvaluationModule`], defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
Returns:
The loaded metric.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_metric("accuracy")
```
"""
# Prepare metric.
if metric is None:
if self.default_metric_name is None:
raise ValueError(
"`Evaluator` doesn't specify a default metric. Please specify a valid `metric` argument."
)
metric = load(self.default_metric_name)
elif isinstance(metric, str):
metric = load(metric)
return metric
|
(self, metric: Union[str, evaluate.module.EvaluationModule])
|
71,063 |
evaluate.evaluator.base
|
prepare_pipeline
|
Prepare pipeline.
Args:
model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`):
If the argument in not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`):
Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
Returns:
The initialized pipeline.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased")
```
|
def prepare_pipeline(
self,
model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
device: int = None,
):
"""
Prepare pipeline.
Args:
model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`):
If the argument in not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`):
Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
Returns:
The initialized pipeline.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased")
```
"""
if device is None:
device = self._infer_device()
if (
isinstance(model_or_pipeline, str)
or isinstance(model_or_pipeline, transformers.PreTrainedModel)
or isinstance(model_or_pipeline, transformers.TFPreTrainedModel)
):
pipe = pipeline(
self.task,
model=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
else:
if model_or_pipeline is None:
pipe = pipeline(self.task, device=device)
else:
pipe = model_or_pipeline
if tokenizer is not None and feature_extractor is not None:
logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).")
if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")):
raise ValueError(
f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task."
)
return pipe
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')], tokenizer: Union[ForwardRef('PreTrainedTokenizerBase'), ForwardRef('FeatureExtractionMixin')] = None, feature_extractor: Union[ForwardRef('PreTrainedTokenizerBase'), ForwardRef('FeatureExtractionMixin')] = None, device: int = None)
|
71,064 |
evaluate.evaluator.automatic_speech_recognition
|
AutomaticSpeechRecognitionEvaluator
|
Automatic speech recognition evaluator.
This automatic speech recognition evaluator can currently be loaded from [`evaluator`] using the default task name
`automatic-speech-recognition`.
Methods in this class assume a data format compatible with the [`AutomaticSpeechRecognitionPipeline`].
|
class AutomaticSpeechRecognitionEvaluator(Evaluator):
"""
Automatic speech recognition evaluator.
This automatic speech recognition evaluator can currently be loaded from [`evaluator`] using the default task name
`automatic-speech-recognition`.
Methods in this class assume a data format compatible with the [`AutomaticSpeechRecognitionPipeline`].
"""
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="automatic-speech-recognition", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
return {"predictions": [pred["text"] for pred in predictions]}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "path",
label_column: str = "sentence",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"path"`):
the name of the column containing the input audio path in the dataset specified by `data`.
label_column (`str`, defaults to `"sentence"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
"""
if generation_kwargs is not None:
self.PIPELINE_KWARGS.update(generation_kwargs)
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
)
return result
|
(task='automatic-speech-recognition', default_metric_name=None)
|
71,065 |
evaluate.evaluator.automatic_speech_recognition
|
__init__
| null |
def __init__(self, task="automatic-speech-recognition", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='automatic-speech-recognition', default_metric_name=None)
|
71,072 |
evaluate.evaluator.automatic_speech_recognition
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument in not specified, we initialize the default pipeline for the task (in this case
`text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, defaults to `"path"`):
the name of the column containing the input audio path in the dataset specified by `data`.
label_column (`str`, defaults to `"sentence"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` spefied in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("automatic-speech-recognition")
>>> data = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="validation[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="https://huggingface.co/openai/whisper-tiny.en",
>>> data=data,
>>> input_column="path",
>>> label_column="sentence",
>>> metric="wer",
>>> )
```
|
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "path",
label_column: str = "sentence",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"path"`):
the name of the column containing the input audio path in the dataset specified by `data`.
label_column (`str`, defaults to `"sentence"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
"""
if generation_kwargs is not None:
self.PIPELINE_KWARGS.update(generation_kwargs)
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'path', label_column: str = 'sentence', generation_kwargs: dict = None) -> Tuple[Dict[str, float], Any]
|
71,076 |
evaluate.evaluator.automatic_speech_recognition
|
predictions_processor
| null |
def predictions_processor(self, predictions, label_mapping):
return {"predictions": [pred["text"] for pred in predictions]}
|
(self, predictions, label_mapping)
|
71,080 |
evaluate.module
|
CombinedEvaluations
| null |
class CombinedEvaluations:
def __init__(self, evaluation_modules, force_prefix=False):
from .loading import load # avoid circular imports
self.evaluation_module_names = None
if isinstance(evaluation_modules, list):
self.evaluation_modules = evaluation_modules
elif isinstance(evaluation_modules, dict):
self.evaluation_modules = list(evaluation_modules.values())
self.evaluation_module_names = list(evaluation_modules.keys())
loaded_modules = []
for module in self.evaluation_modules:
if isinstance(module, str):
module = load(module)
loaded_modules.append(module)
self.evaluation_modules = loaded_modules
if self.evaluation_module_names is None:
self.evaluation_module_names = [module.name for module in self.evaluation_modules]
self.force_prefix = force_prefix
def add(self, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for each evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
... clf_metrics.add(references=ref, predictions=pred)
```
"""
for evaluation_module in self.evaluation_modules:
batch = {"predictions": prediction, "references": reference, **kwargs}
batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
evaluation_module.add(**batch)
def add_batch(self, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for each evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... clf_metrics.add(references=refs, predictions=preds)
```
"""
for evaluation_module in self.evaluation_modules:
batch = {"predictions": predictions, "references": references, **kwargs}
batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
evaluation_module.add_batch(**batch)
def compute(self, predictions=None, references=None, **kwargs):
"""Compute each evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (*optional*):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> clf_metrics.compute(predictions=[0,1], references=[1,1])
{'accuracy': 0.5, 'f1': 0.6666666666666666}
```
"""
results = []
for evaluation_module in self.evaluation_modules:
batch = {"predictions": predictions, "references": references, **kwargs}
results.append(evaluation_module.compute(**batch))
return self._merge_results(results)
def _merge_results(self, results):
merged_results = {}
results_keys = list(itertools.chain.from_iterable([r.keys() for r in results]))
duplicate_keys = {item for item, count in collections.Counter(results_keys).items() if count > 1}
duplicate_names = [
item for item, count in collections.Counter(self.evaluation_module_names).items() if count > 1
]
duplicate_counter = {name: 0 for name in duplicate_names}
for module_name, result in zip(self.evaluation_module_names, results):
for k, v in result.items():
if k not in duplicate_keys and not self.force_prefix:
merged_results[f"{k}"] = v
elif module_name in duplicate_counter:
merged_results[f"{module_name}_{duplicate_counter[module_name]}_{k}"] = v
else:
merged_results[f"{module_name}_{k}"] = v
if module_name in duplicate_counter:
duplicate_counter[module_name] += 1
return merged_results
|
(evaluation_modules, force_prefix=False)
|
71,081 |
evaluate.module
|
__init__
| null |
def __init__(self, evaluation_modules, force_prefix=False):
from .loading import load # avoid circular imports
self.evaluation_module_names = None
if isinstance(evaluation_modules, list):
self.evaluation_modules = evaluation_modules
elif isinstance(evaluation_modules, dict):
self.evaluation_modules = list(evaluation_modules.values())
self.evaluation_module_names = list(evaluation_modules.keys())
loaded_modules = []
for module in self.evaluation_modules:
if isinstance(module, str):
module = load(module)
loaded_modules.append(module)
self.evaluation_modules = loaded_modules
if self.evaluation_module_names is None:
self.evaluation_module_names = [module.name for module in self.evaluation_modules]
self.force_prefix = force_prefix
|
(self, evaluation_modules, force_prefix=False)
|
71,082 |
evaluate.module
|
_merge_results
| null |
def _merge_results(self, results):
merged_results = {}
results_keys = list(itertools.chain.from_iterable([r.keys() for r in results]))
duplicate_keys = {item for item, count in collections.Counter(results_keys).items() if count > 1}
duplicate_names = [
item for item, count in collections.Counter(self.evaluation_module_names).items() if count > 1
]
duplicate_counter = {name: 0 for name in duplicate_names}
for module_name, result in zip(self.evaluation_module_names, results):
for k, v in result.items():
if k not in duplicate_keys and not self.force_prefix:
merged_results[f"{k}"] = v
elif module_name in duplicate_counter:
merged_results[f"{module_name}_{duplicate_counter[module_name]}_{k}"] = v
else:
merged_results[f"{module_name}_{k}"] = v
if module_name in duplicate_counter:
duplicate_counter[module_name] += 1
return merged_results
|
(self, results)
|
71,083 |
evaluate.module
|
add
|
Add one prediction and reference for each evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
... clf_metrics.add(references=ref, predictions=pred)
```
|
def add(self, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for each evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
... clf_metrics.add(references=ref, predictions=pred)
```
"""
for evaluation_module in self.evaluation_modules:
batch = {"predictions": prediction, "references": reference, **kwargs}
batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
evaluation_module.add(**batch)
|
(self, prediction=None, reference=None, **kwargs)
|
71,084 |
evaluate.module
|
add_batch
|
Add a batch of predictions and references for each evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... clf_metrics.add(references=refs, predictions=preds)
```
|
def add_batch(self, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for each evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... clf_metrics.add(references=refs, predictions=preds)
```
"""
for evaluation_module in self.evaluation_modules:
batch = {"predictions": predictions, "references": references, **kwargs}
batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
evaluation_module.add_batch(**batch)
|
(self, predictions=None, references=None, **kwargs)
|
71,085 |
evaluate.module
|
compute
|
Compute each evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (*optional*):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> clf_metrics.compute(predictions=[0,1], references=[1,1])
{'accuracy': 0.5, 'f1': 0.6666666666666666}
```
|
def compute(self, predictions=None, references=None, **kwargs):
"""Compute each evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (*optional*):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> clf_metrics.compute(predictions=[0,1], references=[1,1])
{'accuracy': 0.5, 'f1': 0.6666666666666666}
```
"""
results = []
for evaluation_module in self.evaluation_modules:
batch = {"predictions": predictions, "references": references, **kwargs}
results.append(evaluation_module.compute(**batch))
return self._merge_results(results)
|
(self, predictions=None, references=None, **kwargs)
|
71,086 |
evaluate.module
|
Comparison
|
A Comparison is the base class and common API for all comparisons.
Args:
config_name (`str`):
This is used to define a hash specific to a comparison computation script and prevents the comparison's data
to be overridden when the comparison loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed settings.
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Comparison.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
max_concurrent_cache_files (`int`):
Max number of concurrent comparison cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in second for distributed setting synchronization.
|
class Comparison(EvaluationModule):
"""A Comparison is the base class and common API for all comparisons.
Args:
config_name (`str`):
This is used to define a hash specific to a comparison computation script and prevents the comparison's data
to be overridden when the comparison loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed settings.
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Comparison.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
max_concurrent_cache_files (`int`):
Max number of concurrent comparison cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in second for distributed setting synchronization.
"""
|
(config_name: Optional[str] = None, keep_in_memory: bool = False, cache_dir: Optional[str] = None, num_process: int = 1, process_id: int = 0, seed: Optional[int] = None, experiment_id: Optional[str] = None, hash: str = None, max_concurrent_cache_files: int = 10000, timeout: Union[int, float] = 100, **kwargs)
|
71,087 |
evaluate.module
|
__del__
| null |
def __del__(self):
if hasattr(self, "filelock") and self.filelock is not None:
self.filelock.release()
if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
self.rendez_vous_lock.release()
if hasattr(self, "writer"): # in case it was already deleted
del self.writer
if hasattr(self, "data"): # in case it was already deleted
del self.data
|
(self)
|
71,088 |
evaluate.module
|
__init__
| null |
def __init__(
self,
config_name: Optional[str] = None,
keep_in_memory: bool = False,
cache_dir: Optional[str] = None,
num_process: int = 1,
process_id: int = 0,
seed: Optional[int] = None,
experiment_id: Optional[str] = None,
hash: str = None,
max_concurrent_cache_files: int = 10000,
timeout: Union[int, float] = 100,
**kwargs,
):
# prepare info
self.config_name = config_name or "default"
info = self._info()
info.module_name = camelcase_to_snakecase(self.__class__.__name__)
info.config_name = self.config_name
info.experiment_id = experiment_id or "default_experiment"
EvaluationModuleInfoMixin.__init__(self, info) # For easy access on low level
# Safety checks on num_process and process_id
if not isinstance(process_id, int) or process_id < 0:
raise ValueError("'process_id' should be a number greater than 0")
if not isinstance(num_process, int) or num_process <= process_id:
raise ValueError("'num_process' should be a number greater than process_id")
if keep_in_memory and num_process != 1:
raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
self.num_process = num_process
self.process_id = process_id
self.max_concurrent_cache_files = max_concurrent_cache_files
self.keep_in_memory = keep_in_memory
self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
self.data_dir = self._build_data_dir()
if seed is None:
_, seed, pos, *_ = np.random.get_state()
self.seed: int = seed[pos] if pos < 624 else seed[0]
else:
self.seed: int = seed
self.timeout: Union[int, float] = timeout
# Update 'compute' and 'add' docstring
# methods need to be copied otherwise it changes the docstrings of every instance
self.compute = types.MethodType(copyfunc(self.compute), self)
self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
self.add = types.MethodType(copyfunc(self.add), self)
self.compute.__func__.__doc__ += self.info.inputs_description
self.add_batch.__func__.__doc__ += self.info.inputs_description
self.add.__func__.__doc__ += self.info.inputs_description
# self.arrow_schema = pa.schema(field for field in self.info.features.type)
self.selected_feature_format = None
self.buf_writer = None
self.writer = None
self.writer_batch_size = None
self.data = None
# This is the cache file we store our predictions/references in
# Keep it None for now so we can (cloud)pickle the object
self.cache_file_name = None
self.filelock = None
self.rendez_vous_lock = None
# This is all the cache files on which we have a lock when we are in a distributed setting
self.file_paths = None
self.filelocks = None
# This fingerprints the evaluation module according to the hashed contents of the module code
self._hash = hash
|
(self, config_name: Optional[str] = None, keep_in_memory: bool = False, cache_dir: Optional[str] = None, num_process: int = 1, process_id: int = 0, seed: Optional[int] = None, experiment_id: Optional[str] = None, hash: Optional[str] = None, max_concurrent_cache_files: int = 10000, timeout: Union[int, float] = 100, **kwargs)
|
71,089 |
evaluate.module
|
__len__
|
Return the number of examples (predictions or predictions/references pair)
currently stored in the evaluation module's cache.
|
def __len__(self):
"""Return the number of examples (predictions or predictions/references pair)
currently stored in the evaluation module's cache.
"""
return 0 if self.writer is None else len(self.writer)
|
(self)
|
71,090 |
evaluate.module
|
__repr__
| null |
def __repr__(self):
return (
f'EvaluationModule(name: "{self.name}", module_type: "{self.module_type}", '
f'features: {self.features}, usage: """{self.inputs_description}""", '
f"stored examples: {len(self)})"
)
|
(self)
|
71,091 |
evaluate.module
|
_build_data_dir
|
Path of this evaluation module in cache_dir:
Will be:
self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
|
def _build_data_dir(self):
"""Path of this evaluation module in cache_dir:
Will be:
self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
"""
builder_data_dir = self._data_dir_root
builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
os.makedirs(builder_data_dir, exist_ok=True)
return builder_data_dir
|
(self)
|
71,092 |
evaluate.module
|
_check_all_processes_locks
| null |
def _check_all_processes_locks(self):
expected_lock_file_names = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
for process_id in range(self.num_process)
]
for expected_lock_file_name in expected_lock_file_names:
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
|
(self)
|
71,093 |
evaluate.module
|
_check_rendez_vous
| null |
def _check_rendez_vous(self):
expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
rendez_vous_lock = FileLock(lock_file_name)
try:
rendez_vous_lock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
else:
rendez_vous_lock.release()
|
(self)
|
71,094 |
evaluate.module
|
_compute
|
This method defines the common API for all the evaluation module in the library
|
def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
"""This method defines the common API for all the evaluation module in the library"""
raise NotImplementedError
|
(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]
|
71,095 |
evaluate.module
|
_create_cache_file
|
Create a new cache file. If the default cache file is used, we generated a new hash.
|
def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
"""Create a new cache file. If the default cache file is used, we generated a new hash."""
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
filelock = None
for i in range(self.max_concurrent_cache_files):
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=timeout)
except Timeout:
# If we have reached the max number of attempts or we are not allow to find a free name (distributed setup)
# We raise an error
if self.num_process != 1:
raise ValueError(
f"Error in _create_cache_file: another evaluation module instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed evaluation module instances."
) from None
if i == self.max_concurrent_cache_files - 1:
raise ValueError(
f"Cannot acquire lock, too many evaluation module instance are operating concurrently on this file system."
f"You should set a larger value of max_concurrent_cache_files when creating the evaluation module "
f"(current value is {self.max_concurrent_cache_files})."
) from None
# In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name.
file_uuid = str(uuid.uuid4())
file_path = os.path.join(
self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
)
else:
break
return file_path, filelock
|
(self, timeout=1) -> Tuple[str, datasets.utils._filelock.FileLock]
|
71,096 |
evaluate.module
|
_download_and_prepare
|
Downloads and prepares resources for the evaluation module.
This is the internal implementation to overwrite called when user calls
`download_and_prepare`. It should download all required resources for the evaluation module.
Args:
dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
|
def _download_and_prepare(self, dl_manager):
"""Downloads and prepares resources for the evaluation module.
This is the internal implementation to overwrite called when user calls
`download_and_prepare`. It should download all required resources for the evaluation module.
Args:
dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
"""
return None
|
(self, dl_manager)
|
71,097 |
evaluate.module
|
_enforce_nested_string_type
|
Recursively checks whether any Value feature is of type string and raises TypeError if the corresponding object is not a string.
Since any Python object can be cast to a string, this avoids implicitly casting wrong input types (e.g. lists) to string without error.
|
def _enforce_nested_string_type(self, schema, obj):
"""
Recursively checks whether any Value feature is of type string and raises TypeError if the corresponding object is not a string.
Since any Python object can be cast to a string, this avoids implicitly casting wrong input types (e.g. lists) to string without error.
"""
# Nested structures: we allow dict, list, tuples, sequences
if isinstance(schema, dict):
return [self._enforce_nested_string_type(sub_schema, o) for k, (sub_schema, o) in zip_dict(schema, obj)]
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
return [self._enforce_nested_string_type(sub_schema, o) for o in obj]
elif isinstance(schema, Sequence):
# We allow reversing a list of dicts => dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k, dict_tuples in zip_dict(schema.feature, *obj):
for sub_obj in dict_tuples[1:]:
if _check_non_null_non_empty_recursive(sub_obj, dict_tuples[0]):
self._enforce_nested_string_type(dict_tuples[0], sub_obj)
break
return None
else:
# obj is a single dict
for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
for sub_obj in sub_objs:
if _check_non_null_non_empty_recursive(sub_obj, sub_schema):
self._enforce_nested_string_type(sub_schema, sub_obj)
break
return None
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
break
if not isinstance(first_elmt, list):
return self._enforce_nested_string_type(schema.feature, first_elmt)
elif isinstance(schema, Value):
if pa.types.is_string(schema.pa_type) and not isinstance(obj, str):
raise TypeError(f"Expected type str but got {type(obj)}.")
|
(self, schema, obj)
|
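A self-contained sketch of the same guard, using plain-Python schema descriptors instead of `datasets` features (the function and schema notation here are illustrative only):
```py
def enforce_nested_string_type(schema, obj):
    """Raise TypeError when a slot declared as "string" receives a non-string value.

    `schema` is a stand-in for a features object: a dict of sub-schemas, a
    one-element list for sequences, or the literal "string".
    """
    if isinstance(schema, dict):
        for key, sub_schema in schema.items():
            enforce_nested_string_type(sub_schema, obj[key])
    elif isinstance(schema, (list, tuple)):
        for item in obj:
            enforce_nested_string_type(schema[0], item)
    elif schema == "string" and not isinstance(obj, str):
        raise TypeError(f"Expected type str but got {type(obj)}.")


enforce_nested_string_type({"predictions": ["string"]}, {"predictions": ["a", "b"]})   # passes
# enforce_nested_string_type({"predictions": ["string"]}, {"predictions": [["a"]]})    # raises TypeError
```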
71,098 |
evaluate.module
|
_feature_names
| null |
def _feature_names(self):
if isinstance(self.features, list):
feature_names = list(self.features[0].keys())
else:
feature_names = list(self.features.keys())
return feature_names
|
(self)
|
71,099 |
evaluate.module
|
_finalize
|
Close all the writing processes and load/gather the data
from all the nodes if main node or all_process is True.
|
def _finalize(self):
"""Close all the writing process and load/gather the data
from all the nodes if main node or all_process is True.
"""
if self.writer is not None:
self.writer.finalize()
self.writer = None
# release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
if self.filelock is not None and self.process_id > 0:
self.filelock.release()
if self.keep_in_memory:
# Read the predictions and references
reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.selected_feature_format))
self.data = Dataset.from_buffer(self.buf_writer.getvalue())
elif self.process_id == 0:
# Let's acquire a lock on each node's files to be sure they are finished writing
file_paths, filelocks = self._get_all_cache_files()
# Read the predictions and references
try:
reader = ArrowReader(path="", info=DatasetInfo(features=self.selected_feature_format))
self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
except FileNotFoundError:
raise ValueError(
"Error in finalize: another evaluation module instance is already using the local cache file. "
"Please specify an experiment_id to avoid collision between distributed evaluation module instances."
) from None
# Store file paths and locks and we will release/delete them after the computation.
self.file_paths = file_paths
self.filelocks = filelocks
|
(self)
|
71,100 |
evaluate.module
|
_get_all_cache_files
|
Get a lock on all the cache files in a distributed setup.
We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
|
def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
"""Get a lock on all the cache files in a distributed setup.
We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
"""
if self.num_process == 1:
if self.cache_file_name is None:
raise ValueError(
"Evaluation module cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
"at least once before calling `compute`."
)
file_paths = [self.cache_file_name]
else:
file_paths = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
for process_id in range(self.num_process)
]
# Let's acquire a lock on each process's file to be sure they are finished writing
filelocks = []
for process_id, file_path in enumerate(file_paths):
if process_id == 0: # process 0 already has its lock file
filelocks.append(self.filelock)
else:
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Cannot acquire lock on cached file {file_path} for process {process_id}."
) from None
else:
filelocks.append(filelock)
return file_paths, filelocks
|
(self) -> Tuple[List[str], List[datasets.utils._filelock.FileLock]]
|
71,101 |
evaluate.module
|
_infer_feature_from_batch
| null |
def _infer_feature_from_batch(self, batch):
if isinstance(self.features, Features):
return self.features
else:
example = dict([(k, v[0]) for k, v in batch.items()])
return self._infer_feature_from_example(example)
|
(self, batch)
|
71,102 |
evaluate.module
|
_infer_feature_from_example
| null |
def _infer_feature_from_example(self, example):
if isinstance(self.features, Features):
return self.features
else:
for features in self.features:
try:
self._enforce_nested_string_type(features, example)
features.encode_example(example)
return features
except (ValueError, TypeError):
continue
feature_strings = "\n".join([f"Feature option {i}: {feature}" for i, feature in enumerate(self.features)])
error_msg = (
f"Predictions and/or references don't match the expected format.\n"
f"Expected format:\n{feature_strings},\n"
f"Input predictions: {summarize_if_long_list(example['predictions'])},\n"
f"Input references: {summarize_if_long_list(example['references'])}"
)
raise ValueError(error_msg) from None
|
(self, example)
|
71,103 |
evaluate.module
|
_info
|
Construct the EvaluationModuleInfo object. See `EvaluationModuleInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (EvaluationModuleInfo) The EvaluationModule information
|
def _info(self) -> EvaluationModuleInfo:
"""Construct the EvaluationModuleInfo object. See `EvaluationModuleInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (EvaluationModuleInfo) The EvaluationModule information
"""
raise NotImplementedError
|
(self) -> evaluate.info.EvaluationModuleInfo
|
71,104 |
evaluate.module
|
_init_writer
| null |
def _init_writer(self, timeout=1):
if self.num_process > 1:
if self.process_id == 0:
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
self.rendez_vous_lock = FileLock(file_path)
try:
self.rendez_vous_lock.acquire(timeout=timeout)
except TimeoutError:
raise ValueError(
f"Error in _init_writer: another evalution module instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed evaluation module instances."
) from None
if self.keep_in_memory:
self.buf_writer = pa.BufferOutputStream()
self.writer = ArrowWriter(
features=self.selected_feature_format, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
)
else:
self.buf_writer = None
# Get cache file name and lock it
if self.cache_file_name is None or self.filelock is None:
cache_file_name, filelock = self._create_cache_file() # get ready
self.cache_file_name = cache_file_name
self.filelock = filelock
self.writer = ArrowWriter(
features=self.selected_feature_format,
path=self.cache_file_name,
writer_batch_size=self.writer_batch_size,
)
# Setup rendez-vous here if in a distributed setting
if self.num_process > 1:
if self.process_id == 0:
self._check_all_processes_locks() # wait for everyone to be ready
self.rendez_vous_lock.release() # let everyone go
else:
self._check_rendez_vous() # wait for master to be ready and to let everyone go
|
(self, timeout=1)
|
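The writer and rendez-vous logic is driven entirely by `num_process`, `process_id`, and `experiment_id`. A hedged sketch of a typical distributed setup (the rank variable and experiment id are placeholders):
```py
import evaluate

rank = 0  # placeholder: normally obtained from the launcher, e.g. torch.distributed.get_rank()
metric = evaluate.load(
    "accuracy",
    num_process=8,                       # total number of nodes
    process_id=rank,                     # this node's id in [0, num_process)
    experiment_id="my-distributed-run",  # shared id so cache files don't collide
)
metric.add_batch(predictions=[0, 1], references=[0, 1])
result = metric.compute()  # dict on process 0, None on the other processes
```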
71,105 |
evaluate.module
|
add
|
Add one prediction and reference for the evaluation module's stack.
Args:
prediction (`list/array/tensor`, *optional*):
Predictions.
reference (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.add(references=[0,1], predictions=[1,0])
```
|
def add(self, *, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for the evaluation module's stack.
Args:
prediction (`list/array/tensor`, *optional*):
Predictions.
reference (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.add(references=[0,1], predictions=[1,0])
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
if bad_inputs:
raise ValueError(
f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
)
example = {"predictions": prediction, "references": reference, **kwargs}
example = {input_name: example[input_name] for input_name in self._feature_names()}
if self.writer is None:
self.selected_feature_format = self._infer_feature_from_example(example)
self._init_writer()
try:
self._enforce_nested_string_type(self.selected_feature_format, example)
example = self.selected_feature_format.encode_example(example)
self.writer.write(example)
except (pa.ArrowInvalid, TypeError):
error_msg = (
f"Evaluation module inputs don't match the expected format.\n"
f"Expected format: {self.selected_feature_format},\n"
)
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(example[input_name])}"
for input_name in self.selected_feature_format
)
error_msg += error_msg_inputs
raise ValueError(error_msg) from None
|
(self, *, prediction=None, reference=None, **kwargs)
|
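A small end-to-end example of the incremental `add` API, accumulating one prediction/reference pair per iteration before a final `compute` (the predictions and labels are made up):
```py
import evaluate

accuracy = evaluate.load("accuracy")
model_outputs = [1, 0, 1, 1]  # placeholder predictions
labels = [1, 0, 0, 1]         # placeholder references
for pred, ref in zip(model_outputs, labels):
    accuracy.add(prediction=pred, reference=ref)
print(accuracy.compute())  # {'accuracy': 0.75}
```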
71,106 |
evaluate.module
|
add_batch
|
Add a batch of predictions and references for the evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... accuracy.add_batch(references=refs, predictions=preds)
```
|
def add_batch(self, *, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for the evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... accuracy.add_batch(references=refs, predictions=preds)
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
if bad_inputs:
raise ValueError(
f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
)
batch = {"predictions": predictions, "references": references, **kwargs}
batch = {input_name: batch[input_name] for input_name in self._feature_names()}
if self.writer is None:
self.selected_feature_format = self._infer_feature_from_batch(batch)
self._init_writer()
try:
for key, column in batch.items():
if len(column) > 0:
self._enforce_nested_string_type(self.selected_feature_format[key], column[0])
batch = self.selected_feature_format.encode_batch(batch)
self.writer.write_batch(batch)
except (pa.ArrowInvalid, TypeError):
if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
col0 = next(iter(batch))
bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
error_msg = (
f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
)
elif set(self.selected_feature_format) != {"references", "predictions"}:
error_msg = (
f"Module inputs don't match the expected format.\n"
f"Expected format: {self.selected_feature_format },\n"
)
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(batch[input_name])}"
for input_name in self.selected_feature_format
)
error_msg += error_msg_inputs
else:
error_msg = (
f"Predictions and/or references don't match the expected format.\n"
f"Expected format: {self.selected_feature_format },\n"
f"Input predictions: {summarize_if_long_list(predictions)},\n"
f"Input references: {summarize_if_long_list(references)}"
)
raise ValueError(error_msg) from None
|
(self, *, predictions=None, references=None, **kwargs)
|
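The batched counterpart is typically used inside an evaluation loop where the model produces one batch of predictions at a time; a sketch with made-up batches:
```py
import evaluate

f1 = evaluate.load("f1")
batches = [([0, 1, 1], [0, 1, 0]), ([1, 0], [1, 0])]  # (predictions, references) per batch
for preds, refs in batches:
    f1.add_batch(predictions=preds, references=refs)
print(f1.compute())  # single result over all accumulated batches
```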
71,107 |
evaluate.module
|
compute
|
Compute the evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (optional):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
```
|
def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
"""Compute the evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (optional):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
```
"""
all_kwargs = {"predictions": predictions, "references": references, **kwargs}
if predictions is None and references is None:
missing_kwargs = {k: None for k in self._feature_names() if k not in all_kwargs}
all_kwargs.update(missing_kwargs)
else:
missing_inputs = [k for k in self._feature_names() if k not in all_kwargs]
if missing_inputs:
raise ValueError(
f"Evaluation module inputs are missing: {missing_inputs}. All required inputs are {list(self._feature_names())}"
)
inputs = {input_name: all_kwargs[input_name] for input_name in self._feature_names()}
compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self._feature_names()}
if any(v is not None for v in inputs.values()):
self.add_batch(**inputs)
self._finalize()
self.cache_file_name = None
self.filelock = None
self.selected_feature_format = None
if self.process_id == 0:
self.data.set_format(type=self.info.format)
inputs = {input_name: self.data[input_name] for input_name in self._feature_names()}
with temp_seed(self.seed):
output = self._compute(**inputs, **compute_kwargs)
if self.buf_writer is not None:
self.buf_writer = None
del self.data
self.data = None
else:
# Release locks and delete all the cache files. Process 0 is released last.
for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
logger.info(f"Removing {file_path}")
del self.data
self.data = None
del self.writer
self.writer = None
os.remove(file_path)
filelock.release()
return output
else:
return None
|
(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]
|
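Keyword arguments that are not input columns are forwarded to `_compute`, which is how metric-specific options are passed. A hedged example using the `f1` metric's `average` option:
```py
import evaluate

f1 = evaluate.load("f1")
result = f1.compute(
    predictions=[0, 2, 1, 1],
    references=[0, 1, 2, 1],
    average="macro",  # not an input column, so it is forwarded to `_compute`
)
print(result)
```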
71,108 |
evaluate.module
|
download_and_prepare
|
Downloads and prepares evaluation module for reading.
Args:
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
dl_manager ([`DownloadManager`], *optional*):
Specific download manager to use.
Example:
```py
>>> import evaluate
```
|
def download_and_prepare(
self,
download_config: Optional[DownloadConfig] = None,
dl_manager: Optional[DownloadManager] = None,
):
"""Downloads and prepares evaluation module for reading.
Args:
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
dl_manager ([`DownloadManager`], *optional*):
Specific download manager to use.
Example:
```py
>>> import evaluate
```
"""
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig()
download_config.cache_dir = os.path.join(self.data_dir, "downloads")
download_config.force_download = False
dl_manager = DownloadManager(
dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
)
self._download_and_prepare(dl_manager)
|
(self, download_config: Optional[datasets.download.download_config.DownloadConfig] = None, dl_manager: Optional[datasets.download.download_manager.DownloadManager] = None)
|
71,109 |
evaluate.info
|
ComparisonInfo
|
Information about a comparison.
`EvaluationModuleInfo` documents a comparison, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
|
class ComparisonInfo(EvaluationModuleInfo):
"""Information about a comparison.
`EvaluationModuleInfo` documents a comparison, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
module_type: str = "comparison"
|
(description: str, citation: str, features: Union[datasets.features.features.Features, List[datasets.features.features.Features]], inputs_description: str = <factory>, homepage: str = <factory>, license: str = <factory>, codebase_urls: List[str] = <factory>, reference_urls: List[str] = <factory>, streamable: bool = False, format: Optional[str] = None, module_type: str = 'comparison', module_name: Optional[str] = None, config_name: Optional[str] = None, experiment_id: Optional[str] = None) -> None
|
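Comparisons follow the same `EvaluationModule` API but take two sets of predictions. A hedged example, assuming the `exact_match` comparison published on the Hub:
```py
import evaluate

# Loads a module whose info is a `ComparisonInfo` (module_type == "comparison").
comparison = evaluate.load("exact_match", module_type="comparison")
print(comparison.info.module_type)  # "comparison"
results = comparison.compute(predictions1=[0, 1, 1], predictions2=[0, 1, 0])
```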
71,110 |
evaluate.info
|
__eq__
| null |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" EvaluationModuleInfo records information we know about a dataset and a metric.
"""
import dataclasses
import json
import os
from dataclasses import asdict, dataclass, field
from typing import List, Optional, Union
from datasets.features import Features, Value
from . import config
from .utils.logging import get_logger
logger = get_logger(__name__)
@dataclass
class EvaluationModuleInfo:
"""Base class to store information about an evaluation used for `MetricInfo`, `ComparisonInfo`,
and `MeasurementInfo`.
`EvaluationModuleInfo` documents an evaluation, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
# Set in the dataset scripts
description: str
citation: str
features: Union[Features, List[Features]]
inputs_description: str = field(default_factory=str)
homepage: str = field(default_factory=str)
license: str = field(default_factory=str)
codebase_urls: List[str] = field(default_factory=list)
reference_urls: List[str] = field(default_factory=list)
streamable: bool = False
format: Optional[str] = None
module_type: str = "metric" # deprecate this in the future
# Set later by the builder
module_name: Optional[str] = None
config_name: Optional[str] = None
experiment_id: Optional[str] = None
def __post_init__(self):
if self.format is not None:
for key, value in self.features.items():
if not isinstance(value, Value):
raise ValueError(
f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
f"Here {key} is an instance of {value.__class__.__name__}"
)
def write_to_directory(self, metric_info_dir):
"""Write `EvaluationModuleInfo` as JSON to `metric_info_dir`.
Also save the license separately in LICENSE.
Args:
metric_info_dir (`str`):
The directory to save `metric_info_dir` to.
Example:
```py
>>> my_metric.info.write_to_directory("/path/to/directory/")
```
"""
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
json.dump(asdict(self), f)
with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
f.write(self.license)
@classmethod
def from_directory(cls, metric_info_dir) -> "EvaluationModuleInfo":
"""Create `EvaluationModuleInfo` from the JSON file in `metric_info_dir`.
Args:
metric_info_dir (`str`):
The directory containing the `metric_info` JSON file. This
should be the root directory of a specific metric version.
Example:
```py
>>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/")
```
"""
logger.info(f"Loading Metric info from {metric_info_dir}")
if not metric_info_dir:
raise ValueError("Calling EvaluationModuleInfo.from_directory() with undefined metric_info_dir.")
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
metric_info_dict = json.load(f)
return cls.from_dict(metric_info_dict)
@classmethod
def from_dict(cls, metric_info_dict: dict) -> "EvaluationModuleInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
|
(self, other)
|
71,112 |
evaluate.info
|
__post_init__
| null |
def __post_init__(self):
if self.format is not None:
for key, value in self.features.items():
if not isinstance(value, Value):
raise ValueError(
f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
f"Here {key} is an instance of {value.__class__.__name__}"
)
|
(self)
|
71,114 |
evaluate.info
|
write_to_directory
|
Write `EvaluationModuleInfo` as JSON to `metric_info_dir`.
Also save the license separately in LICENSE.
Args:
metric_info_dir (`str`):
The directory to save `metric_info_dir` to.
Example:
```py
>>> my_metric.info.write_to_directory("/path/to/directory/")
```
|
def write_to_directory(self, metric_info_dir):
"""Write `EvaluationModuleInfo` as JSON to `metric_info_dir`.
Also save the license separately in LICENSE.
Args:
metric_info_dir (`str`):
The directory to save `metric_info_dir` to.
Example:
```py
>>> my_metric.info.write_to_directory("/path/to/directory/")
```
"""
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
json.dump(asdict(self), f)
with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
f.write(self.license)
|
(self, metric_info_dir)
|
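A short round trip through `write_to_directory` and `from_directory`; the temporary directory is only for illustration:
```py
import tempfile

import evaluate
from evaluate.info import EvaluationModuleInfo

accuracy = evaluate.load("accuracy")
with tempfile.TemporaryDirectory() as tmp_dir:
    accuracy.info.write_to_directory(tmp_dir)               # writes the JSON info file plus LICENSE
    restored = EvaluationModuleInfo.from_directory(tmp_dir)
    print(restored.module_name)                             # "accuracy"
```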
71,115 |
evaluate.module
|
EvaluationModule
|
An `EvaluationModule` is the base class and common API for metrics, comparisons, and measurements.
Args:
config_name (`str`):
This is used to define a hash specific to a module computation script and prevents the module's data
from being overridden when the module loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed setting.
This is useful to compute the module in distributed setups (in particular non-additive modules like F1).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1).
This is useful to compute the module in distributed setups (in particular non-additive metrics like F1).
seed (`int`, optional):
If specified, this will temporarily set numpy's random seed when [`~evaluate.EvaluationModule.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute the module in distributed setups (in particular non-additive metrics like F1).
hash (`str`):
Used to identify the evaluation module according to the hashed file contents.
max_concurrent_cache_files (`int`):
Max number of concurrent module cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in seconds for distributed setting synchronization.
|
class EvaluationModule(EvaluationModuleInfoMixin):
"""A `EvaluationModule` is the base class and common API for metrics, comparisons, and measurements.
Args:
config_name (`str`):
This is used to define a hash specific to a module computation script and prevents the module's data
to be overridden when the module loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed settings.
This is useful to compute module in distributed setups (in particular non-additive modules like F1).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute module in distributed setups (in particular non-additive metrics like F1).
seed (`int`, optional):
If specified, this will temporarily set numpy's random seed when [`~evaluate.EvaluationModule.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute module in distributed setups (in particular non-additive metrics like F1).
hash (`str`):
Used to identify the evaluation module according to the hashed file contents.
max_concurrent_cache_files (`int`):
Max number of concurrent module cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in second for distributed setting synchronization.
"""
def __init__(
self,
config_name: Optional[str] = None,
keep_in_memory: bool = False,
cache_dir: Optional[str] = None,
num_process: int = 1,
process_id: int = 0,
seed: Optional[int] = None,
experiment_id: Optional[str] = None,
hash: str = None,
max_concurrent_cache_files: int = 10000,
timeout: Union[int, float] = 100,
**kwargs,
):
# prepare info
self.config_name = config_name or "default"
info = self._info()
info.module_name = camelcase_to_snakecase(self.__class__.__name__)
info.config_name = self.config_name
info.experiment_id = experiment_id or "default_experiment"
EvaluationModuleInfoMixin.__init__(self, info) # For easy access on low level
# Safety checks on num_process and process_id
if not isinstance(process_id, int) or process_id < 0:
raise ValueError("'process_id' should be a number greater than 0")
if not isinstance(num_process, int) or num_process <= process_id:
raise ValueError("'num_process' should be a number greater than process_id")
if keep_in_memory and num_process != 1:
raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
self.num_process = num_process
self.process_id = process_id
self.max_concurrent_cache_files = max_concurrent_cache_files
self.keep_in_memory = keep_in_memory
self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
self.data_dir = self._build_data_dir()
if seed is None:
_, seed, pos, *_ = np.random.get_state()
self.seed: int = seed[pos] if pos < 624 else seed[0]
else:
self.seed: int = seed
self.timeout: Union[int, float] = timeout
# Update 'compute' and 'add' docstring
# methods need to be copied otherwise it changes the docstrings of every instance
self.compute = types.MethodType(copyfunc(self.compute), self)
self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
self.add = types.MethodType(copyfunc(self.add), self)
self.compute.__func__.__doc__ += self.info.inputs_description
self.add_batch.__func__.__doc__ += self.info.inputs_description
self.add.__func__.__doc__ += self.info.inputs_description
# self.arrow_schema = pa.schema(field for field in self.info.features.type)
self.selected_feature_format = None
self.buf_writer = None
self.writer = None
self.writer_batch_size = None
self.data = None
# This is the cache file we store our predictions/references in
# Keep it None for now so we can (cloud)pickle the object
self.cache_file_name = None
self.filelock = None
self.rendez_vous_lock = None
# This is all the cache files on which we have a lock when we are in a distributed setting
self.file_paths = None
self.filelocks = None
# This fingerprints the evaluation module according to the hashed contents of the module code
self._hash = hash
def __len__(self):
"""Return the number of examples (predictions or predictions/references pair)
currently stored in the evaluation module's cache.
"""
return 0 if self.writer is None else len(self.writer)
def __repr__(self):
return (
f'EvaluationModule(name: "{self.name}", module_type: "{self.module_type}", '
f'features: {self.features}, usage: """{self.inputs_description}""", '
f"stored examples: {len(self)})"
)
def _build_data_dir(self):
"""Path of this evaluation module in cache_dir:
Will be:
self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
If any of these elements is missing or if ``with_version=False`` the corresponding subfolders are dropped.
"""
builder_data_dir = self._data_dir_root
builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
os.makedirs(builder_data_dir, exist_ok=True)
return builder_data_dir
def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
"""Create a new cache file. If the default cache file is used, we generated a new hash."""
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
filelock = None
for i in range(self.max_concurrent_cache_files):
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=timeout)
except Timeout:
# If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
# We raise an error
if self.num_process != 1:
raise ValueError(
f"Error in _create_cache_file: another evaluation module instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed evaluation module instances."
) from None
if i == self.max_concurrent_cache_files - 1:
raise ValueError(
f"Cannot acquire lock, too many evaluation module instance are operating concurrently on this file system."
f"You should set a larger value of max_concurrent_cache_files when creating the evaluation module "
f"(current value is {self.max_concurrent_cache_files})."
) from None
# In other cases (allowed to find a new file name + not yet at the max number of attempts) we can try to sample a new file name.
file_uuid = str(uuid.uuid4())
file_path = os.path.join(
self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
)
else:
break
return file_path, filelock
def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
"""Get a lock on all the cache files in a distributed setup.
We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
"""
if self.num_process == 1:
if self.cache_file_name is None:
raise ValueError(
"Evaluation module cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
"at least once before calling `compute`."
)
file_paths = [self.cache_file_name]
else:
file_paths = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
for process_id in range(self.num_process)
]
# Let's acquire a lock on each process's file to be sure they are finished writing
filelocks = []
for process_id, file_path in enumerate(file_paths):
if process_id == 0: # process 0 already has its lock file
filelocks.append(self.filelock)
else:
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Cannot acquire lock on cached file {file_path} for process {process_id}."
) from None
else:
filelocks.append(filelock)
return file_paths, filelocks
def _check_all_processes_locks(self):
expected_lock_file_names = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
for process_id in range(self.num_process)
]
for expected_lock_file_name in expected_lock_file_names:
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
def _check_rendez_vous(self):
expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
rendez_vous_lock = FileLock(lock_file_name)
try:
rendez_vous_lock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
else:
rendez_vous_lock.release()
def _finalize(self):
"""Close all the writing process and load/gather the data
from all the nodes if main node or all_process is True.
"""
if self.writer is not None:
self.writer.finalize()
self.writer = None
# release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
if self.filelock is not None and self.process_id > 0:
self.filelock.release()
if self.keep_in_memory:
# Read the predictions and references
reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.selected_feature_format))
self.data = Dataset.from_buffer(self.buf_writer.getvalue())
elif self.process_id == 0:
# Let's acquire a lock on each node's files to be sure they are finished writing
file_paths, filelocks = self._get_all_cache_files()
# Read the predictions and references
try:
reader = ArrowReader(path="", info=DatasetInfo(features=self.selected_feature_format))
self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
except FileNotFoundError:
raise ValueError(
"Error in finalize: another evaluation module instance is already using the local cache file. "
"Please specify an experiment_id to avoid collision between distributed evaluation module instances."
) from None
# Store file paths and locks and we will release/delete them after the computation.
self.file_paths = file_paths
self.filelocks = filelocks
def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
"""Compute the evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (optional):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
```
"""
all_kwargs = {"predictions": predictions, "references": references, **kwargs}
if predictions is None and references is None:
missing_kwargs = {k: None for k in self._feature_names() if k not in all_kwargs}
all_kwargs.update(missing_kwargs)
else:
missing_inputs = [k for k in self._feature_names() if k not in all_kwargs]
if missing_inputs:
raise ValueError(
f"Evaluation module inputs are missing: {missing_inputs}. All required inputs are {list(self._feature_names())}"
)
inputs = {input_name: all_kwargs[input_name] for input_name in self._feature_names()}
compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self._feature_names()}
if any(v is not None for v in inputs.values()):
self.add_batch(**inputs)
self._finalize()
self.cache_file_name = None
self.filelock = None
self.selected_feature_format = None
if self.process_id == 0:
self.data.set_format(type=self.info.format)
inputs = {input_name: self.data[input_name] for input_name in self._feature_names()}
with temp_seed(self.seed):
output = self._compute(**inputs, **compute_kwargs)
if self.buf_writer is not None:
self.buf_writer = None
del self.data
self.data = None
else:
# Release locks and delete all the cache files. Process 0 is released last.
for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
logger.info(f"Removing {file_path}")
del self.data
self.data = None
del self.writer
self.writer = None
os.remove(file_path)
filelock.release()
return output
else:
return None
def add_batch(self, *, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for the evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... accuracy.add_batch(references=refs, predictions=preds)
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
if bad_inputs:
raise ValueError(
f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
)
batch = {"predictions": predictions, "references": references, **kwargs}
batch = {input_name: batch[input_name] for input_name in self._feature_names()}
if self.writer is None:
self.selected_feature_format = self._infer_feature_from_batch(batch)
self._init_writer()
try:
for key, column in batch.items():
if len(column) > 0:
self._enforce_nested_string_type(self.selected_feature_format[key], column[0])
batch = self.selected_feature_format.encode_batch(batch)
self.writer.write_batch(batch)
except (pa.ArrowInvalid, TypeError):
if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
col0 = next(iter(batch))
bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
error_msg = (
f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
)
elif set(self.selected_feature_format) != {"references", "predictions"}:
error_msg = (
f"Module inputs don't match the expected format.\n"
f"Expected format: {self.selected_feature_format },\n"
)
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(batch[input_name])}"
for input_name in self.selected_feature_format
)
error_msg += error_msg_inputs
else:
error_msg = (
f"Predictions and/or references don't match the expected format.\n"
f"Expected format: {self.selected_feature_format },\n"
f"Input predictions: {summarize_if_long_list(predictions)},\n"
f"Input references: {summarize_if_long_list(references)}"
)
raise ValueError(error_msg) from None
def add(self, *, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for the evaluation module's stack.
Args:
prediction (`list/array/tensor`, *optional*):
Predictions.
reference (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.add(references=[0,1], predictions=[1,0])
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
if bad_inputs:
raise ValueError(
f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
)
example = {"predictions": prediction, "references": reference, **kwargs}
example = {input_name: example[input_name] for input_name in self._feature_names()}
if self.writer is None:
self.selected_feature_format = self._infer_feature_from_example(example)
self._init_writer()
try:
self._enforce_nested_string_type(self.selected_feature_format, example)
example = self.selected_feature_format.encode_example(example)
self.writer.write(example)
except (pa.ArrowInvalid, TypeError):
error_msg = (
f"Evaluation module inputs don't match the expected format.\n"
f"Expected format: {self.selected_feature_format},\n"
)
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(example[input_name])}"
for input_name in self.selected_feature_format
)
error_msg += error_msg_inputs
raise ValueError(error_msg) from None
def _infer_feature_from_batch(self, batch):
if isinstance(self.features, Features):
return self.features
else:
example = dict([(k, v[0]) for k, v in batch.items()])
return self._infer_feature_from_example(example)
def _infer_feature_from_example(self, example):
if isinstance(self.features, Features):
return self.features
else:
for features in self.features:
try:
self._enforce_nested_string_type(features, example)
features.encode_example(example)
return features
except (ValueError, TypeError):
continue
feature_strings = "\n".join([f"Feature option {i}: {feature}" for i, feature in enumerate(self.features)])
error_msg = (
f"Predictions and/or references don't match the expected format.\n"
f"Expected format:\n{feature_strings},\n"
f"Input predictions: {summarize_if_long_list(example['predictions'])},\n"
f"Input references: {summarize_if_long_list(example['references'])}"
)
raise ValueError(error_msg) from None
def _feature_names(self):
if isinstance(self.features, list):
feature_names = list(self.features[0].keys())
else:
feature_names = list(self.features.keys())
return feature_names
def _init_writer(self, timeout=1):
if self.num_process > 1:
if self.process_id == 0:
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
self.rendez_vous_lock = FileLock(file_path)
try:
self.rendez_vous_lock.acquire(timeout=timeout)
except TimeoutError:
raise ValueError(
f"Error in _init_writer: another evalution module instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed evaluation module instances."
) from None
if self.keep_in_memory:
self.buf_writer = pa.BufferOutputStream()
self.writer = ArrowWriter(
features=self.selected_feature_format, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
)
else:
self.buf_writer = None
# Get cache file name and lock it
if self.cache_file_name is None or self.filelock is None:
cache_file_name, filelock = self._create_cache_file() # get ready
self.cache_file_name = cache_file_name
self.filelock = filelock
self.writer = ArrowWriter(
features=self.selected_feature_format,
path=self.cache_file_name,
writer_batch_size=self.writer_batch_size,
)
# Setup rendez-vous here if in a distributed setting
if self.num_process > 1:
if self.process_id == 0:
self._check_all_processes_locks() # wait for everyone to be ready
self.rendez_vous_lock.release() # let everyone go
else:
self._check_rendez_vous() # wait for master to be ready and to let everyone go
def _info(self) -> EvaluationModuleInfo:
"""Construct the EvaluationModuleInfo object. See `EvaluationModuleInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (EvaluationModuleInfo) The EvaluationModule information
"""
raise NotImplementedError
def download_and_prepare(
self,
download_config: Optional[DownloadConfig] = None,
dl_manager: Optional[DownloadManager] = None,
):
"""Downloads and prepares evaluation module for reading.
Args:
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
dl_manager ([`DownloadManager`], *optional*):
Specific download manager to use.
Example:
```py
>>> import evaluate
```
"""
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig()
download_config.cache_dir = os.path.join(self.data_dir, "downloads")
download_config.force_download = False
dl_manager = DownloadManager(
dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
)
self._download_and_prepare(dl_manager)
def _download_and_prepare(self, dl_manager):
"""Downloads and prepares resources for the evaluation module.
This is the internal implementation to override, called when the user calls
`download_and_prepare`. It should download all required resources for the evaluation module.
Args:
dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
"""
return None
def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
"""This method defines the common API for all the evaluation module in the library"""
raise NotImplementedError
def __del__(self):
if hasattr(self, "filelock") and self.filelock is not None:
self.filelock.release()
if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
self.rendez_vous_lock.release()
if hasattr(self, "writer"): # in case it was already deleted
del self.writer
if hasattr(self, "data"): # in case it was already deleted
del self.data
def _enforce_nested_string_type(self, schema, obj):
"""
Recursively checks whether any Value feature is of type string and raises TypeError if the corresponding object is not a string.
Since any Python object can be cast to a string, this avoids implicitly casting wrong input types (e.g. lists) to string without error.
"""
# Nested structures: we allow dict, list, tuples, sequences
if isinstance(schema, dict):
return [self._enforce_nested_string_type(sub_schema, o) for k, (sub_schema, o) in zip_dict(schema, obj)]
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
return [self._enforce_nested_string_type(sub_schema, o) for o in obj]
elif isinstance(schema, Sequence):
# We allow reversing a list of dicts => dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k, dict_tuples in zip_dict(schema.feature, *obj):
for sub_obj in dict_tuples[1:]:
if _check_non_null_non_empty_recursive(sub_obj, dict_tuples[0]):
self._enforce_nested_string_type(dict_tuples[0], sub_obj)
break
return None
else:
# obj is a single dict
for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
for sub_obj in sub_objs:
if _check_non_null_non_empty_recursive(sub_obj, sub_schema):
self._enforce_nested_string_type(sub_schema, sub_obj)
break
return None
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
break
if not isinstance(first_elmt, list):
return self._enforce_nested_string_type(schema.feature, first_elmt)
elif isinstance(schema, Value):
if pa.types.is_string(schema.pa_type) and not isinstance(obj, str):
raise TypeError(f"Expected type str but got {type(obj)}.")
|
(config_name: Optional[str] = None, keep_in_memory: bool = False, cache_dir: Optional[str] = None, num_process: int = 1, process_id: int = 0, seed: Optional[int] = None, experiment_id: Optional[str] = None, hash: str = None, max_concurrent_cache_files: int = 10000, timeout: Union[int, float] = 100, **kwargs)
|
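Concrete modules are built by subclassing and implementing `_info` and `_compute`. A minimal sketch, assuming the usual `evaluate.Metric`/`evaluate.MetricInfo` subclasses; the metric itself is invented for illustration:
```py
import datasets
import evaluate


class MaxError(evaluate.Metric):
    """Illustrative module: largest absolute difference between predictions and references."""

    def _info(self):
        return evaluate.MetricInfo(
            description="Maximum absolute error.",
            citation="",
            inputs_description="`predictions` and `references` are lists of floats.",
            features=datasets.Features(
                {"predictions": datasets.Value("float32"), "references": datasets.Value("float32")}
            ),
        )

    def _compute(self, predictions, references):
        return {"max_error": max(abs(p - r) for p, r in zip(predictions, references))}


metric = MaxError()
print(metric.compute(predictions=[1.0, 2.5], references=[1.0, 2.0]))  # {'max_error': 0.5}
```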
71,138 |
evaluate.info
|
EvaluationModuleInfo
|
Base class to store information about an evaluation used for `MetricInfo`, `ComparisonInfo`,
and `MeasurementInfo`.
`EvaluationModuleInfo` documents an evaluation, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
|
class EvaluationModuleInfo:
"""Base class to store information about an evaluation used for `MetricInfo`, `ComparisonInfo`,
and `MeasurementInfo`.
`EvaluationModuleInfo` documents an evaluation, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
# Set in the dataset scripts
description: str
citation: str
features: Union[Features, List[Features]]
inputs_description: str = field(default_factory=str)
homepage: str = field(default_factory=str)
license: str = field(default_factory=str)
codebase_urls: List[str] = field(default_factory=list)
reference_urls: List[str] = field(default_factory=list)
streamable: bool = False
format: Optional[str] = None
module_type: str = "metric" # deprecate this in the future
# Set later by the builder
module_name: Optional[str] = None
config_name: Optional[str] = None
experiment_id: Optional[str] = None
def __post_init__(self):
if self.format is not None:
for key, value in self.features.items():
if not isinstance(value, Value):
raise ValueError(
f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
f"Here {key} is an instance of {value.__class__.__name__}"
)
def write_to_directory(self, metric_info_dir):
"""Write `EvaluationModuleInfo` as JSON to `metric_info_dir`.
Also save the license separately in LICENSE.
Args:
metric_info_dir (`str`):
The directory to save `metric_info_dir` to.
Example:
```py
>>> my_metric.info.write_to_directory("/path/to/directory/")
```
"""
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
json.dump(asdict(self), f)
with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
f.write(self.license)
@classmethod
def from_directory(cls, metric_info_dir) -> "EvaluationModuleInfo":
"""Create `EvaluationModuleInfo` from the JSON file in `metric_info_dir`.
Args:
metric_info_dir (`str`):
The directory containing the `metric_info` JSON file. This
should be the root directory of a specific metric version.
Example:
```py
>>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/")
```
"""
logger.info(f"Loading Metric info from {metric_info_dir}")
if not metric_info_dir:
raise ValueError("Calling EvaluationModuleInfo.from_directory() with undefined metric_info_dir.")
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
metric_info_dict = json.load(f)
return cls.from_dict(metric_info_dict)
@classmethod
def from_dict(cls, metric_info_dict: dict) -> "EvaluationModuleInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
|
(description: str, citation: str, features: Union[datasets.features.features.Features, List[datasets.features.features.Features]], inputs_description: str = <factory>, homepage: str = <factory>, license: str = <factory>, codebase_urls: List[str] = <factory>, reference_urls: List[str] = <factory>, streamable: bool = False, format: Optional[str] = None, module_type: str = 'metric', module_name: Optional[str] = None, config_name: Optional[str] = None, experiment_id: Optional[str] = None) -> None
|
71,144 |
evaluate.evaluation_suite
|
EvaluationSuite
|
This class instantiates an evaluation suite made up of multiple tasks, where each task consists of a dataset and
an associated metric, and runs evaluation on a model or pipeline. Evaluation suites are defined in a Python script
that can be found either locally or uploaded as a Space on the Hugging Face Hub.
Usage:
```python
from evaluate import EvaluationSuite
suite = EvaluationSuite.load("evaluate/evaluation-suite-ci")
results = suite.run("lvwerra/distilbert-imdb")
```
|
class EvaluationSuite:
"""
This class instantiates an evaluation suite made up of multiple tasks, where each task consists of a dataset and
an associated metric, and runs evaluation on a model or pipeline. Evaluation suites are defined in a Python script
that can be found either locally or uploaded as a Space on the Hugging Face Hub.
Usage:
```python
from evaluate import EvaluationSuite
suite = EvaluationSuite.load("evaluate/evaluation-suite-ci")
results = suite.run("lvwerra/distilbert-imdb")
```
"""
def __init__(self, name):
self.name = name
@staticmethod
def load(
path: str,
download_mode: Optional[DownloadMode] = None,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
):
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
evaluation_module = evaluation_module_factory(
path, module_type=None, revision=revision, download_config=download_config, download_mode=download_mode
)
name = Path(path).stem
evaluation_cls = import_main_class(evaluation_module.module_path)
evaluation_instance = evaluation_cls(name)
return evaluation_instance
def __repr__(self):
self.tasks = [str(task) for task in self.suite]
return f'EvaluationSuite name: "{self.name}", ' f"Tasks: {self.tasks})"
def assert_suite_nonempty(self):
if not self.suite:
raise ValueError(
"No evaluation tasks found. The EvaluationSuite must include at least one SubTask definition."
)
def run(
self, model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"] # noqa: F821
) -> Dict[str, float]:
self.assert_suite_nonempty()
results_all = []
for task in self.suite:
task_name = task.data
if task.data_preprocessor: # task requires extra preprocessing
ds = load_dataset(task.data, name=task.subset, split=task.split)
task.data = ds.map(task.data_preprocessor)
task_evaluator = evaluator(task.task_type)
args_for_task = task.args_for_task
args_for_task["model_or_pipeline"] = model_or_pipeline
args_for_task["data"] = task.data
args_for_task["subset"] = task.subset
args_for_task["split"] = task.split
results = task_evaluator.compute(**args_for_task)
results["task_name"] = task_name + "/" + task.subset if task.subset else task_name
results["data_preprocessor"] = str(task.data_preprocessor) if task.data_preprocessor is not None else None
results_all.append(results)
return results_all
|
(name)
|
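A suite subclass defines `self.suite` as a list of `SubTask` definitions that `run` iterates over. A hedged sketch (dataset, model checkpoint, and label mapping are illustrative):
```py
import evaluate
from evaluate.evaluation_suite import SubTask


class DemoSuite(evaluate.EvaluationSuite):
    def __init__(self, name):
        super().__init__(name)
        self.suite = [
            SubTask(
                task_type="text-classification",
                data="imdb",
                split="test[:10]",
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    "label_mapping": {"NEGATIVE": 0, "POSITIVE": 1},
                },
            )
        ]


results = DemoSuite("demo-suite").run("lvwerra/distilbert-imdb")
```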
71,146 |
evaluate.evaluation_suite
|
__repr__
| null |
def __repr__(self):
self.tasks = [str(task) for task in self.suite]
return f'EvaluationSuite name: "{self.name}", ' f"Tasks: {self.tasks})"
|
(self)
|
71,147 |
evaluate.evaluation_suite
|
assert_suite_nonempty
| null |
def assert_suite_nonempty(self):
if not self.suite:
raise ValueError(
"No evaluation tasks found. The EvaluationSuite must include at least one SubTask definition."
)
|
(self)
|
71,148 |
evaluate.evaluation_suite
|
load
| null |
@staticmethod
def load(
path: str,
download_mode: Optional[DownloadMode] = None,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
):
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
evaluation_module = evaluation_module_factory(
path, module_type=None, revision=revision, download_config=download_config, download_mode=download_mode
)
name = Path(path).stem
evaluation_cls = import_main_class(evaluation_module.module_path)
evaluation_instance = evaluation_cls(name)
return evaluation_instance
|
(path: str, download_mode: Optional[datasets.download.download_manager.DownloadMode] = None, revision: Union[str, datasets.utils.version.Version, NoneType] = None, download_config: Optional[datasets.download.download_config.DownloadConfig] = None)
|
71,149 |
evaluate.evaluation_suite
|
run
| null |
def run(
self, model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"] # noqa: F821
) -> Dict[str, float]:
self.assert_suite_nonempty()
results_all = []
for task in self.suite:
task_name = task.data
if task.data_preprocessor: # task requires extra preprocessing
ds = load_dataset(task.data, name=task.subset, split=task.split)
task.data = ds.map(task.data_preprocessor)
task_evaluator = evaluator(task.task_type)
args_for_task = task.args_for_task
args_for_task["model_or_pipeline"] = model_or_pipeline
args_for_task["data"] = task.data
args_for_task["subset"] = task.subset
args_for_task["split"] = task.split
results = task_evaluator.compute(**args_for_task)
results["task_name"] = task_name + "/" + task.subset if task.subset else task_name
results["data_preprocessor"] = str(task.data_preprocessor) if task.data_preprocessor is not None else None
results_all.append(results)
return results_all
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]) -> Dict[str, float]
|
71,150 |
evaluate.evaluator.base
|
Evaluator
|
The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across
different evaluators.
Base class implementing evaluator operations.
|
class Evaluator(ABC):
"""
The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across
different evaluators.
Base class implementing evaluator operations.
"""
PIPELINE_KWARGS = {}
METRIC_KWARGS = {}
def __init__(self, task: str, default_metric_name: str = None):
if not TRANSFORMERS_AVAILABLE:
raise ImportError(
"If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`."
)
if not SCIPY_AVAILABLE:
raise ImportError(
"If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`."
)
self.task = task
self.default_metric_name = default_metric_name
@staticmethod
def _compute_confidence_interval(
metric,
metric_inputs,
metric_keys: List[str],
confidence_level: float = 0.95,
n_resamples: int = 9999,
random_state: Optional[int] = None,
) -> Dict[str, Any]:
"""
A utility function enabling the confidence interval calculation for metrics computed
by the evaluator based on `scipy`'s `bootstrap` method.
"""
# bootstrap only works with functions that use args and no kwargs
def build_args_metric(metric, key, **kwargs):
def args_metric(*args):
return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key]
return args_metric
bootstrap_dict = {}
for key in metric_keys:
bs = bootstrap(
data=list(metric_inputs.values()),
statistic=build_args_metric(metric, key, **metric_inputs),
paired=True,
vectorized=False,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
bootstrap_dict[key] = {
"confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high),
"standard_error": bs.standard_error,
}
return bootstrap_dict
@staticmethod
def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]:
"""
A utility function computing time performance metrics:
- `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds,
- `samples_per_second` - pipeline throughput in the number of samples per second.
- `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample,
"""
latency = end_time - start_time
throughput = num_samples / latency
latency_sample = 1.0 / throughput
return {
"total_time_in_seconds": latency,
"samples_per_second": throughput,
"latency_in_seconds": latency_sample,
}
@staticmethod
def _infer_device() -> int:
"""Helper function to check if GPU or CPU is available for inference."""
# try infer with torch first
try:
import torch
if torch.cuda.is_available():
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
# if not available try TF
try:
import tensorflow as tf
if len(tf.config.list_physical_devices("GPU")) > 0:
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
device = -1
if device == -1:
logger.info("No GPU found. The default device for pipeline inference is set to CPU.")
else:
logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).")
return device
@abstractmethod
def predictions_processor(self, *args, **kwargs):
"""
A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric.
"""
raise NotImplementedError()
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Dict[str, float]:
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column)
pipe = self.prepare_pipeline(
model_or_pipeline=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, label_mapping)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
# TODO: To clarify why `wer` and `cer` return float
# even though metric.compute contract says that it
# returns Optional[dict].
if type(metric_results) is float:
metric_results = {metric.name: metric_results}
result.update(metric_results)
result.update(perf_results)
return result
@staticmethod
def check_for_mismatch_in_device_setup(device, model_or_pipeline):
if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline):
if model_or_pipeline.device.type == "cpu":
raise ValueError(
"The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an "
"accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during "
"initialization to use an accelerator, or pass `device=None` to `compute`. "
)
elif device != model_or_pipeline.device.index:
raise ValueError(
f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`."
)
def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]):
"""
Ensure the columns required for the evaluation are present in the dataset.
Args:
data (`str` or [`Dataset`]):
Specifies the dataset we will run evaluation on.
columns_names (`List[str]`):
List of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method,
while the values are the column names to check.
Example:
```py
>>> from datasets import load_dataset
>>> from evaluate import evaluator
>>> data = load_dataset("rotten_tomatoes', split="train")
>>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
```
"""
for input_name, column_name in columns_names.items():
if column_name not in data.column_names:
raise ValueError(
f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}."
)
@staticmethod
def get_dataset_split(data, subset=None, split=None):
"""
Infers which split to use if `None` is given.
Args:
data (`str`):
Name of dataset.
subset (`str`):
Name of config for datasets with multiple configurations (e.g. 'glue/cola').
split (`str`, defaults to `None`):
Split to use.
Returns:
`split`: `str` containing which split to use
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
'test'
```
"""
if split is None:
split = choose_split(data, subset)
logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}")
return split
def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None):
"""
Load dataset with given subset and split.
Args:
data ([`Dataset`] or `str`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of
type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Specifies dataset subset to be passed to `name` in `load_dataset`. To be
used with datasets with several configurations (e.g. glue/sst2).
split (`str`, defaults to `None`):
User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
Returns:
data ([`Dataset`]): Loaded dataset which will be used for evaluation.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
"""
if isinstance(data, str):
split = self.get_dataset_split(data, subset, split)
data = load_dataset(data, name=subset, split=split)
return data
elif isinstance(data, Dataset):
if split is not None or subset is not None:
logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.")
return data
else:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
"""
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column(`str`, *optional*):
The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
Example:
```py
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label")
```
"""
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
return {"references": data[label_column]}, DatasetColumn(data, input_column)
def prepare_pipeline(
self,
model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
device: int = None,
):
"""
Prepare pipeline.
Args:
model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`):
Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
Returns:
The initialized pipeline.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased")
```
"""
if device is None:
device = self._infer_device()
if (
isinstance(model_or_pipeline, str)
or isinstance(model_or_pipeline, transformers.PreTrainedModel)
or isinstance(model_or_pipeline, transformers.TFPreTrainedModel)
):
pipe = pipeline(
self.task,
model=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
else:
if model_or_pipeline is None:
pipe = pipeline(self.task, device=device)
else:
pipe = model_or_pipeline
if tokenizer is not None and feature_extractor is not None:
logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).")
if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")):
raise ValueError(
f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task."
)
return pipe
def prepare_metric(self, metric: Union[str, EvaluationModule]):
"""
Prepare metric.
Args:
metric (`str` or [`EvaluationModule`], defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
Returns:
The loaded metric.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_metric("accuracy")
```
"""
# Prepare metric.
if metric is None:
if self.default_metric_name is None:
raise ValueError(
"`Evaluator` doesn't specify a default metric. Please specify a valid `metric` argument."
)
metric = load(self.default_metric_name)
elif isinstance(metric, str):
metric = load(metric)
return metric
def call_pipeline(self, pipe, *args, **kwargs):
start_time = perf_counter()
pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
end_time = perf_counter()
return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output))
def compute_metric(
self,
metric: EvaluationModule,
metric_inputs: Dict,
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
random_state: Optional[int] = None,
):
"""Compute and return metrics."""
result = metric.compute(**metric_inputs, **self.METRIC_KWARGS)
if strategy == "bootstrap":
metric_keys = result.keys()
bootstrap_dict = self._compute_confidence_interval(
metric,
metric_inputs,
metric_keys,
confidence_level,
n_resamples,
random_state,
)
for key in metric_keys:
bootstrap_dict[key]["score"] = result[key]
return bootstrap_dict
return result
|
(task: str, default_metric_name: str = None)
|
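As a usage sketch for a concrete subclass obtained through the `evaluator(...)` factory (the model and dataset below are placeholders, not prescribed by this class), note that the result combines the metric scores with the timing keys produced by `_compute_time_perf`:
```python
from datasets import load_dataset
from evaluate import evaluator

# Illustrative choices; any text-classification checkpoint and dataset with
# "text"/"label" columns would work the same way.
data = load_dataset("imdb", split="test[:16]")
task_evaluator = evaluator("text-classification")
results = task_evaluator.compute(
    model_or_pipeline="distilbert-base-uncased-finetuned-sst-2-english",
    data=data,
    metric="accuracy",
    label_mapping={"NEGATIVE": 0, "POSITIVE": 1},
)
# Besides "accuracy", `results` also contains "total_time_in_seconds",
# "samples_per_second" and "latency_in_seconds".
```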
71,151 |
evaluate.evaluator.base
|
__init__
| null |
def __init__(self, task: str, default_metric_name: str = None):
if not TRANSFORMERS_AVAILABLE:
raise ImportError(
"If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`."
)
if not SCIPY_AVAILABLE:
raise ImportError(
"If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`."
)
self.task = task
self.default_metric_name = default_metric_name
|
(self, task: str, default_metric_name: Optional[str] = None)
|
71,158 |
evaluate.evaluator.base
|
compute
| null |
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Dict[str, float]:
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column)
pipe = self.prepare_pipeline(
model_or_pipeline=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, label_mapping)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
# TODO: To clarify why `wer` and `cer` return float
# even though metric.compute contract says that it
# returns Optional[dict].
if type(metric_results) is float:
metric_results = {metric.name: metric_results}
result.update(metric_results)
result.update(perf_results)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, feature_extractor: Union[str, ForwardRef('FeatureExtractionMixin'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'text', label_column: str = 'label', label_mapping: Optional[Dict[str, numbers.Number]] = None) -> Dict[str, float]
|
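With `strategy="bootstrap"`, each metric key in the returned dict is itself a dict assembled by `compute_metric` and `_compute_confidence_interval`. A small sketch of consuming such a result (all numbers are hypothetical):
```python
# Hypothetical bootstrap output; the structure mirrors what compute_metric builds.
results = {
    "accuracy": {
        "score": 0.85,
        "confidence_interval": (0.80, 0.89),
        "standard_error": 0.023,
    },
    "total_time_in_seconds": 1.2,
    "samples_per_second": 13.3,
    "latency_in_seconds": 0.075,
}
low, high = results["accuracy"]["confidence_interval"]
print(f"accuracy = {results['accuracy']['score']:.2f} (95% CI: {low:.2f}-{high:.2f})")
```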
71,162 |
evaluate.evaluator.base
|
predictions_processor
|
A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric.
|
@abstractmethod
def predictions_processor(self, *args, **kwargs):
"""
A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric.
"""
raise NotImplementedError()
|
(self, *args, **kwargs)
|
71,166 |
evaluate.evaluator.image_classification
|
ImageClassificationEvaluator
|
Image classification evaluator.
This image classification evaluator can currently be loaded from [`evaluator`] using the default task name
`image-classification`.
Methods in this class assume a data format compatible with the [`ImageClassificationPipeline`].
|
class ImageClassificationEvaluator(Evaluator):
"""
Image classification evaluator.
This image classification evaluator can currently be loaded from [`evaluator`] using the default task name
`image-classification`.
Methods in this class assume a data format compatible with the [`ImageClassificationPipeline`].
"""
PIPELINE_KWARGS = {}
def __init__(self, task="image-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
return {"predictions": pred_label}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "image",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"image"`):
The name of the column containing the images as PIL ImageFile in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
label_mapping=label_mapping,
)
return result
|
(task='image-classification', default_metric_name=None)
|
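To make `predictions_processor` concrete: the pipeline returns one list of `{"label", "score"}` dicts per image, the top-scoring label is kept, and `label_mapping` converts it to the dataset's label ids. The sample values below are made up for illustration.
```python
# Hypothetical pipeline output for two images.
predictions = [
    [{"label": "healthy", "score": 0.91}, {"label": "bean_rust", "score": 0.09}],
    [{"label": "bean_rust", "score": 0.55}, {"label": "healthy", "score": 0.45}],
]
label_mapping = {"angular_leaf_spot": 0, "bean_rust": 1, "healthy": 2}

pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
pred_label = [label_mapping[p] for p in pred_label]
print({"predictions": pred_label})  # {'predictions': [2, 1]}
```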
71,167 |
evaluate.evaluator.image_classification
|
__init__
| null |
def __init__(self, task="image-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='image-classification', default_metric_name=None)
|
71,174 |
evaluate.evaluator.image_classification
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task (in this case
`image-classification`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, defaults to `"image"`):
The name of the column containing the images as PIL ImageFile in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("image-classification")
>>> data = load_dataset("beans", split="test[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="nateraw/vit-base-beans",
>>> data=data,
>>> label_column="labels",
>>> metric="accuracy",
>>> label_mapping={'angular_leaf_spot': 0, 'bean_rust': 1, 'healthy': 2},
>>> strategy="bootstrap"
>>> )
```
|
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "image",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"image"`):
The name of the column containing the images as PIL ImageFile in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
label_mapping=label_mapping,
)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, feature_extractor: Union[str, ForwardRef('FeatureExtractionMixin'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'image', label_column: str = 'label', label_mapping: Optional[Dict[str, numbers.Number]] = None) -> Tuple[Dict[str, float], Any]
|
71,182 |
evaluate.module
|
Measurement
|
A Measurement is the base class and common API for all measurements.
Args:
config_name (`str`):
This is used to define a hash specific to a measurement computation script and prevents the measurement's data
from being overridden when the measurement loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed setting.
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Measurement.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
max_concurrent_cache_files (`int`):
Max number of concurrent measurement cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in seconds for distributed setting synchronization.
|
class Measurement(EvaluationModule):
"""A Measurement is the base class and common API for all measurements.
Args:
config_name (`str`):
This is used to define a hash specific to a measurement computation script and prevents the measurement's data
from being overridden when the measurement loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed setting.
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Measurement.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
max_concurrent_cache_files (`int`):
Max number of concurrent measurement cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in seconds for distributed setting synchronization.
"""
|
(config_name: Optional[str] = None, keep_in_memory: bool = False, cache_dir: Optional[str] = None, num_process: int = 1, process_id: int = 0, seed: Optional[int] = None, experiment_id: Optional[str] = None, hash: str = None, max_concurrent_cache_files: int = 10000, timeout: Union[int, float] = 100, **kwargs)
|
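A minimal usage sketch, assuming the `word_length` measurement module published on the Hub (the exact output keys depend on that module):
```python
import evaluate

word_length = evaluate.load("word_length", module_type="measurement")
results = word_length.compute(data=["hello world", "evaluate measures datasets and models"])
print(results)  # e.g. {'average_word_length': ...}
```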
71,205 |
evaluate.info
|
MeasurementInfo
|
Information about a measurement.
`EvaluationModuleInfo` documents a measurement, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
|
class MeasurementInfo(EvaluationModuleInfo):
"""Information about a measurement.
`EvaluationModuleInfo` documents a measurement, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
module_type: str = "measurement"
|
(description: str, citation: str, features: Union[datasets.features.features.Features, List[datasets.features.features.Features]], inputs_description: str = <factory>, homepage: str = <factory>, license: str = <factory>, codebase_urls: List[str] = <factory>, reference_urls: List[str] = <factory>, streamable: bool = False, format: Optional[str] = None, module_type: str = 'measurement', module_name: Optional[str] = None, config_name: Optional[str] = None, experiment_id: Optional[str] = None) -> None
|
71,211 |
evaluate.module
|
Metric
|
A Metric is the base class and common API for all metrics.
Args:
config_name (`str`):
This is used to define a hash specific to a metric computation script and prevents the metric's data
from being overridden when the metric loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed setting.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Metric.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
max_concurrent_cache_files (`int`):
Max number of concurrent metric cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in seconds for distributed setting synchronization.
|
class Metric(EvaluationModule):
"""A Metric is the base class and common API for all metrics.
Args:
config_name (`str`):
This is used to define a hash specific to a metric computation script and prevents the metric's data
from being overridden when the metric loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
Specify the total number of nodes in a distributed setting.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
process_id (`int`):
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Metric.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
max_concurrent_cache_files (`int`):
Max number of concurrent metric cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in seconds for distributed setting synchronization.
"""
|
(config_name: Optional[str] = None, keep_in_memory: bool = False, cache_dir: Optional[str] = None, num_process: int = 1, process_id: int = 0, seed: Optional[int] = None, experiment_id: Optional[str] = None, hash: str = None, max_concurrent_cache_files: int = 10000, timeout: Union[int, float] = 100, **kwargs)
|
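A sketch of the distributed pattern that `num_process`, `process_id` and `experiment_id` enable; the rank and data shards below are placeholders that would normally come from your launcher (e.g. `torch.distributed`):
```python
import evaluate

rank, world_size = 0, 2  # placeholders; obtain from the distributed launcher in practice

metric = evaluate.load(
    "accuracy",
    num_process=world_size,
    process_id=rank,
    experiment_id="my_distributed_run",  # shared id so all processes use the same cache files
)

# Each process adds only its own shard of predictions/references.
metric.add_batch(predictions=[0, 1, 1], references=[0, 1, 0])

# compute() gathers the shards; the final score is returned on process 0, None elsewhere.
score = metric.compute()
if rank == 0:
    print(score)
```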
71,234 |
evaluate.info
|
MetricInfo
|
Information about a metric.
`EvaluationModuleInfo` documents a metric, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
|
class MetricInfo(EvaluationModuleInfo):
"""Information about a metric.
`EvaluationModuleInfo` documents a metric, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
module_type: str = "metric"
|
(description: str, citation: str, features: Union[datasets.features.features.Features, List[datasets.features.features.Features]], inputs_description: str = <factory>, homepage: str = <factory>, license: str = <factory>, codebase_urls: List[str] = <factory>, reference_urls: List[str] = <factory>, streamable: bool = False, format: Optional[str] = None, module_type: str = 'metric', module_name: Optional[str] = None, config_name: Optional[str] = None, experiment_id: Optional[str] = None) -> None
|
71,240 |
evaluate.evaluator.question_answering
|
QuestionAnsweringEvaluator
|
Question answering evaluator. This evaluator handles
[**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering),
where the answer to the question is extracted from a context.
This question answering evaluator can currently be loaded from [`evaluator`] using the default task name
`question-answering`.
Methods in this class assume a data format compatible with the
[`~transformers.QuestionAnsweringPipeline`].
|
class QuestionAnsweringEvaluator(Evaluator):
"""
Question answering evaluator. This evaluator handles
[**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering),
where the answer to the question is extracted from a context.
This question answering evaluator can currently be loaded from [`evaluator`] using the default task name
`question-answering`.
Methods in this class assume a data format compatible with the
[`~transformers.QuestionAnsweringPipeline`].
"""
PIPELINE_KWARGS = {}
def __init__(self, task="question-answering", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def prepare_data(
self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str
):
"""Prepare data."""
if data is None:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
self.check_required_columns(
data,
{
"question_column": question_column,
"context_column": context_column,
"id_column": id_column,
"label_column": label_column,
},
)
metric_inputs = dict()
metric_inputs["references"] = [
{"id": element[id_column], "answers": element[label_column]} for element in data
]
return metric_inputs, {
"question": DatasetColumn(data, question_column),
"context": DatasetColumn(data, context_column),
}
def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"):
"""
Check if the provided dataset follows the squad v2 data schema, namely possible samples where the answer is not in the context.
In this case, the answer text list should be `[]`.
"""
original_num_rows = data.num_rows
nonempty_num_rows = data.filter(
lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False
).num_rows
return original_num_rows > nonempty_num_rows
def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List):
result = []
for i in range(len(predictions)):
pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]}
if squad_v2_format:
pred["no_answer_probability"] = predictions[i]["score"]
result.append(pred)
return {"predictions": result}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
question_column: str = "question",
context_column: str = "context",
id_column: str = "id",
label_column: str = "answers",
squad_v2_format: Optional[bool] = None,
) -> Tuple[Dict[str, float], Any]:
"""
question_column (`str`, defaults to `"question"`):
The name of the column containing the question in the dataset specified by `data`.
context_column (`str`, defaults to `"context"`):
The name of the column containing the context in the dataset specified by `data`.
id_column (`str`, defaults to `"id"`):
The name of the column containing the identification field of the question and answer pair in the
dataset specified by `data`.
label_column (`str`, defaults to `"answers"`):
The name of the column containing the answers in the dataset specified by `data`.
squad_v2_format (`bool`, *optional*, defaults to `None`):
Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset
has questions where the answer is not in the context, more specifically when the answers are given as
`{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter
should be set to `False`. If this parameter is not provided, the format will be automatically inferred.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data,
question_column=question_column,
context_column=context_column,
id_column=id_column,
label_column=label_column,
)
if squad_v2_format is None:
squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column)
logger.warning(
f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}."
)
pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
metric = self.prepare_metric(metric)
if squad_v2_format and metric.name == "squad":
logger.warning(
"The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric."
)
if not squad_v2_format and metric.name == "squad_v2":
logger.warning(
"The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric."
)
if squad_v2_format:
self.PIPELINE_KWARGS["handle_impossible_answer"] = True
else:
self.PIPELINE_KWARGS["handle_impossible_answer"] = False
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs)
predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column])
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
|
(task='question-answering', default_metric_name=None)
|
71,241 |
evaluate.evaluator.question_answering
|
__init__
| null |
def __init__(self, task="question-answering", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='question-answering', default_metric_name=None)
|
71,248 |
evaluate.evaluator.question_answering
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task (in this case
`question-answering`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
question_column (`str`, defaults to `"question"`):
The name of the column containing the question in the dataset specified by `data`.
context_column (`str`, defaults to `"context"`):
The name of the column containing the context in the dataset specified by `data`.
id_column (`str`, defaults to `"id"`):
The name of the column containing the identification field of the question and answer pair in the
dataset specified by `data`.
label_column (`str`, defaults to `"answers"`):
The name of the column containing the answers in the dataset specified by `data`.
squad_v2_format (`bool`, *optional*, defaults to `None`):
Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset
has questions where the answer is not in the context, more specifically when the answers are given as
`{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter
should be set to `False`. If this parameter is not provided, the format will be automatically inferred.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("question-answering")
>>> data = load_dataset("squad", split="validation[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="sshleifer/tiny-distilbert-base-cased-distilled-squad",
>>> data=data,
>>> metric="squad",
>>> )
```
<Tip>
Datasets where the answer may be missing in the context are supported, for example SQuAD v2 dataset. In this case, it is safer to pass `squad_v2_format=True` to
the compute() call.
</Tip>
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("question-answering")
>>> data = load_dataset("squad_v2", split="validation[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="mrm8488/bert-tiny-finetuned-squadv2",
>>> data=data,
>>> metric="squad_v2",
>>> squad_v2_format=True,
>>> )
```
|
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
question_column: str = "question",
context_column: str = "context",
id_column: str = "id",
label_column: str = "answers",
squad_v2_format: Optional[bool] = None,
) -> Tuple[Dict[str, float], Any]:
"""
question_column (`str`, defaults to `"question"`):
The name of the column containing the question in the dataset specified by `data`.
context_column (`str`, defaults to `"context"`):
The name of the column containing the context in the dataset specified by `data`.
id_column (`str`, defaults to `"id"`):
The name of the column containing the identification field of the question and answer pair in the
dataset specified by `data`.
label_column (`str`, defaults to `"answers"`):
The name of the column containing the answers in the dataset specified by `data`.
squad_v2_format (`bool`, *optional*, defaults to `None`):
Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset
has questions where the answer is not in the context, more specifically when the answers are given as
`{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter
should be set to `False`. If this parameter is not provided, the format will be automatically inferred.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data,
question_column=question_column,
context_column=context_column,
id_column=id_column,
label_column=label_column,
)
if squad_v2_format is None:
squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column)
logger.warning(
f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}."
)
pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
metric = self.prepare_metric(metric)
if squad_v2_format and metric.name == "squad":
logger.warning(
"The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric."
)
if not squad_v2_format and metric.name == "squad_v2":
logger.warning(
"The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric."
)
if squad_v2_format:
self.PIPELINE_KWARGS["handle_impossible_answer"] = True
else:
self.PIPELINE_KWARGS["handle_impossible_answer"] = False
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs)
predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column])
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, question_column: str = 'question', context_column: str = 'context', id_column: str = 'id', label_column: str = 'answers', squad_v2_format: Optional[bool] = None) -> Tuple[Dict[str, float], Any]
|
71,251 |
evaluate.evaluator.question_answering
|
is_squad_v2_format
|
Check if the provided dataset follows the squad v2 data schema, namely possible samples where the answer is not in the context.
In this case, the answer text list should be `[]`.
|
def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"):
"""
Check if the provided dataset follows the squad v2 data schema, namely possible samples where the answer is not in the context.
In this case, the answer text list should be `[]`.
"""
original_num_rows = data.num_rows
nonempty_num_rows = data.filter(
lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False
).num_rows
return original_num_rows > nonempty_num_rows
|
(self, data: datasets.arrow_dataset.Dataset, label_column: str = 'answers')
|
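As a rough illustration with a hypothetical toy dataset: a SQuAD-v2-style unanswerable sample has an empty `text` list in its answers, which is exactly what the filter above detects.
```python
from datasets import Dataset

data = Dataset.from_dict(
    {
        "answers": [
            {"text": ["Paris"], "answer_start": [10]},  # answerable
            {"text": [], "answer_start": []},           # unanswerable -> SQuAD v2 style
        ]
    }
)

nonempty = data.filter(lambda x: len(x["answers"]["text"]) > 0).num_rows
print(data.num_rows > nonempty)  # True, so squad_v2_format would be inferred as True
```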
71,253 |
evaluate.evaluator.question_answering
|
predictions_processor
| null |
def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List):
result = []
for i in range(len(predictions)):
pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]}
if squad_v2_format:
pred["no_answer_probability"] = predictions[i]["score"]
result.append(pred)
return {"predictions": result}
|
(self, predictions: List, squad_v2_format: bool, ids: List)
|
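For illustration, the mapping this method performs when `squad_v2_format=True`, written out on made-up pipeline outputs and ids (all values are hypothetical):
```python
predictions = [
    {"answer": "Denver Broncos", "score": 0.98},  # hypothetical pipeline outputs
    {"answer": "", "score": 0.12},
]
ids = ["q1", "q2"]  # hypothetical example ids

formatted = {
    "predictions": [
        {"prediction_text": p["answer"], "id": i, "no_answer_probability": p["score"]}
        for p, i in zip(predictions, ids)
    ]
}
# `formatted` matches the `predictions` format expected by the squad_v2 metric.
```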
71,254 |
evaluate.evaluator.question_answering
|
prepare_data
|
Prepare data.
|
def prepare_data(
self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str
):
"""Prepare data."""
if data is None:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
self.check_required_columns(
data,
{
"question_column": question_column,
"context_column": context_column,
"id_column": id_column,
"label_column": label_column,
},
)
metric_inputs = dict()
metric_inputs["references"] = [
{"id": element[id_column], "answers": element[label_column]} for element in data
]
return metric_inputs, {
"question": DatasetColumn(data, question_column),
"context": DatasetColumn(data, context_column),
}
|
(self, data: datasets.arrow_dataset.Dataset, question_column: str, context_column: str, id_column: str, label_column: str)
|
71,257 |
evaluate.evaluator.text2text_generation
|
SummarizationEvaluator
|
Text summarization evaluator.
This text summarization evaluator can currently be loaded from [`evaluator`] using the default task name
`summarization`.
Methods in this class assume a data format compatible with the [`SummarizationPipeline`].
|
class SummarizationEvaluator(Text2TextGenerationEvaluator):
"""
Text summarization evaluator.
This text summarization evaluator can currently be loaded from [`evaluator`] using the default task name
`summarization`.
Methods in this class assume a data format compatible with the [`SummarizationPipeline`].
"""
PREDICTION_PREFIX = "summary"
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="summarization", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
SUMMARIZATION_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
generation_kwargs=generation_kwargs,
)
return result
|
(task='summarization', default_metric_name=None)
|
71,258 |
evaluate.evaluator.text2text_generation
|
__init__
| null |
def __init__(self, task="summarization", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='summarization', default_metric_name=None)
|
71,265 |
evaluate.evaluator.text2text_generation
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task (in this case
`summarization`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU; a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, defaults to `"text"`):
the name of the column containing the input text in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("summarization")
>>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="facebook/bart-large-cnn",
>>> data=data,
>>> input_column="article",
>>> label_column="highlights",
>>> )
```
|
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
SUMMARIZATION_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
generation_kwargs=generation_kwargs,
)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'text', label_column: str = 'label', generation_kwargs: dict = None) -> Tuple[Dict[str, float], Any]
|
71,269 |
evaluate.evaluator.text2text_generation
|
predictions_processor
| null |
def predictions_processor(self, predictions, label_mapping):
return {"predictions": [pred[f"{self.PREDICTION_PREFIX}_text"] for pred in predictions]}
|
(self, predictions, label_mapping)
|
71,273 |
evaluate.evaluator.text2text_generation
|
Text2TextGenerationEvaluator
|
Text2Text generation evaluator.
This Text2Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
`text2text-generation`.
Methods in this class assume a data format compatible with the [`~transformers.Text2TextGenerationPipeline`].
|
class Text2TextGenerationEvaluator(Evaluator):
"""
Text2Text generation evaluator.
This Text2Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
`text2text-generation`.
Methods in this class assume a data format compatible with the [`~transformers.Text2TextGenerationPipeline`].
"""
PREDICTION_PREFIX = "generated"
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="text2text-generation", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
return {"predictions": [pred[f"{self.PREDICTION_PREFIX}_text"] for pred in predictions]}
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
TEXT2TEXT_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
if generation_kwargs is not None:
self.PIPELINE_KWARGS.update(generation_kwargs)
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
)
return result
|
(task='text2text-generation', default_metric_name=None)
|
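Since `compute` merges `generation_kwargs` into the pipeline call, decoding options can be tuned per evaluation run. A hedged usage sketch (the model and dataset follow the example used elsewhere in these docstrings):

```python
from datasets import load_dataset
from evaluate import evaluator

task_evaluator = evaluator("text2text-generation")
data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:4]")

results = task_evaluator.compute(
    model_or_pipeline="facebook/bart-large-cnn",
    data=data,
    input_column="article",
    label_column="highlights",
    metric="rouge",
    # Forwarded to the pipeline call; controls the decoding strategy
    generation_kwargs={"max_length": 64, "num_beams": 2},
)
```

Note that `generation_kwargs` are merged into the class-level `PIPELINE_KWARGS` dict, so they appear to persist for subsequent `compute` calls on evaluators of the same class.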
71,274 |
evaluate.evaluator.text2text_generation
|
__init__
| null |
def __init__(self, task="text2text-generation", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='text2text-generation', default_metric_name=None)
|
71,281 |
evaluate.evaluator.text2text_generation
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task (in this case
`text2text-generation`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU; a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, defaults to `"text"`):
the name of the column containing the input text in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("text2text-generation")
>>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="facebook/bart-large-cnn",
>>> data=data,
>>> input_column="article",
>>> label_column="highlights",
>>> metric="rouge",
>>> )
```
|
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
TEXT2TEXT_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
if generation_kwargs is not None:
self.PIPELINE_KWARGS.update(generation_kwargs)
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'text', label_column: str = 'label', generation_kwargs: dict = None) -> Tuple[Dict[str, float], Any]
|
71,289 |
evaluate.evaluator.text_classification
|
TextClassificationEvaluator
|
Text classification evaluator.
This text classification evaluator can currently be loaded from [`evaluator`] using the default task name
`text-classification` or with a `"sentiment-analysis"` alias.
Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual
feature as input and a categorical label as output.
|
class TextClassificationEvaluator(Evaluator):
"""
Text classification evaluator.
This text classification evaluator can currently be loaded from [`evaluator`] using the default task name
`text-classification` or with a `"sentiment-analysis"` alias.
Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual
feature as input and a categorical label as output.
"""
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="text-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str):
if data is None:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
if second_input_column is not None:
self.check_required_columns(data, {"second_input_column": second_input_column})
data = load_dataset(data) if isinstance(data, str) else data
return {"references": data[label_column]}, DatasetColumnPair(
data, input_column, second_input_column, "text", "text_pair"
)
def predictions_processor(self, predictions, label_mapping):
predictions = [
label_mapping[element["label"]] if label_mapping is not None else element["label"]
for element in predictions
]
return {"predictions": predictions}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
second_input_column: Optional[str] = None,
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, *optional*, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column (`str`, *optional*, defaults to `None`):
The name of the second column containing the text features. This may be useful for classification tasks
such as MNLI, where two columns are used.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column
)
pipe = self.prepare_pipeline(
model_or_pipeline=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, label_mapping)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
|
(task='text-classification', default_metric_name=None)
|
71,290 |
evaluate.evaluator.text_classification
|
__init__
| null |
def __init__(self, task="text-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='text-classification', default_metric_name=None)
|
71,297 |
evaluate.evaluator.text_classification
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task (in this case
`text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU; a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, *optional*, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column (`str`, *optional*, defaults to `None`):
The name of the second column containing the text features. This may be useful for classification tasks
such as MNLI, where two columns are used.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("text-classification")
>>> data = load_dataset("imdb", split="test[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli",
>>> data=data,
>>> metric="accuracy",
>>> label_mapping={"LABEL_0": 0.0, "LABEL_1": 1.0},
>>> strategy="bootstrap",
>>> n_resamples=10,
>>> random_state=0
>>> )
```
|
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
second_input_column: Optional[str] = None,
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, *optional*, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column (`str`, *optional*, defaults to `None`):
The name of the second column containing the text features. This may be useful for classification tasks
such as MNLI, where two columns are used.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column
)
pipe = self.prepare_pipeline(
model_or_pipeline=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, label_mapping)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, feature_extractor: Union[str, ForwardRef('FeatureExtractionMixin'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'text', second_input_column: Optional[str] = None, label_column: str = 'label', label_mapping: Optional[Dict[str, numbers.Number]] = None) -> Tuple[Dict[str, float], Any]
|
71,301 |
evaluate.evaluator.text_classification
|
predictions_processor
| null |
def predictions_processor(self, predictions, label_mapping):
predictions = [
label_mapping[element["label"]] if label_mapping is not None else element["label"]
for element in predictions
]
return {"predictions": predictions}
|
(self, predictions, label_mapping)
|
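An illustrative sketch of this mapping step (made-up outputs): pipeline label strings are converted to the integer ids used in the dataset's label column so the metric can compare them directly; without a `label_mapping`, the raw label strings are passed through.

```python
pipeline_outputs = [
    {"label": "LABEL_1", "score": 0.93},
    {"label": "LABEL_0", "score": 0.81},
]
label_mapping = {"LABEL_0": 0, "LABEL_1": 1}

predictions = [
    label_mapping[output["label"]] if label_mapping is not None else output["label"]
    for output in pipeline_outputs
]
print(predictions)  # [1, 0]
```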
71,302 |
evaluate.evaluator.text_classification
|
prepare_data
| null |
def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str):
if data is None:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
if second_input_column is not None:
self.check_required_columns(data, {"second_input_column": second_input_column})
data = load_dataset(data) if isinstance(data, str) else data
return {"references": data[label_column]}, DatasetColumnPair(
data, input_column, second_input_column, "text", "text_pair"
)
|
(self, data: Union[str, datasets.arrow_dataset.Dataset], input_column: str, second_input_column: str, label_column: str)
|
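For two-column (text pair) tasks such as MNLI, each row appears to be exposed to the pipeline roughly as a `{"text": ..., "text_pair": ...}` dict, which is what the `DatasetColumnPair` wrapper above provides lazily. A minimal sketch with illustrative data:

```python
from datasets import Dataset

data = Dataset.from_dict(
    {
        "premise": ["A man is playing a guitar."],
        "hypothesis": ["A person is making music."],
        "label": [0],
    }
)

references = data["label"]
pipe_inputs = [
    {"text": premise, "text_pair": hypothesis}
    for premise, hypothesis in zip(data["premise"], data["hypothesis"])
]
print(pipe_inputs[0])
# {'text': 'A man is playing a guitar.', 'text_pair': 'A person is making music.'}
```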
71,305 |
evaluate.evaluator.text_generation
|
TextGenerationEvaluator
|
Text generation evaluator.
This Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
`text-generation`.
Methods in this class assume a data format compatible with the [`~transformers.TextGenerationPipeline`].
|
class TextGenerationEvaluator(Evaluator):
"""
Text generation evaluator.
This Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
`text-generation`.
Methods in this class assume a data format compatible with the [`~transformers.TextGenerationPipeline`].
"""
def predictions_processor(self, predictions, *args, **kwargs):
"""
Args:
predictions: A list of lists of dicts
Returns:
`dict`: All the generated texts are flattened and stored under the "data" key.
"""
return {"data": [pred[f"{self.predictions_prefix}_text"] for pred_list in predictions for pred in pred_list]}
def __init__(self, task="text-generation", default_metric_name=None, predictions_prefix: str = "generated"):
super().__init__(task=task, default_metric_name=default_metric_name)
self.predictions_prefix = predictions_prefix
def prepare_data(self, data: Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, DatasetColumn]:
"""
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
"""
self.check_required_columns(data, {"input_column": input_column})
return {}, DatasetColumn(data, input_column)
|
(task='text-generation', default_metric_name=None, predictions_prefix: str = 'generated')
|
71,306 |
evaluate.evaluator.text_generation
|
__init__
| null |
def __init__(self, task="text-generation", default_metric_name=None, predictions_prefix: str = "generated"):
super().__init__(task=task, default_metric_name=default_metric_name)
self.predictions_prefix = predictions_prefix
|
(self, task='text-generation', default_metric_name=None, predictions_prefix: str = 'generated')
|
71,317 |
evaluate.evaluator.text_generation
|
predictions_processor
|
Args:
predictions: A list of lists of dicts
Returns:
`dict`: All the generated texts are flattened and stored under the "data" key.
|
def predictions_processor(self, predictions, *args, **kwargs):
"""
Args:
predictions: A list of lists of dicts
Returns:
`dict`: All the generated texts are flattened and stored under the "data" key.
"""
return {"data": [pred[f"{self.predictions_prefix}_text"] for pred_list in predictions for pred in pred_list]}
|
(self, predictions, *args, **kwargs)
|
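A small sketch of the flattening step (illustrative outputs): a text-generation pipeline returns one list of candidate generations per input, and all generated texts are collected into a single flat list under the `"data"` key (here with the default `"generated"` prefix).

```python
pipeline_outputs = [
    [{"generated_text": "Once upon a time"}],
    [
        {"generated_text": "The quick brown fox"},
        {"generated_text": "The quick red fox"},
    ],
]

flattened = {
    "data": [
        pred["generated_text"]
        for pred_list in pipeline_outputs
        for pred in pred_list
    ]
}
print(flattened["data"])
# ['Once upon a time', 'The quick brown fox', 'The quick red fox']
```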
71,318 |
evaluate.evaluator.text_generation
|
prepare_data
|
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
|
def prepare_data(self, data: Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, DatasetColumn]:
"""
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
"""
self.check_required_columns(data, {"input_column": input_column})
return {}, DatasetColumn(data, input_column)
|
(self, data: datasets.arrow_dataset.Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, evaluate.evaluator.utils.DatasetColumn]
|
71,321 |
evaluate.evaluator.token_classification
|
TokenClassificationEvaluator
|
Token classification evaluator.
This token classification evaluator can currently be loaded from [`evaluator`] using the default task name
`token-classification`.
Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`].
|
class TokenClassificationEvaluator(Evaluator):
"""
Token classification evaluator.
This token classification evaluator can currently be loaded from [`evaluator`] using the default task name
`token-classification`.
Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`].
"""
PIPELINE_KWARGS = {"ignore_labels": []}
def __init__(self, task="token-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str):
"""
Transform the pipeline predictions into a list of predicted labels of the same length as the true labels.
Args:
predictions (`List[List[Dict]]`):
List of pipeline predictions, where each token has been labeled.
words (`List[List[str]]`):
Original input data to the pipeline, used to build predicted labels of the same length.
join_by (`str`):
String to use to join two words. In English, it will typically be " ".
Returns:
`dict`: a dictionary holding the predictions
"""
preds = []
# iterate over the data rows
for i, prediction in enumerate(predictions):
pred_processed = []
# get a list of tuples giving the indexes of the start and end character of each word
words_offsets = self.words_to_offsets(words[i], join_by)
token_index = 0
for word_offset in words_offsets:
# for each word, we keep only the predicted label of its first token and discard the others
while prediction[token_index]["start"] < word_offset[0]:
token_index += 1
if prediction[token_index]["start"] > word_offset[0]: # bad indexing
pred_processed.append("O")
elif prediction[token_index]["start"] == word_offset[0]:
pred_processed.append(prediction[token_index]["entity"])
preds.append(pred_processed)
return {"predictions": preds}
def words_to_offsets(self, words: List[str], join_by: str):
"""
Convert a list of words to a list of offsets, where words are joined by `join_by`.
Args:
words (`List[str]`):
List of words to get offsets from.
join_by (`str`):
String to insert between words.
Returns:
`List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words.
"""
offsets = []
start = 0
for word in words:
end = start + len(word) - 1
offsets.append((start, end))
start = end + len(join_by) + 1
return offsets
def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str):
super().prepare_data(data, input_column, label_column)
if not isinstance(data.features[input_column], Sequence) or not isinstance(
data.features[label_column], Sequence
):
raise ValueError(
"TokenClassificationEvaluator expects the input and label columns to be provided as lists."
)
# If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
# Otherwise, we have to get the list of labels manually.
labels_are_int = isinstance(data.features[label_column].feature, ClassLabel)
if labels_are_int:
label_list = data.features[label_column].feature.names # list of string labels
id_to_label = {i: label for i, label in enumerate(label_list)}
references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]]
elif data.features[label_column].feature.dtype.startswith("int"):
raise NotImplementedError(
"References provided as integers, but the reference column is not a Sequence of ClassLabels."
)
else:
# In the event the labels are not a `Sequence[ClassLabel]`, the labels are already strings
# An example is labels such as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in the polyglot_ner dataset
references = data[label_column]
metric_inputs = {"references": references}
data = data.map(lambda x: {input_column: join_by.join(x[input_column])})
pipeline_inputs = DatasetColumn(data, input_column)
return metric_inputs, pipeline_inputs
def prepare_pipeline(
self,
model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
device: int = None,
):
pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device)
# check the pipeline outputs start characters in its predictions
dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS)
if dummy_output[0][0]["start"] is None:
raise ValueError(
"TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). "
"Transformers pipelines with a slow tokenizer will raise this error."
)
return pipe
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: str = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: Optional[int] = None,
random_state: Optional[int] = None,
input_column: str = "tokens",
label_column: str = "ner_tags",
join_by: Optional[str] = " ",
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"tokens"`):
The name of the column containing the tokens feature in the dataset specified by `data`.
label_column (`str`, defaults to `"ner_tags"`):
The name of the column containing the labels in the dataset specified by `data`.
join_by (`str`, *optional*, defaults to `" "`):
This evaluator supports datasets whose input column is a list of words. This parameter specifies how to join
words to generate a string input. This is especially useful for languages that do not separate words by a space.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data, input_column=input_column, label_column=label_column, join_by=join_by
)
pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, data[input_column], join_by)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
|
(task='token-classification', default_metric_name=None)
|
71,322 |
evaluate.evaluator.token_classification
|
__init__
| null |
def __init__(self, task="token-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='token-classification', default_metric_name=None)
|
71,329 |
evaluate.evaluator.token_classification
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task (in this case
`token-classification`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU; a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, defaults to `"tokens"`):
The name of the column containing the tokens feature in the dataset specified by `data`.
label_column (`str`, defaults to `"ner_tags"`):
The name of the column containing the labels in the dataset specified by `data`.
join_by (`str`, *optional*, defaults to `" "`):
This evaluator supports datasets whose input column is a list of words. This parameter specifies how to join
words to generate a string input. This is especially useful for languages that do not separate words by a space.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
The dataset input and label columns are expected to be formatted as a list of words and a list of labels respectively, following the [conll2003 dataset](https://huggingface.co/datasets/conll2003). Datasets whose inputs are single strings and whose labels are lists of offsets are not supported.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("token-classification")
>>> data = load_dataset("conll2003", split="validation[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english",
>>> data=data,
>>> metric="seqeval",
>>> )
```
<Tip>
For example, the following dataset format is accepted by the evaluator:
```python
dataset = Dataset.from_dict(
mapping={
"tokens": [["New", "York", "is", "a", "city", "and", "Felix", "a", "person", "."]],
"ner_tags": [[1, 2, 0, 0, 0, 0, 3, 0, 0, 0]],
},
features=Features({
"tokens": Sequence(feature=Value(dtype="string")),
"ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC", "B-PER", "I-PER"])),
}),
)
```
</Tip>
<Tip warning={true}>
For example, the following dataset format is **not** accepted by the evaluator:
```python
dataset = Dataset.from_dict(
mapping={
"tokens": [["New York is a city and Felix a person."]],
"starts": [[0, 23]],
"ends": [[7, 27]],
"ner_tags": [["LOC", "PER"]],
},
features=Features({
"tokens": Value(dtype="string"),
"starts": Sequence(feature=Value(dtype="int32")),
"ends": Sequence(feature=Value(dtype="int32")),
"ner_tags": Sequence(feature=Value(dtype="string")),
}),
)
```
</Tip>
|
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: str = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: Optional[int] = None,
random_state: Optional[int] = None,
input_column: str = "tokens",
label_column: str = "ner_tags",
join_by: Optional[str] = " ",
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"tokens"`):
The name of the column containing the tokens feature in the dataset specified by `data`.
label_column (`str`, defaults to `"ner_tags"`):
The name of the column containing the labels in the dataset specified by `data`.
join_by (`str`, *optional*, defaults to `" "`):
This evaluator supports datasets whose input column is a list of words. This parameter specifies how to join
words to generate a string input. This is especially useful for languages that do not separate words by a space.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data, input_column=input_column, label_column=label_column, join_by=join_by
)
pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, data[input_column], join_by)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: str = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: Optional[int] = None, random_state: Optional[int] = None, input_column: str = 'tokens', label_column: str = 'ner_tags', join_by: Optional[str] = ' ') -> Tuple[Dict[str, float], Any]
|
71,333 |
evaluate.evaluator.token_classification
|
predictions_processor
|
Transform the pipeline predictions into a list of predicted labels of the same length as the true labels.
Args:
predictions (`List[List[Dict]]`):
List of pipeline predictions, where each token has been labeled.
words (`List[List[str]]`):
Original input data to the pipeline, used to build predicted labels of the same length.
join_by (`str`):
String to use to join two words. In English, it will typically be " ".
Returns:
`dict`: a dictionary holding the predictions
|
def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str):
"""
Transform the pipeline predictions into a list of predicted labels of the same length as the true labels.
Args:
predictions (`List[List[Dict]]`):
List of pipeline predictions, where each token has been labeled.
words (`List[List[str]]`):
Original input data to the pipeline, used to build predicted labels of the same length.
join_by (`str`):
String to use to join two words. In English, it will typically be " ".
Returns:
`dict`: a dictionary holding the predictions
"""
preds = []
# iterate over the data rows
for i, prediction in enumerate(predictions):
pred_processed = []
# get a list of tuples giving the indexes of the start and end character of each word
words_offsets = self.words_to_offsets(words[i], join_by)
token_index = 0
for word_offset in words_offsets:
# for each word, we keep only the predicted label of its first token and discard the others
while prediction[token_index]["start"] < word_offset[0]:
token_index += 1
if prediction[token_index]["start"] > word_offset[0]: # bad indexing
pred_processed.append("O")
elif prediction[token_index]["start"] == word_offset[0]:
pred_processed.append(prediction[token_index]["entity"])
preds.append(pred_processed)
return {"predictions": preds}
|
(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str)
|
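A worked sketch of the alignment (hypothetical pipeline output): only the prediction for the first token of each word is kept, and `"O"` is used when no token starts exactly at a word boundary.

```python
words = ["New", "York", "is", "nice"]
# Character offsets of each word in the joined string "New York is nice"
word_offsets = [(0, 2), (4, 7), (9, 10), (12, 15)]

# Hypothetical token-level pipeline output ("start" is the character index)
prediction = [
    {"start": 0, "entity": "B-LOC"},
    {"start": 4, "entity": "I-LOC"},
    {"start": 9, "entity": "O"},
    {"start": 12, "entity": "O"},
]

aligned = []
token_index = 0
for word_start, _ in word_offsets:
    # skip tokens belonging to the previous word
    while prediction[token_index]["start"] < word_start:
        token_index += 1
    if prediction[token_index]["start"] == word_start:
        aligned.append(prediction[token_index]["entity"])
    else:
        aligned.append("O")  # no token starts at this word boundary
print(aligned)  # ['B-LOC', 'I-LOC', 'O', 'O']
```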
71,334 |
evaluate.evaluator.token_classification
|
prepare_data
| null |
def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str):
super().prepare_data(data, input_column, label_column)
if not isinstance(data.features[input_column], Sequence) or not isinstance(
data.features[label_column], Sequence
):
raise ValueError(
"TokenClassificationEvaluator expects the input and label columns to be provided as lists."
)
# If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
# Otherwise, we have to get the list of labels manually.
labels_are_int = isinstance(data.features[label_column].feature, ClassLabel)
if labels_are_int:
label_list = data.features[label_column].feature.names # list of string labels
id_to_label = {i: label for i, label in enumerate(label_list)}
references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]]
elif data.features[label_column].feature.dtype.startswith("int"):
raise NotImplementedError(
"References provided as integers, but the reference column is not a Sequence of ClassLabels."
)
else:
# In the event the labels are not a `Sequence[ClassLabel]`, the labels are already strings
# An example is labels such as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in the polyglot_ner dataset
references = data[label_column]
metric_inputs = {"references": references}
data = data.map(lambda x: {input_column: join_by.join(x[input_column])})
pipeline_inputs = DatasetColumn(data, input_column)
return metric_inputs, pipeline_inputs
|
(self, data: Union[str, datasets.arrow_dataset.Dataset], input_column: str, label_column: str, join_by: str)
|
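A minimal sketch of the reference construction for `ClassLabel`-encoded tags (illustrative data): integer ids are mapped back to their string names before being handed to a metric such as `seqeval`, and the input tokens are joined into a single string for the pipeline.

```python
from datasets import ClassLabel, Dataset, Features, Sequence, Value

data = Dataset.from_dict(
    {
        "tokens": [["New", "York", "is", "nice"]],
        "ner_tags": [[1, 2, 0, 0]],
    },
    features=Features(
        {
            "tokens": Sequence(Value("string")),
            "ner_tags": Sequence(ClassLabel(names=["O", "B-LOC", "I-LOC"])),
        }
    ),
)

label_list = data.features["ner_tags"].feature.names
id_to_label = dict(enumerate(label_list))
references = [[id_to_label[i] for i in tags] for tags in data["ner_tags"]]
print(references)  # [['B-LOC', 'I-LOC', 'O', 'O']]

# The pipeline receives the joined string version of each row
print([" ".join(tokens) for tokens in data["tokens"]])  # ['New York is nice']
```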
71,336 |
evaluate.evaluator.token_classification
|
prepare_pipeline
| null |
def prepare_pipeline(
self,
model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
device: int = None,
):
pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device)
# check the pipeline outputs start characters in its predictions
dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS)
if dummy_output[0][0]["start"] is None:
raise ValueError(
"TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). "
"Transformers pipelines with a slow tokenizer will raise this error."
)
return pipe
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')], tokenizer: Union[ForwardRef('PreTrainedTokenizerBase'), ForwardRef('FeatureExtractionMixin')] = None, feature_extractor: Union[ForwardRef('PreTrainedTokenizerBase'), ForwardRef('FeatureExtractionMixin')] = None, device: int = None)
|
71,337 |
evaluate.evaluator.token_classification
|
words_to_offsets
|
Convert a list of words to a list of offsets, where words are joined by `join_by`.
Args:
words (`List[str]`):
List of words to get offsets from.
join_by (`str`):
String to insert between words.
Returns:
`List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words.
|
def words_to_offsets(self, words: List[str], join_by: str):
"""
Convert a list of words to a list of offsets, where words are joined by `join_by`.
Args:
words (`List[str]`):
List of words to get offsets from.
join_by (`str`):
String to insert between words.
Returns:
`List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words.
"""
offsets = []
start = 0
for word in words:
end = start + len(word) - 1
offsets.append((start, end))
start = end + len(join_by) + 1
return offsets
|
(self, words: List[str], join_by: str)
|
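A quick check of the offset computation with sample data: words are assumed to be joined by `join_by`, and each tuple is the inclusive (start, end) character index of a word in the joined string.

```python
words = ["New", "York", "is", "nice"]
join_by = " "

offsets = []
start = 0
for word in words:
    end = start + len(word) - 1
    offsets.append((start, end))
    start = end + len(join_by) + 1

print(" ".join(words))  # 'New York is nice'
print(offsets)          # [(0, 2), (4, 7), (9, 10), (12, 15)] -> 'York' spans characters 4..7
```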
71,338 |
evaluate.evaluator.text2text_generation
|
TranslationEvaluator
|
Translation evaluator.
This translation generation evaluator can currently be loaded from [`evaluator`] using the default task name
`translation`.
Methods in this class assume a data format compatible with the [`~transformers.TranslationPipeline`].
|
class TranslationEvaluator(Text2TextGenerationEvaluator):
"""
Translation evaluator.
This translation generation evaluator can currently be loaded from [`evaluator`] using the default task name
`translation`.
Methods in this class assume a data format compatible with the [`~transformers.TranslationPipeline`].
"""
PREDICTION_PREFIX = "translation"
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="translation", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
TRANSLATION_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
generation_kwargs=generation_kwargs,
)
return result
|
(task='translation', default_metric_name=None)
|
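A hedged end-to-end usage sketch for the translation evaluator; the model, dataset, and the mapping of the nested `translation` field into plain `text`/`label` columns are illustrative choices, not prescribed by the library.

```python
from datasets import load_dataset
from evaluate import evaluator

task_evaluator = evaluator("translation")
data = load_dataset("wmt16", "ro-en", split="validation[:4]")
# Flatten the nested `translation` dict into the default input/label columns
data = data.map(
    lambda x: {"text": x["translation"]["en"], "label": x["translation"]["ro"]}
)

results = task_evaluator.compute(
    model_or_pipeline="Helsinki-NLP/opus-mt-en-ro",
    data=data,
    metric="sacrebleu",
)
```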
71,339 |
evaluate.evaluator.text2text_generation
|
__init__
| null |
def __init__(self, task="translation", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='translation', default_metric_name=None)
|
71,346 |
evaluate.evaluator.text2text_generation
|
compute
|
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument is not specified, we initialize the default pipeline for the task (in this case
`translation`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
input_column (`str`, defaults to `"text"`):
the name of the column containing the input text in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("translation")
>>> data = load_dataset("wmt19", "fr-de", split="validation[:40]")
>>> data = data.map(lambda x: {"text": x["translation"]["de"], "label": x["translation"]["fr"]})
>>> results = task_evaluator.compute(
>>> model_or_pipeline="Helsinki-NLP/opus-mt-de-fr",
>>> data=data,
>>> )
```
|
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
TRANSLATION_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
generation_kwargs=generation_kwargs,
)
return result
|
(self, model_or_pipeline: Union[str, ForwardRef('Pipeline'), Callable, ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')] = None, data: Union[str, datasets.arrow_dataset.Dataset] = None, subset: Optional[str] = None, split: Optional[str] = None, metric: Union[str, evaluate.module.EvaluationModule] = None, tokenizer: Union[str, ForwardRef('PreTrainedTokenizer'), NoneType] = None, strategy: Literal['simple', 'bootstrap'] = 'simple', confidence_level: float = 0.95, n_resamples: int = 9999, device: int = None, random_state: Optional[int] = None, input_column: str = 'text', label_column: str = 'label', generation_kwargs: dict = None) -> Tuple[Dict[str, float], Any]
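A hedged variation on the example above: with `strategy="bootstrap"`, each metric key maps to a dict containing the score, the confidence interval, and the standard error. `n_resamples` is kept deliberately small here only to make the sketch cheap to run:
```python
>>> results = task_evaluator.compute(
>>> model_or_pipeline="Helsinki-NLP/opus-mt-de-fr",
>>> data=data,
>>> metric="sacrebleu",
>>> strategy="bootstrap",
>>> confidence_level=0.95,
>>> n_resamples=100, # illustrative; the default is 9999
>>> )
```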
|
71,354 |
evaluate.module
|
combine
|
Combines several metrics, comparisons, or measurements into a single `CombinedEvaluations` object that
can be used like a single evaluation module.
If two scores have the same name, then they are prefixed with their module names.
If two modules share the same name, use a dictionary to give them distinct names; otherwise an integer id is appended to the prefix.
Args:
evaluations (`Union[list, dict]`):
A list or dictionary of evaluation modules. The modules can either be passed
as strings or loaded `EvaluationModule`s. If a dictionary is passed its keys are the names used and the values the modules.
The names are used as prefixes in case there are name overlaps in the returned results of each module, or if `force_prefix=True`.
force_prefix (`bool`, *optional*, defaults to `False`):
If `True` all scores from the modules are prefixed with their name. If
a dictionary is passed the keys are used as name otherwise the module's name.
Examples:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = evaluate.combine(["accuracy", "f1"])
```
|
def combine(evaluations, force_prefix=False):
"""Combines several metrics, comparisons, or measurements into a single `CombinedEvaluations` object that
can be used like a single evaluation module.
If two scores have the same name, then they are prefixed with their module names.
If two modules share the same name, use a dictionary to give them distinct names; otherwise an integer id is appended to the prefix.
Args:
evaluations (`Union[list, dict]`):
A list or dictionary of evaluation modules. The modules can either be passed
as strings or loaded `EvaluationModule`s. If a dictionary is passed its keys are the names used and the values the modules.
The names are used as prefixes in case there are name overlaps in the returned results of each module, or if `force_prefix=True`.
force_prefix (`bool`, *optional*, defaults to `False`):
If `True` all scores from the modules are prefixed with their name. If
a dictionary is passed the keys are used as name otherwise the module's name.
Examples:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = evaluate.combine(["accuracy", "f1"])
```
"""
return CombinedEvaluations(evaluations, force_prefix=force_prefix)
|
(evaluations, force_prefix=False)
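A short usage sketch showing that the combined object is then called like a single module (the labels are arbitrary toy data):
```python
>>> import evaluate
>>> clf_metrics = evaluate.combine(["accuracy", "f1"])
>>> results = clf_metrics.compute(predictions=[0, 1, 1], references=[0, 1, 0])
>>> # on this toy input both 'accuracy' and 'f1' come out to roughly 0.67
```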
|