Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- ckpts/universal/global_step20/zero/11.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/11.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/11.post_attention_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/3.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/3.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/evaluate/__init__.py +51 -0
- venv/lib/python3.10/site-packages/evaluate/config.py +192 -0
- venv/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py +160 -0
- venv/lib/python3.10/site-packages/evaluate/hub.py +133 -0
- venv/lib/python3.10/site-packages/evaluate/info.py +157 -0
- venv/lib/python3.10/site-packages/evaluate/inspect.py +129 -0
- venv/lib/python3.10/site-packages/evaluate/loading.py +771 -0
- venv/lib/python3.10/site-packages/evaluate/module.py +1029 -0
- venv/lib/python3.10/site-packages/evaluate/naming.py +82 -0
- venv/lib/python3.10/site-packages/evaluate/saving.py +73 -0
- venv/lib/python3.10/site-packages/evaluate/utils/__init__.py +39 -0
- venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/file_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/gradio.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/logging.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/evaluate/utils/file_utils.py +618 -0
- venv/lib/python3.10/site-packages/evaluate/utils/gradio.py +131 -0
- venv/lib/python3.10/site-packages/evaluate/utils/logging.py +234 -0
- venv/lib/python3.10/site-packages/evaluate/visualization.py +230 -0
- venv/lib/python3.10/site-packages/tqdm/__init__.py +38 -0
- venv/lib/python3.10/site-packages/tqdm/__main__.py +3 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/__main__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_monitor.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_gui.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/autonotebook.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/cli.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/dask.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/gui.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/keras.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/notebook.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/rich.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/std.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/tqdm/__pycache__/tk.cpython-310.pyc +0 -0
ckpts/universal/global_step20/zero/11.post_attention_layernorm.weight/exp_avg.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9b868d2ad639585862ecd0bae7338a78346fee26909257b752da254d623432c7
|
| 3 |
+
size 9372
|
ckpts/universal/global_step20/zero/11.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:71ca8977f165bcb8834ba4ea4bdba3ca99cbfa0d49068b1a73040a8d8f32c6cb
|
| 3 |
+
size 9387
|
ckpts/universal/global_step20/zero/11.post_attention_layernorm.weight/fp32.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:179021d69b745ef2f12bf6114241e7292bf95bc7189b2e1506944ab672bc04be
|
| 3 |
+
size 9293
|
ckpts/universal/global_step20/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9b6168a259f29872d21503bb27ef53eb14fea4415cd9bea71b8d352aa969b912
|
| 3 |
+
size 33555627
|
ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4ff60894adea3d281d8ff4cdb31c452264d7739dddae11a894441434e2b16c89
|
| 3 |
+
size 33555612
|
ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6b427c672541496909ca540e245108e6836c334c7c39aa8c6c57533535b39f3c
|
| 3 |
+
size 33555533
|
ckpts/universal/global_step20/zero/3.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:76e5febd800bdaf7d34eaaab38f4d0219cae3e396786f798b5400798400a3652
|
| 3 |
+
size 33555612
|
ckpts/universal/global_step20/zero/3.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8ff2943709b74322da50098fe814405c855839bb588a80264a2264c0070a43b6
|
| 3 |
+
size 33555533
|
venv/lib/python3.10/site-packages/evaluate/__init__.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
# Copyright 2020 The HuggingFace Evaluate Authors and the TensorFlow Datasets Authors.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
# Lint as: python3
|
| 17 |
+
# pylint: enable=line-too-long
|
| 18 |
+
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
|
| 19 |
+
|
| 20 |
+
__version__ = "0.4.1"
|
| 21 |
+
|
| 22 |
+
from packaging import version
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
SCRIPTS_VERSION = "main" if version.parse(__version__).is_devrelease else __version__
|
| 26 |
+
|
| 27 |
+
del version
|
| 28 |
+
|
| 29 |
+
from .evaluation_suite import EvaluationSuite
|
| 30 |
+
from .evaluator import (
|
| 31 |
+
AudioClassificationEvaluator,
|
| 32 |
+
AutomaticSpeechRecognitionEvaluator,
|
| 33 |
+
Evaluator,
|
| 34 |
+
ImageClassificationEvaluator,
|
| 35 |
+
QuestionAnsweringEvaluator,
|
| 36 |
+
SummarizationEvaluator,
|
| 37 |
+
Text2TextGenerationEvaluator,
|
| 38 |
+
TextClassificationEvaluator,
|
| 39 |
+
TextGenerationEvaluator,
|
| 40 |
+
TokenClassificationEvaluator,
|
| 41 |
+
TranslationEvaluator,
|
| 42 |
+
evaluator,
|
| 43 |
+
)
|
| 44 |
+
from .hub import push_to_hub
|
| 45 |
+
from .info import ComparisonInfo, EvaluationModuleInfo, MeasurementInfo, MetricInfo
|
| 46 |
+
from .inspect import inspect_evaluation_module, list_evaluation_modules
|
| 47 |
+
from .loading import load
|
| 48 |
+
from .module import CombinedEvaluations, Comparison, EvaluationModule, Measurement, Metric, combine
|
| 49 |
+
from .saving import save
|
| 50 |
+
from .utils import *
|
| 51 |
+
from .utils import gradio, logging
|
venv/lib/python3.10/site-packages/evaluate/config.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
import os
|
| 3 |
+
import platform
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
from packaging import version
|
| 7 |
+
|
| 8 |
+
from .utils.logging import get_logger
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
logger = get_logger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Metrics
|
| 15 |
+
S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
|
| 16 |
+
CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
|
| 17 |
+
REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/metrics/{path}/{name}"
|
| 18 |
+
REPO_MEASUREMENTS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/measurements/{path}/{name}"
|
| 19 |
+
REPO_COMPARISONS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/comparisons/{path}/{name}"
|
| 20 |
+
|
| 21 |
+
# Evaluation module types
|
| 22 |
+
EVALUATION_MODULE_TYPES = ["metric", "comparison", "measurement"]
|
| 23 |
+
|
| 24 |
+
# Hub
|
| 25 |
+
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
|
| 26 |
+
HF_LIST_ENDPOINT = HF_ENDPOINT + "/api/spaces?filter={type}"
|
| 27 |
+
HUB_EVALUATE_URL = HF_ENDPOINT + "/spaces/{path}/resolve/{revision}/{name}"
|
| 28 |
+
HUB_DEFAULT_VERSION = "main"
|
| 29 |
+
|
| 30 |
+
PY_VERSION = version.parse(platform.python_version())
|
| 31 |
+
|
| 32 |
+
if PY_VERSION < version.parse("3.8"):
|
| 33 |
+
import importlib_metadata
|
| 34 |
+
else:
|
| 35 |
+
import importlib.metadata as importlib_metadata
|
| 36 |
+
|
| 37 |
+
# General environment variables accepted values for booleans
|
| 38 |
+
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
|
| 39 |
+
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Imports
|
| 43 |
+
PANDAS_VERSION = version.parse(importlib_metadata.version("pandas"))
|
| 44 |
+
PYARROW_VERSION = version.parse(importlib_metadata.version("pyarrow"))
|
| 45 |
+
|
| 46 |
+
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
|
| 47 |
+
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
|
| 48 |
+
USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
|
| 49 |
+
|
| 50 |
+
TORCH_VERSION = "N/A"
|
| 51 |
+
TORCH_AVAILABLE = False
|
| 52 |
+
|
| 53 |
+
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
|
| 54 |
+
TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
|
| 55 |
+
if TORCH_AVAILABLE:
|
| 56 |
+
try:
|
| 57 |
+
TORCH_VERSION = version.parse(importlib_metadata.version("torch"))
|
| 58 |
+
logger.info(f"PyTorch version {TORCH_VERSION} available.")
|
| 59 |
+
except importlib_metadata.PackageNotFoundError:
|
| 60 |
+
pass
|
| 61 |
+
else:
|
| 62 |
+
logger.info("Disabling PyTorch because USE_TF is set")
|
| 63 |
+
|
| 64 |
+
TF_VERSION = "N/A"
|
| 65 |
+
TF_AVAILABLE = False
|
| 66 |
+
|
| 67 |
+
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
|
| 68 |
+
TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
|
| 69 |
+
if TF_AVAILABLE:
|
| 70 |
+
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
|
| 71 |
+
for package in [
|
| 72 |
+
"tensorflow",
|
| 73 |
+
"tensorflow-cpu",
|
| 74 |
+
"tensorflow-gpu",
|
| 75 |
+
"tf-nightly",
|
| 76 |
+
"tf-nightly-cpu",
|
| 77 |
+
"tf-nightly-gpu",
|
| 78 |
+
"intel-tensorflow",
|
| 79 |
+
"tensorflow-rocm",
|
| 80 |
+
"tensorflow-macos",
|
| 81 |
+
]:
|
| 82 |
+
try:
|
| 83 |
+
TF_VERSION = version.parse(importlib_metadata.version(package))
|
| 84 |
+
except importlib_metadata.PackageNotFoundError:
|
| 85 |
+
continue
|
| 86 |
+
else:
|
| 87 |
+
break
|
| 88 |
+
else:
|
| 89 |
+
TF_AVAILABLE = False
|
| 90 |
+
if TF_AVAILABLE:
|
| 91 |
+
if TF_VERSION.major < 2:
|
| 92 |
+
logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
|
| 93 |
+
TF_AVAILABLE = False
|
| 94 |
+
else:
|
| 95 |
+
logger.info(f"TensorFlow version {TF_VERSION} available.")
|
| 96 |
+
else:
|
| 97 |
+
logger.info("Disabling Tensorflow because USE_TORCH is set")
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
JAX_VERSION = "N/A"
|
| 101 |
+
JAX_AVAILABLE = False
|
| 102 |
+
|
| 103 |
+
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
|
| 104 |
+
JAX_AVAILABLE = importlib.util.find_spec("jax") is not None
|
| 105 |
+
if JAX_AVAILABLE:
|
| 106 |
+
try:
|
| 107 |
+
JAX_VERSION = version.parse(importlib_metadata.version("jax"))
|
| 108 |
+
logger.info(f"JAX version {JAX_VERSION} available.")
|
| 109 |
+
except importlib_metadata.PackageNotFoundError:
|
| 110 |
+
pass
|
| 111 |
+
else:
|
| 112 |
+
logger.info("Disabling JAX because USE_JAX is set to False")
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Cache location
|
| 116 |
+
DEFAULT_XDG_CACHE_HOME = "~/.cache"
|
| 117 |
+
XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
|
| 118 |
+
DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
|
| 119 |
+
HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
|
| 120 |
+
|
| 121 |
+
DEFAULT_HF_EVALUATE_CACHE = os.path.join(HF_CACHE_HOME, "evaluate")
|
| 122 |
+
HF_EVALUATE_CACHE = Path(os.getenv("HF_EVALUATE_CACHE", DEFAULT_HF_EVALUATE_CACHE))
|
| 123 |
+
|
| 124 |
+
DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
|
| 125 |
+
HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
|
| 126 |
+
|
| 127 |
+
DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
|
| 128 |
+
HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
|
| 129 |
+
|
| 130 |
+
DOWNLOADED_DATASETS_DIR = "downloads"
|
| 131 |
+
DEFAULT_DOWNLOADED_EVALUATE_PATH = os.path.join(HF_EVALUATE_CACHE, DOWNLOADED_DATASETS_DIR)
|
| 132 |
+
DOWNLOADED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_EVALUATE_PATH", DEFAULT_DOWNLOADED_EVALUATE_PATH))
|
| 133 |
+
|
| 134 |
+
EXTRACTED_EVALUATE_DIR = "extracted"
|
| 135 |
+
DEFAULT_EXTRACTED_EVALUATE_PATH = os.path.join(DEFAULT_DOWNLOADED_EVALUATE_PATH, EXTRACTED_EVALUATE_DIR)
|
| 136 |
+
EXTRACTED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_EVALUATE_PATH", DEFAULT_EXTRACTED_EVALUATE_PATH))
|
| 137 |
+
|
| 138 |
+
# Download count for the website
|
| 139 |
+
HF_UPDATE_DOWNLOAD_COUNTS = (
|
| 140 |
+
os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
# Offline mode
|
| 144 |
+
HF_EVALUATE_OFFLINE = os.environ.get("HF_EVALUATE_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# File names
|
| 148 |
+
LICENSE_FILENAME = "LICENSE"
|
| 149 |
+
METRIC_INFO_FILENAME = "metric_info.json"
|
| 150 |
+
DATASETDICT_JSON_FILENAME = "dataset_dict.json"
|
| 151 |
+
|
| 152 |
+
MODULE_NAME_FOR_DYNAMIC_MODULES = "evaluate_modules"
|
| 153 |
+
|
| 154 |
+
HF_HUB_ALLOWED_TASKS = [
|
| 155 |
+
"image-classification",
|
| 156 |
+
"translation",
|
| 157 |
+
"image-segmentation",
|
| 158 |
+
"fill-mask",
|
| 159 |
+
"automatic-speech-recognition",
|
| 160 |
+
"token-classification",
|
| 161 |
+
"sentence-similarity",
|
| 162 |
+
"audio-classification",
|
| 163 |
+
"question-answering",
|
| 164 |
+
"summarization",
|
| 165 |
+
"zero-shot-classification",
|
| 166 |
+
"table-to-text",
|
| 167 |
+
"feature-extraction",
|
| 168 |
+
"other",
|
| 169 |
+
"multiple-choice",
|
| 170 |
+
"text-classification",
|
| 171 |
+
"text-to-image",
|
| 172 |
+
"text2text-generation",
|
| 173 |
+
"zero-shot-image-classification",
|
| 174 |
+
"tabular-classification",
|
| 175 |
+
"tabular-regression",
|
| 176 |
+
"image-to-image",
|
| 177 |
+
"tabular-to-text",
|
| 178 |
+
"unconditional-image-generation",
|
| 179 |
+
"text-retrieval",
|
| 180 |
+
"text-to-speech",
|
| 181 |
+
"object-detection",
|
| 182 |
+
"audio-to-audio",
|
| 183 |
+
"text-generation",
|
| 184 |
+
"conversational",
|
| 185 |
+
"table-question-answering",
|
| 186 |
+
"visual-question-answering",
|
| 187 |
+
"image-to-text",
|
| 188 |
+
"reinforcement-learning",
|
| 189 |
+
"voice-activity-detection",
|
| 190 |
+
"time-series-forecasting",
|
| 191 |
+
"document-question-answering",
|
| 192 |
+
]
|
venv/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Evaluate Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from numbers import Number
|
| 16 |
+
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
from datasets import Dataset, load_dataset
|
| 19 |
+
from typing_extensions import Literal
|
| 20 |
+
|
| 21 |
+
from ..module import EvaluationModule
|
| 22 |
+
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
|
| 23 |
+
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
|
| 24 |
+
from .utils import DatasetColumnPair
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
if TYPE_CHECKING:
|
| 28 |
+
from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
TASK_DOCUMENTATION = r"""
|
| 32 |
+
Examples:
|
| 33 |
+
```python
|
| 34 |
+
>>> from evaluate import evaluator
|
| 35 |
+
>>> from datasets import load_dataset
|
| 36 |
+
>>> task_evaluator = evaluator("text-classification")
|
| 37 |
+
>>> data = load_dataset("imdb", split="test[:2]")
|
| 38 |
+
>>> results = task_evaluator.compute(
|
| 39 |
+
>>> model_or_pipeline="huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli",
|
| 40 |
+
>>> data=data,
|
| 41 |
+
>>> metric="accuracy",
|
| 42 |
+
>>> label_mapping={"LABEL_0": 0.0, "LABEL_1": 1.0},
|
| 43 |
+
>>> strategy="bootstrap",
|
| 44 |
+
>>> n_resamples=10,
|
| 45 |
+
>>> random_state=0
|
| 46 |
+
>>> )
|
| 47 |
+
```
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class TextClassificationEvaluator(Evaluator):
|
| 52 |
+
"""
|
| 53 |
+
Text classification evaluator.
|
| 54 |
+
This text classification evaluator can currently be loaded from [`evaluator`] using the default task name
|
| 55 |
+
`text-classification` or with a `"sentiment-analysis"` alias.
|
| 56 |
+
Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual
|
| 57 |
+
feature as input and a categorical label as output.
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
PIPELINE_KWARGS = {"truncation": True}
|
| 61 |
+
|
| 62 |
+
def __init__(self, task="text-classification", default_metric_name=None):
|
| 63 |
+
super().__init__(task, default_metric_name=default_metric_name)
|
| 64 |
+
|
| 65 |
+
def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str):
|
| 66 |
+
if data is None:
|
| 67 |
+
raise ValueError(
|
| 68 |
+
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
|
| 72 |
+
|
| 73 |
+
if second_input_column is not None:
|
| 74 |
+
self.check_required_columns(data, {"second_input_column": second_input_column})
|
| 75 |
+
|
| 76 |
+
data = load_dataset(data) if isinstance(data, str) else data
|
| 77 |
+
|
| 78 |
+
return {"references": data[label_column]}, DatasetColumnPair(
|
| 79 |
+
data, input_column, second_input_column, "text", "text_pair"
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
def predictions_processor(self, predictions, label_mapping):
|
| 83 |
+
predictions = [
|
| 84 |
+
label_mapping[element["label"]] if label_mapping is not None else element["label"]
|
| 85 |
+
for element in predictions
|
| 86 |
+
]
|
| 87 |
+
return {"predictions": predictions}
|
| 88 |
+
|
| 89 |
+
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
|
| 90 |
+
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
|
| 91 |
+
def compute(
|
| 92 |
+
self,
|
| 93 |
+
model_or_pipeline: Union[
|
| 94 |
+
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
|
| 95 |
+
] = None,
|
| 96 |
+
data: Union[str, Dataset] = None,
|
| 97 |
+
subset: Optional[str] = None,
|
| 98 |
+
split: Optional[str] = None,
|
| 99 |
+
metric: Union[str, EvaluationModule] = None,
|
| 100 |
+
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
|
| 101 |
+
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
|
| 102 |
+
strategy: Literal["simple", "bootstrap"] = "simple",
|
| 103 |
+
confidence_level: float = 0.95,
|
| 104 |
+
n_resamples: int = 9999,
|
| 105 |
+
device: int = None,
|
| 106 |
+
random_state: Optional[int] = None,
|
| 107 |
+
input_column: str = "text",
|
| 108 |
+
second_input_column: Optional[str] = None,
|
| 109 |
+
label_column: str = "label",
|
| 110 |
+
label_mapping: Optional[Dict[str, Number]] = None,
|
| 111 |
+
) -> Tuple[Dict[str, float], Any]:
|
| 112 |
+
"""
|
| 113 |
+
input_column (`str`, *optional*, defaults to `"text"`):
|
| 114 |
+
The name of the column containing the text feature in the dataset specified by `data`.
|
| 115 |
+
second_input_column (`str`, *optional*, defaults to `None`):
|
| 116 |
+
The name of the second column containing the text features. This may be useful for classification tasks
|
| 117 |
+
as MNLI, where two columns are used.
|
| 118 |
+
label_column (`str`, defaults to `"label"`):
|
| 119 |
+
The name of the column containing the labels in the dataset specified by `data`.
|
| 120 |
+
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
|
| 121 |
+
We want to map class labels defined by the model in the pipeline to values consistent with those
|
| 122 |
+
defined in the `label_column` of the `data` dataset.
|
| 123 |
+
"""
|
| 124 |
+
|
| 125 |
+
result = {}
|
| 126 |
+
|
| 127 |
+
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
|
| 128 |
+
|
| 129 |
+
# Prepare inputs
|
| 130 |
+
data = self.load_data(data=data, subset=subset, split=split)
|
| 131 |
+
metric_inputs, pipe_inputs = self.prepare_data(
|
| 132 |
+
data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column
|
| 133 |
+
)
|
| 134 |
+
pipe = self.prepare_pipeline(
|
| 135 |
+
model_or_pipeline=model_or_pipeline,
|
| 136 |
+
tokenizer=tokenizer,
|
| 137 |
+
feature_extractor=feature_extractor,
|
| 138 |
+
device=device,
|
| 139 |
+
)
|
| 140 |
+
metric = self.prepare_metric(metric)
|
| 141 |
+
|
| 142 |
+
# Compute predictions
|
| 143 |
+
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
|
| 144 |
+
predictions = self.predictions_processor(predictions, label_mapping)
|
| 145 |
+
metric_inputs.update(predictions)
|
| 146 |
+
|
| 147 |
+
# Compute metrics from references and predictions
|
| 148 |
+
metric_results = self.compute_metric(
|
| 149 |
+
metric=metric,
|
| 150 |
+
metric_inputs=metric_inputs,
|
| 151 |
+
strategy=strategy,
|
| 152 |
+
confidence_level=confidence_level,
|
| 153 |
+
n_resamples=n_resamples,
|
| 154 |
+
random_state=random_state,
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
result.update(metric_results)
|
| 158 |
+
result.update(perf_results)
|
| 159 |
+
|
| 160 |
+
return result
|
venv/lib/python3.10/site-packages/evaluate/hub.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict
|
| 2 |
+
|
| 3 |
+
import requests
|
| 4 |
+
from huggingface_hub import dataset_info, model_info
|
| 5 |
+
from huggingface_hub.repocard import metadata_update
|
| 6 |
+
|
| 7 |
+
from .config import HF_HUB_ALLOWED_TASKS
|
| 8 |
+
from .utils.logging import get_logger
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
logger = get_logger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def push_to_hub(
|
| 15 |
+
model_id: str,
|
| 16 |
+
task_type: str,
|
| 17 |
+
dataset_type: str,
|
| 18 |
+
dataset_name: str,
|
| 19 |
+
metric_type: str,
|
| 20 |
+
metric_name: str,
|
| 21 |
+
metric_value: float,
|
| 22 |
+
task_name: str = None,
|
| 23 |
+
dataset_config: str = None,
|
| 24 |
+
dataset_split: str = None,
|
| 25 |
+
dataset_revision: str = None,
|
| 26 |
+
dataset_args: Dict[str, int] = None,
|
| 27 |
+
metric_config: str = None,
|
| 28 |
+
metric_args: Dict[str, int] = None,
|
| 29 |
+
overwrite: bool = False,
|
| 30 |
+
):
|
| 31 |
+
r"""
|
| 32 |
+
Pushes the result of a metric to the metadata of a model repository in the Hub.
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
model_id (`str`):
|
| 36 |
+
Model id from https://hf.co/models.
|
| 37 |
+
task_type (`str`):
|
| 38 |
+
Task id, refer to the [Hub allowed tasks](https://github.com/huggingface/evaluate/blob/main/src/evaluate/config.py#L154) for allowed values.
|
| 39 |
+
dataset_type (`str`):
|
| 40 |
+
Dataset id from https://hf.co/datasets.
|
| 41 |
+
dataset_name (`str`):
|
| 42 |
+
Pretty name for the dataset.
|
| 43 |
+
metric_type (`str`):
|
| 44 |
+
Metric id from https://hf.co/metrics.
|
| 45 |
+
metric_name (`str`):
|
| 46 |
+
Pretty name for the metric.
|
| 47 |
+
metric_value (`float`):
|
| 48 |
+
Computed metric value.
|
| 49 |
+
task_name (`str`, *optional*):
|
| 50 |
+
Pretty name for the task.
|
| 51 |
+
dataset_config (`str`, *optional*):
|
| 52 |
+
Dataset configuration used in [`~datasets.load_dataset`].
|
| 53 |
+
See [`~datasets.load_dataset`] for more info.
|
| 54 |
+
dataset_split (`str`, *optional*):
|
| 55 |
+
Name of split used for metric computation.
|
| 56 |
+
dataset_revision (`str`, *optional*):
|
| 57 |
+
Git hash for the specific version of the dataset.
|
| 58 |
+
dataset_args (`dict[str, int]`, *optional*):
|
| 59 |
+
Additional arguments passed to [`~datasets.load_dataset`].
|
| 60 |
+
metric_config (`str`, *optional*):
|
| 61 |
+
Configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
|
| 62 |
+
metric_args (`dict[str, int]`, *optional*):
|
| 63 |
+
Arguments passed during [`~evaluate.EvaluationModule.compute`].
|
| 64 |
+
overwrite (`bool`, *optional*, defaults to `False`):
|
| 65 |
+
If set to `True` an existing metric field can be overwritten, otherwise
|
| 66 |
+
attempting to overwrite any existing fields will cause an error.
|
| 67 |
+
|
| 68 |
+
Example:
|
| 69 |
+
|
| 70 |
+
```python
|
| 71 |
+
>>> push_to_hub(
|
| 72 |
+
... model_id="huggingface/gpt2-wikitext2",
|
| 73 |
+
... metric_value=0.5
|
| 74 |
+
... metric_type="bleu",
|
| 75 |
+
... metric_name="BLEU",
|
| 76 |
+
... dataset_name="WikiText",
|
| 77 |
+
... dataset_type="wikitext",
|
| 78 |
+
... dataset_split="test",
|
| 79 |
+
... task_type="text-generation",
|
| 80 |
+
... task_name="Text Generation"
|
| 81 |
+
... )
|
| 82 |
+
```"""
|
| 83 |
+
if task_type not in HF_HUB_ALLOWED_TASKS:
|
| 84 |
+
raise ValueError(f"Task type not supported. Task has to be one of {HF_HUB_ALLOWED_TASKS}")
|
| 85 |
+
|
| 86 |
+
try:
|
| 87 |
+
dataset_info(dataset_type)
|
| 88 |
+
except requests.exceptions.HTTPError:
|
| 89 |
+
logger.warning(f"Dataset {dataset_type} not found on the Hub at hf.co/datasets/{dataset_type}")
|
| 90 |
+
|
| 91 |
+
try:
|
| 92 |
+
model_info(model_id)
|
| 93 |
+
except requests.exceptions.HTTPError:
|
| 94 |
+
raise ValueError(f"Model {model_id} not found on the Hub at hf.co/{model_id}")
|
| 95 |
+
|
| 96 |
+
result = {
|
| 97 |
+
"task": {
|
| 98 |
+
"type": task_type,
|
| 99 |
+
},
|
| 100 |
+
"dataset": {
|
| 101 |
+
"type": dataset_type,
|
| 102 |
+
"name": dataset_name,
|
| 103 |
+
},
|
| 104 |
+
"metrics": [
|
| 105 |
+
{
|
| 106 |
+
"type": metric_type,
|
| 107 |
+
"value": metric_value,
|
| 108 |
+
},
|
| 109 |
+
],
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
if dataset_config is not None:
|
| 113 |
+
result["dataset"]["config"] = dataset_config
|
| 114 |
+
if dataset_split is not None:
|
| 115 |
+
result["dataset"]["split"] = dataset_split
|
| 116 |
+
if dataset_revision is not None:
|
| 117 |
+
result["dataset"]["revision"] = dataset_revision
|
| 118 |
+
if dataset_args is not None:
|
| 119 |
+
result["dataset"]["args"] = dataset_args
|
| 120 |
+
|
| 121 |
+
if task_name is not None:
|
| 122 |
+
result["task"]["name"] = task_name
|
| 123 |
+
|
| 124 |
+
if metric_name is not None:
|
| 125 |
+
result["metrics"][0]["name"] = metric_name
|
| 126 |
+
if metric_config is not None:
|
| 127 |
+
result["metrics"][0]["config"] = metric_config
|
| 128 |
+
if metric_args is not None:
|
| 129 |
+
result["metrics"][0]["args"] = metric_args
|
| 130 |
+
|
| 131 |
+
metadata = {"model-index": [{"results": [result]}]}
|
| 132 |
+
|
| 133 |
+
return metadata_update(repo_id=model_id, metadata=metadata, overwrite=overwrite)
|
venv/lib/python3.10/site-packages/evaluate/info.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Lint as: python3
|
| 16 |
+
""" EvaluationModuleInfo records information we know about a dataset and a metric.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import dataclasses
|
| 20 |
+
import json
|
| 21 |
+
import os
|
| 22 |
+
from dataclasses import asdict, dataclass, field
|
| 23 |
+
from typing import List, Optional, Union
|
| 24 |
+
|
| 25 |
+
from datasets.features import Features, Value
|
| 26 |
+
|
| 27 |
+
from . import config
|
| 28 |
+
from .utils.logging import get_logger
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
logger = get_logger(__name__)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class EvaluationModuleInfo:
    """Container for the metadata that describes an evaluation module.

    Shared base class for `MetricInfo`, `ComparisonInfo`, and `MeasurementInfo`:
    it records the module's description, citation, and expected input `features`,
    plus optional details such as homepage, license, and reference URLs.

    Not every field is known at construction time; some are filled in later by
    the module builder (`module_name`, `config_name`, `experiment_id`).
    """

    # Set in the evaluation module scripts
    description: str
    citation: str
    features: Union[Features, List[Features]]
    inputs_description: str = field(default_factory=str)
    homepage: str = field(default_factory=str)
    license: str = field(default_factory=str)
    codebase_urls: List[str] = field(default_factory=list)
    reference_urls: List[str] = field(default_factory=list)
    streamable: bool = False
    format: Optional[str] = None
    module_type: str = "metric"  # deprecate this in the future

    # Set later by the builder
    module_name: Optional[str] = None
    config_name: Optional[str] = None
    experiment_id: Optional[str] = None

    def __post_init__(self):
        # A non-default format (e.g. "numpy") only supports scalar `Value` features.
        if self.format is not None:
            for key, value in self.features.items():
                if not isinstance(value, Value):
                    raise ValueError(
                        f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
                        f"Here {key} is an instance of {value.__class__.__name__}"
                    )

    def write_to_directory(self, metric_info_dir):
        """Serialize this info as JSON into `metric_info_dir`, and write the
        license text to a separate LICENSE file in the same directory.

        Args:
            metric_info_dir (`str`):
                The directory to save `metric_info_dir` to.

        Example:

        ```py
        >>> my_metric.info.write_to_directory("/path/to/directory/")
        ```
        """
        info_path = os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME)
        license_path = os.path.join(metric_info_dir, config.LICENSE_FILENAME)

        with open(info_path, "w", encoding="utf-8") as f:
            json.dump(asdict(self), f)

        with open(license_path, "w", encoding="utf-8") as f:
            f.write(self.license)

    @classmethod
    def from_directory(cls, metric_info_dir) -> "EvaluationModuleInfo":
        """Build an `EvaluationModuleInfo` from the JSON file stored in `metric_info_dir`.

        Args:
            metric_info_dir (`str`):
                The directory containing the `metric_info` JSON file. This
                should be the root directory of a specific metric version.

        Example:

        ```py
        >>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/")
        ```
        """
        logger.info(f"Loading Metric info from {metric_info_dir}")
        if not metric_info_dir:
            raise ValueError("Calling EvaluationModuleInfo.from_directory() with undefined metric_info_dir.")

        info_path = os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME)
        with open(info_path, encoding="utf-8") as f:
            return cls.from_dict(json.load(f))

    @classmethod
    def from_dict(cls, metric_info_dict: dict) -> "EvaluationModuleInfo":
        """Instantiate from a plain dict, silently dropping keys that are not dataclass fields."""
        known_fields = {f.name for f in dataclasses.fields(cls)}
        return cls(**{key: value for key, value in metric_info_dict.items() if key in known_fields})
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@dataclass
class MetricInfo(EvaluationModuleInfo):
    """Metadata for a metric module.

    Inherits all fields from `EvaluationModuleInfo`; only the `module_type`
    marker differs, identifying this info object as describing a metric.
    """

    module_type: str = "metric"
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@dataclass
class ComparisonInfo(EvaluationModuleInfo):
    """Metadata for a comparison module.

    Inherits all fields from `EvaluationModuleInfo`; only the `module_type`
    marker differs, identifying this info object as describing a comparison.
    """

    module_type: str = "comparison"
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
@dataclass
class MeasurementInfo(EvaluationModuleInfo):
    """Metadata for a measurement module.

    Inherits all fields from `EvaluationModuleInfo`; only the `module_type`
    marker differs, identifying this info object as describing a measurement.
    """

    module_type: str = "measurement"
|
venv/lib/python3.10/site-packages/evaluate/inspect.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Evaluate Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Lint as: python3
|
| 16 |
+
""" List and inspect metrics."""
|
| 17 |
+
|
| 18 |
+
from typing import Optional
|
| 19 |
+
|
| 20 |
+
import requests
|
| 21 |
+
from datasets import DownloadConfig
|
| 22 |
+
|
| 23 |
+
from .config import EVALUATION_MODULE_TYPES, HF_LIST_ENDPOINT
|
| 24 |
+
from .loading import evaluation_module_factory
|
| 25 |
+
from .utils.logging import get_logger
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
logger = get_logger(__name__)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class SplitsNotFoundError(ValueError):
    """Raised when the splits of a dataset cannot be determined."""
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def list_evaluation_modules(module_type=None, include_community=True, with_details=False):
    """List all evaluation modules available on the Hugging Face Hub.

    Args:
        module_type (`str`, *optional*, defaults to `None`):
            Type of evaluation modules to list. Has to be one of `'metric'`, `'comparison'`, or `'measurement'`. If `None`, all types are listed.
        include_community (`bool`, *optional*, defaults to `True`):
            Include community modules in the list.
        with_details (`bool`, *optional*, defaults to `False`):
            Return the full details on the metrics instead of only the ID.

    Returns:
        `List[Union[str, dict]]`

    Example:

    ```py
    >>> from evaluate import list_evaluation_modules
    >>> list_evaluation_modules(module_type="metric")
    ```
    """
    if module_type is None:
        # No filter: aggregate the listings for every known module type.
        modules = []
        for current_type in EVALUATION_MODULE_TYPES:
            modules.extend(
                _list_evaluation_modules_type(
                    current_type, include_community=include_community, with_details=with_details
                )
            )
        return modules

    if module_type not in EVALUATION_MODULE_TYPES:
        raise ValueError(f"Invalid module type '{module_type}'. Has to be one of {EVALUATION_MODULE_TYPES}.")
    return _list_evaluation_modules_type(
        module_type, include_community=include_community, with_details=with_details
    )
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _list_evaluation_modules_type(module_type, include_community=True, with_details=False):
    """Fetch the Hub listing for one module type ("metric", "comparison" or "measurement")."""
    canonical_namespace = f"evaluate-{module_type}"

    response = requests.get(HF_LIST_ENDPOINT.format(type=module_type))
    response.raise_for_status()
    entries = response.json()

    if not include_community:
        entries = [entry for entry in entries if entry["id"].split("/")[0] == canonical_namespace]

    # Strip the namespace off canonical modules and tag every entry as community or not.
    for entry in entries:
        if entry["id"].split("/")[0] == canonical_namespace:
            entry["id"] = entry["id"].split("/")[1]
            entry["community"] = False
        else:
            entry["community"] = True

    if not with_details:
        return [entry["id"] for entry in entries]
    return [
        {
            "name": entry["id"],
            "type": module_type,
            "community": entry["community"],
            "likes": entry.get("likes", 0),
        }
        for entry in entries
    ]
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def inspect_evaluation_module(
    path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs
):
    r"""
    Copy an evaluation script to `local_path` so it can be inspected or modified locally.

    Args:
        path (``str``): path to the evaluation script. Can be either:

            - a local path to script or the directory containing the script (if the script has the same name as the directory),
                e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``evaluate.list_evaluation_modules()``)
                e.g. ``'accuracy'``, ``'bleu'`` or ``'word_length'``
        local_path (``str``): path to the local folder to copy the dataset script to.
        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
        **download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
    """
    module = evaluation_module_factory(
        path, download_config=download_config, force_local_path=local_path, **download_kwargs
    )
    print(
        f"The processing scripts for metric {path} can be inspected at {local_path}. "
        f"The main class is in {module.module_path}. "
        f"You can modify this processing scripts and use it with `evaluate.load({local_path})`."
    )
|
venv/lib/python3.10/site-packages/evaluate/loading.py
ADDED
|
@@ -0,0 +1,771 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Lint as: python3
|
| 16 |
+
"""Access datasets."""
|
| 17 |
+
import filecmp
|
| 18 |
+
import importlib
|
| 19 |
+
import inspect
|
| 20 |
+
import json
|
| 21 |
+
import os
|
| 22 |
+
import re
|
| 23 |
+
import shutil
|
| 24 |
+
import time
|
| 25 |
+
from dataclasses import dataclass
|
| 26 |
+
from pathlib import Path
|
| 27 |
+
from typing import List, Optional, Tuple, Type, Union
|
| 28 |
+
from urllib.parse import urlparse
|
| 29 |
+
|
| 30 |
+
from datasets import DownloadConfig, DownloadMode
|
| 31 |
+
from datasets.builder import DatasetBuilder
|
| 32 |
+
from datasets.packaged_modules import _EXTENSION_TO_MODULE, _hash_python_lines
|
| 33 |
+
from datasets.utils.filelock import FileLock
|
| 34 |
+
from datasets.utils.version import Version
|
| 35 |
+
|
| 36 |
+
from . import SCRIPTS_VERSION, config
|
| 37 |
+
from .module import EvaluationModule
|
| 38 |
+
from .utils.file_utils import (
|
| 39 |
+
cached_path,
|
| 40 |
+
head_hf_s3,
|
| 41 |
+
hf_hub_url,
|
| 42 |
+
init_hf_modules,
|
| 43 |
+
is_relative_path,
|
| 44 |
+
relative_to_absolute_path,
|
| 45 |
+
url_or_path_join,
|
| 46 |
+
)
|
| 47 |
+
from .utils.logging import get_logger
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
logger = get_logger(__name__)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + ["zip"]
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def init_dynamic_modules(
    name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
):
    """
    Create a package named `name` in which dynamic modules (metrics or datasets)
    can be added and later imported by name.

    The package lives in the HF_MODULE_CACHE directory by default
    (~/.cache/huggingface/modules); pass `hf_modules_cache` to place it elsewhere.
    Returns the path to the created package directory.
    """
    hf_modules_cache = init_hf_modules(hf_modules_cache)
    dynamic_modules_path = os.path.join(hf_modules_cache, name)
    os.makedirs(dynamic_modules_path, exist_ok=True)

    # An (empty) __init__.py makes the directory importable as a package.
    init_file = os.path.join(dynamic_modules_path, "__init__.py")
    if not os.path.exists(init_file):
        with open(init_file, "w"):
            pass
    return dynamic_modules_path
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def import_main_class(module_path) -> Optional[Union[Type[DatasetBuilder], Type[EvaluationModule]]]:
    """Import a module at module_path and return its main class, a Metric by default"""
    module = importlib.import_module(module_path)

    # Return the first concrete (non-abstract) EvaluationModule subclass defined
    # in the imported module, or None if there is none.
    for obj in module.__dict__.values():
        if isinstance(obj, type) and issubclass(obj, EvaluationModule) and not inspect.isabstract(obj):
            return obj
    return None
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def files_to_hash(file_paths: List[str]) -> str:
    """
    Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
    """
    # Expand every directory into the python files it contains (recursively).
    expanded_paths: List[Union[Path, str]] = []
    for path in file_paths:
        if os.path.isdir(path):
            expanded_paths.extend(Path(path).rglob("*.[pP][yY]"))
        else:
            expanded_paths.append(path)

    # Concatenate the content of every file, then hash the combined lines.
    all_lines = []
    for path in expanded_paths:
        with open(path, encoding="utf-8") as f:
            all_lines.extend(f.readlines())
    return _hash_python_lines(all_lines)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
    """Convert a link to a file on a github repo in a link to the raw github object."""
    parsed = urlparse(url_path)
    sub_directory = None

    # Anything that is not a github.com http(s)/s3 URL passes through untouched.
    if parsed.scheme not in ("http", "https", "s3") or parsed.netloc != "github.com":
        return url_path, sub_directory

    if "blob" in url_path:
        # Direct link to a file: rewrite it to the raw-content URL.
        if not url_path.endswith(".py"):
            raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
        return url_path.replace("blob", "raw"), sub_directory

    # Link to a repository (optionally a branch via /tree/): point to its zip archive.
    github_path = parsed.path[1:]
    repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
    repo_owner, repo_name = repo_info.split("/")
    archive_url = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
    return archive_url, f"{repo_name}-{branch}"
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def increase_load_count(name: str, resource_type: str):
    """Update the download count of a dataset or metric."""
    if config.HF_EVALUATE_OFFLINE or not config.HF_UPDATE_DOWNLOAD_COUNTS:
        return
    try:
        # Best-effort telemetry ping; any failure is deliberately ignored.
        head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
    except Exception:
        pass
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
    """Find whether we should import or clone additional files for a given processing script.
    And list the import.

    Returns a list of tuples ``(import_type, import_name, import_path, sub_directory)``
    where ``import_type`` is one of ``"library"``, ``"internal"`` or ``"external"`` and
    ``sub_directory`` is only set for external imports resolved to a github archive.

    We allow:
    - library dependencies,
    - local dependencies and
    - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository.
        external dependencies will be downloaded (and extracted if needed in the dataset folder).
        We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.

    Note that only direct import in the dataset processing script will be handled
    We don't recursively explore the additional import to download further files.

    Example::

        import tensorflow
        import .c4_utils
        import .clicr.dataset-code.build_json_dataset  # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
    """
    lines = []
    with open(file_path, encoding="utf-8") as f:
        lines.extend(f.readlines())

    logger.debug(f"Checking {file_path} for additional imports.")
    imports: List[Tuple[str, str, str, Optional[str]]] = []
    is_in_docstring = False
    for line in lines:
        docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)

        if len(docstr_start_match) == 1:
            # flip True <=> False only if doctstring
            # starts at line without finishing
            is_in_docstring = not is_in_docstring

        if is_in_docstring:
            # import statements in doctstrings should
            # not be added as required dependencies
            continue

        # group(1): leading '.' for relative imports, group(2): module name,
        # group(3): optional "# From: <url>" annotation.
        match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
        if match is None:
            match = re.match(
                r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
                line,
                flags=re.MULTILINE,
            )
            if match is None:
                continue
        if match.group(1):
            # The import starts with a '.', we will download the relevant file
            if any(imp[1] == match.group(2) for imp in imports):
                # We already have this import
                continue
            if match.group(3):
                # The import has a comment with 'From:', we'll retrieve it from the given url
                url_path = match.group(3)
                url_path, sub_directory = convert_github_url(url_path)
                imports.append(("external", match.group(2), url_path, sub_directory))
            elif match.group(2):
                # The import should be at the same place as the file
                imports.append(("internal", match.group(2), match.group(2), None))
        else:
            if match.group(3):
                # The import has a comment with `From: git+https:...`, asks user to pip install from git.
                url_path = match.group(3)
                imports.append(("library", match.group(2), url_path, None))
            else:
                imports.append(("library", match.group(2), match.group(2), None))

    return imports
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def _download_additional_modules(
    name: str,
    base_path: str,
    imports: List[Tuple[str, str, str, Optional[str]]],
    download_config: Optional[DownloadConfig],
) -> List[Tuple[str, str]]:
    """
    Download additional module for a module <name>.py at URL (or local path) <base_path>/<name>.py
    The imports must have been parsed first using ``get_imports``.

    If some modules need to be installed with pip, an error is raised showing how to install them.
    This function return the list of downloaded modules as tuples (import_name, module_file_path).

    The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
    """
    local_imports = []
    library_imports = []
    # The signature allows `download_config=None`; fall back to a default config
    # instead of crashing on `None.copy()`.
    download_config = download_config.copy() if download_config is not None else DownloadConfig()
    if download_config.download_desc is None:
        download_config.download_desc = "Downloading extra modules"
    for import_type, import_name, import_path, sub_directory in imports:
        if import_type == "library":
            library_imports.append((import_name, import_path))  # Import from a library
            continue

        if import_name == name:
            raise ValueError(
                f"Error in the {name} script, importing relative {import_name} module "
                f"but {import_name} is the name of the script. "
                f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
                f"comment pointing to the original relative import file path."
            )
        if import_type == "internal":
            url_or_filename = url_or_path_join(base_path, import_path + ".py")
        elif import_type == "external":
            url_or_filename = import_path
        else:
            raise ValueError("Wrong import_type")

        local_import_path = cached_path(
            url_or_filename,
            download_config=download_config,
        )
        if sub_directory is not None:
            local_import_path = os.path.join(local_import_path, sub_directory)
        local_imports.append((import_name, local_import_path))

    # Check library imports: every "library" dependency must already be installed.
    needs_to_be_installed = set()
    for library_import_name, library_import_path in library_imports:
        try:
            lib = importlib.import_module(library_import_name)  # noqa F841
        except ImportError:
            # Map the import name to its PyPI package name where they differ.
            library_import_name = "scikit-learn" if library_import_name == "sklearn" else library_import_name
            needs_to_be_installed.add((library_import_name, library_import_path))
    if needs_to_be_installed:
        raise ImportError(
            f"To be able to use {name}, you need to install the following dependencies"
            f"{[lib_name for lib_name, lib_path in needs_to_be_installed]} using 'pip install "
            f"{' '.join([lib_path for lib_name, lib_path in needs_to_be_installed])}' for instance'"
        )
    return local_imports
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def _copy_script_and_other_resources_in_importable_dir(
    name: str,
    importable_directory_path: str,
    subdirectory_name: str,
    original_local_path: str,
    local_imports: List[Tuple[str, str]],
    additional_files: List[Tuple[str, str]],
    download_mode: Optional[DownloadMode],
) -> str:
    """Copy a script and its required imports to an importable directory.

    Args:
        name (str): name of the resource to load
        importable_directory_path (str): path to the loadable folder in the dynamic modules directory
        subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
        original_local_path (str): local path to the resource script
        local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
        additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
        download_mode (Optional[DownloadMode]): download mode

    Return:
        importable_local_file: path to an importable module with importlib.import_module
    """

    # Define a directory with a unique name in our dataset or metric folder
    # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
    # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
    importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
    importable_local_file = os.path.join(importable_subdirectory, name + ".py")

    # Prevent parallel disk operations
    lock_path = importable_directory_path + ".lock"
    with FileLock(lock_path):
        # Create main dataset/metrics folder if needed
        if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
            shutil.rmtree(importable_directory_path)
        os.makedirs(importable_directory_path, exist_ok=True)

        # add an __init__ file to the main dataset folder if needed
        init_file_path = os.path.join(importable_directory_path, "__init__.py")
        if not os.path.exists(init_file_path):
            with open(init_file_path, "w"):
                pass

        # Create hash dataset folder if needed
        os.makedirs(importable_subdirectory, exist_ok=True)
        # add an __init__ file to the hash dataset folder if needed
        init_file_path = os.path.join(importable_subdirectory, "__init__.py")
        if not os.path.exists(init_file_path):
            with open(init_file_path, "w"):
                pass

        # Copy dataset.py file in hash folder if needed
        if not os.path.exists(importable_local_file):
            shutil.copyfile(original_local_path, importable_local_file)

        # Record metadata associating original dataset path with local unique folder
        # Use os.path.splitext instead of str.split(".py") so a ".py" substring earlier in
        # the path (e.g. inside a parent directory name) cannot truncate the path; only the
        # final ".py" extension is stripped, giving filename.json instead of filename.py.json
        meta_path = os.path.splitext(importable_local_file)[0] + ".json"
        if not os.path.exists(meta_path):
            meta = {"original file path": original_local_path, "local file path": importable_local_file}
            with open(meta_path, "w", encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)

        # Copy all the additional imports
        for import_name, import_path in local_imports:
            if os.path.isfile(import_path):
                full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
                if not os.path.exists(full_path_local_import):
                    shutil.copyfile(import_path, full_path_local_import)
            elif os.path.isdir(import_path):
                full_path_local_import = os.path.join(importable_subdirectory, import_name)
                if not os.path.exists(full_path_local_import):
                    shutil.copytree(import_path, full_path_local_import)
            else:
                raise OSError(f"Error with local import at {import_path}")

        # Copy additional files like dataset infos file if needed
        for file_name, original_path in additional_files:
            destination_additional_path = os.path.join(importable_subdirectory, file_name)
            if not os.path.exists(destination_additional_path) or not filecmp.cmp(
                original_path, destination_additional_path
            ):
                shutil.copyfile(original_path, destination_additional_path)
        return importable_local_file
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def _create_importable_file(
    local_path: str,
    local_imports: List[Tuple[str, str]],
    additional_files: List[Tuple[str, str]],
    dynamic_modules_path: str,
    module_namespace: str,
    name: str,
    download_mode: DownloadMode,
) -> Tuple[str, str]:
    """Copy a module script and its local imports into the dynamic modules cache.

    Args:
        local_path (str): local path to the module script
        local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
        additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
        dynamic_modules_path (str): root directory of the dynamic modules cache
        module_namespace (str): namespace subfolder inside the cache (e.g. the module type)
        name (str): name of the module; a "/" (namespace) is mapped to "--" on disk
        download_mode (DownloadMode): download mode forwarded to the copy step

    Return:
        Tuple of the dotted import path usable with importlib.import_module and the
        content hash used as the cache subdirectory name.
    """
    importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
    Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
    (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
    # hash of the script plus all its local imports (renamed from `hash` to avoid
    # shadowing the builtin)
    module_hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
    importable_local_file = _copy_script_and_other_resources_in_importable_dir(
        name=name.split("/")[-1],
        importable_directory_path=importable_directory_path,
        subdirectory_name=module_hash,
        original_local_path=local_path,
        local_imports=local_imports,
        additional_files=additional_files,
        download_mode=download_mode,
    )
    logger.debug(f"Created importable dataset file at {importable_local_file}")
    module_path = ".".join(
        [
            os.path.basename(dynamic_modules_path),
            module_namespace,
            name.replace("/", "--"),
            module_hash,
            name.split("/")[-1],
        ]
    )
    return module_path, module_hash
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
@dataclass
class ImportableModule:
    """A module already materialized in the dynamic modules cache, identified by the
    dotted path to import it with ``importlib.import_module`` and its content hash."""

    # Dotted import path built from the dynamic-modules-cache directory basename, the
    # module namespace, the module name and the hash (see `_create_importable_file`).
    module_path: str
    # Content hash of the script (and its local imports); also the name of the cache
    # subdirectory holding this version. Kept as `hash` for API compatibility even
    # though it shadows the builtin when used as a keyword.
    hash: str
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
class _EvaluationModuleFactory:
    """Base class for the factories that resolve an evaluation module (local script,
    Hub repository, or cache) into an :class:`ImportableModule`."""

    def get_module(self) -> ImportableModule:
        # Subclasses implement the actual resolution strategy.
        raise NotImplementedError
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
class LocalEvaluationModuleFactory(_EvaluationModuleFactory):
    """Get the module of a local metric. The metric script is loaded from a local script."""

    def __init__(
        self,
        path: str,
        module_type: str = "metrics",
        download_config: Optional[DownloadConfig] = None,
        download_mode: Optional[DownloadMode] = None,
        dynamic_modules_path: Optional[str] = None,
    ):
        # Just record the configuration; all work is deferred to get_module().
        self.path = path
        self.module_type = module_type
        self.name = Path(path).stem
        self.download_config = download_config or DownloadConfig()
        self.download_mode = download_mode
        self.dynamic_modules_path = dynamic_modules_path

    def get_module(self) -> ImportableModule:
        """Copy the local script (and its relative imports) into the dynamic modules
        cache and return the resulting importable module."""
        # Resolve the relative imports declared in the script, downloading any that
        # point to remote files.
        resolved_imports = _download_additional_modules(
            name=self.name,
            base_path=str(Path(self.path).parent),
            imports=get_imports(self.path),
            download_config=self.download_config,
        )
        # Place the script and its imports in an importable directory of the cache.
        modules_cache = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
        module_path, module_hash = _create_importable_file(
            local_path=self.path,
            local_imports=resolved_imports,
            additional_files=[],
            dynamic_modules_path=modules_cache,
            module_namespace=self.module_type,
            name=self.name,
            download_mode=self.download_mode,
        )
        # Make sure the import system notices the freshly written files.
        importlib.invalidate_caches()
        return ImportableModule(module_path, module_hash)
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
class HubEvaluationModuleFactory(_EvaluationModuleFactory):
    """Get the module of a metric from a metric repository on the Hub.

    ``name`` must be a two-part repository id (``namespace/repo_name``); the loading
    script inside the repository is expected to be named ``<repo_name>.py``.
    """

    def __init__(
        self,
        name: str,
        module_type: str = "metrics",
        revision: Optional[Union[str, Version]] = None,
        download_config: Optional[DownloadConfig] = None,
        download_mode: Optional[DownloadMode] = None,
        dynamic_modules_path: Optional[str] = None,
    ):
        self.name = name
        self.module_type = module_type
        self.revision = revision
        self.download_config = download_config or DownloadConfig()
        self.download_mode = download_mode
        self.dynamic_modules_path = dynamic_modules_path
        # Hub repository ids always have exactly one "/" (namespace/repo_name).
        assert self.name.count("/") == 1
        increase_load_count(name, resource_type="metric")

    def download_loading_script(self, revision) -> str:
        """Download the repository's loading script at `revision` and return its local cached path."""
        # The loading script shares its filename with the repository name.
        file_path = hf_hub_url(path=self.name, name=self.name.split("/")[1] + ".py", revision=revision)
        download_config = self.download_config.copy()
        if download_config.download_desc is None:
            download_config.download_desc = "Downloading builder script"
        return cached_path(file_path, download_config=download_config)

    def get_module(self) -> ImportableModule:
        # Revision precedence: explicit argument > HF_SCRIPTS_VERSION env var > library default.
        revision = self.revision or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION)

        if re.match(r"\d*\.\d*\.\d*", revision):  # revision is version number (three digits separated by full stops)
            revision = "v" + revision  # tagging convention on evaluate repository starts with v

        # get script and other files
        try:
            local_path = self.download_loading_script(revision)
        except FileNotFoundError as err:
            # if there is no file found with current revision tag try to load main
            # (only when the revision was inferred, never when the caller pinned one)
            if self.revision is None and os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION) != "main":
                revision = "main"
                local_path = self.download_loading_script(revision)
            else:
                raise err

        # Resolve the relative imports declared in the downloaded script.
        imports = get_imports(local_path)
        local_imports = _download_additional_modules(
            name=self.name,
            base_path=hf_hub_url(path=self.name, name="", revision=revision),
            imports=imports,
            download_config=self.download_config,
        )
        # copy the script and the files in an importable directory
        dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
        module_path, hash = _create_importable_file(
            local_path=local_path,
            local_imports=local_imports,
            additional_files=[],
            dynamic_modules_path=dynamic_modules_path,
            module_namespace=self.module_type,
            name=self.name,
            download_mode=self.download_mode,
        )
        # make the new module to be noticed by the import system
        importlib.invalidate_caches()
        return ImportableModule(module_path, hash)
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
class CachedEvaluationModuleFactory(_EvaluationModuleFactory):
    """
    Get the module of a metric that has been loaded once already and cached.
    The script that is loaded from the cache is the most recent one with a matching name.
    """

    def __init__(
        self,
        name: str,
        module_type: str = "metrics",
        dynamic_modules_path: Optional[str] = None,
    ):
        self.name = name
        self.module_type = module_type
        self.dynamic_modules_path = dynamic_modules_path
        # Cached entries are flat names ("--" already substituted for any "/").
        assert self.name.count("/") == 0

    def get_module(self) -> ImportableModule:
        """Return the most recently modified cached version of this module."""
        modules_cache = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
        cached_module_dir = os.path.join(modules_cache, self.module_type, self.name)
        # Every cached version lives in a subdirectory named by a 64-character hash.
        candidate_hashes = None
        if os.path.isdir(cached_module_dir):
            candidate_hashes = [entry for entry in os.listdir(cached_module_dir) if len(entry) == 64]
        if not candidate_hashes:
            raise FileNotFoundError(f"Metric {self.name} is not cached in {modules_cache}")

        # Rank the cached versions by the mtime of their script file.
        def _mtime(candidate_hash):
            script_file = Path(cached_module_dir) / candidate_hash / (self.name.split("--")[-1] + ".py")
            return script_file.stat().st_mtime

        newest_hash = sorted(candidate_hashes, key=_mtime)[-1]
        logger.warning(
            f"Using the latest cached version of the module from {os.path.join(cached_module_dir, newest_hash)} "
            f"(last modified on {time.ctime(_mtime(newest_hash))}) since it "
            f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub."
        )
        # make the new module to be noticed by the import system
        module_path = ".".join(
            [os.path.basename(modules_cache), self.module_type, self.name, newest_hash, self.name.split("--")[-1]]
        )
        importlib.invalidate_caches()
        return ImportableModule(module_path, newest_hash)
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def evaluation_module_factory(
    path: str,
    module_type: Optional[str] = None,
    revision: Optional[Union[str, Version]] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[DownloadMode] = None,
    force_local_path: Optional[str] = None,
    dynamic_modules_path: Optional[str] = None,
    **download_kwargs,
) -> ImportableModule:
    """
    Download/extract/cache a metric module.

    Metrics codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks).

    Args:

        path (str): Path or name of the metric script.

            - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory):
              -> load the module from the metric script
              e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``.
            - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`)
              -> load the module from the metric script in the github repository at huggingface/datasets
              e.g. ``'accuracy'`` or ``'rouge'``.

        module_type (Optional str): Type of evaluation module to load; when omitted all
            known types are tried in turn.
        revision (Optional ``Union[str, datasets.Version]``):
            If specified, the module will be loaded from the datasets repository at this version.
            By default:
            - it is set to the local version of the lib.
            - it will also try to load it from the master branch if it's not available at the local version of the lib.
            Specifying a version that is different from your local version of the lib might cause compatibility issues.
        download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
        download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
        force_local_path (Optional str): Optional path to a local path to download and prepare the script to.
            Used to inspect or modify the script folder.
        dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
            Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
            By default the datasets and metrics are stored inside the `datasets_modules` module.
        download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.

    Returns:
        ImportableModule
    """
    if download_config is None:
        download_config = DownloadConfig(**download_kwargs)
    download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
    download_config.extract_compressed_file = True
    download_config.force_extract = True

    # Derive the expected script filename from the last non-empty path component.
    filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
    if not filename.endswith(".py"):
        filename = filename + ".py"
    combined_path = os.path.join(path, filename)
    # Try locally
    if path.endswith(filename):
        # `path` points directly at a script file.
        if os.path.isfile(path):
            return LocalEvaluationModuleFactory(
                path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
            ).get_module()
        else:
            raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}")
    elif os.path.isfile(combined_path):
        # `path` is a directory containing a script of the same name.
        return LocalEvaluationModuleFactory(
            combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
        ).get_module()
    elif is_relative_path(path) and path.count("/") <= 1 and not force_local_path:
        try:
            # load a canonical evaluation module from hub
            if path.count("/") == 0:
                # if no type provided look through all possible modules
                if module_type is None:
                    for current_type in ["metric", "comparison", "measurement"]:
                        try:
                            return HubEvaluationModuleFactory(
                                f"evaluate-{current_type}/{path}",
                                revision=revision,
                                download_config=download_config,
                                download_mode=download_mode,
                                dynamic_modules_path=dynamic_modules_path,
                            ).get_module()
                        except ConnectionError:
                            # Not found under this type namespace; try the next one.
                            pass
                    raise FileNotFoundError
                # if module_type provided load specific module_type
                else:
                    return HubEvaluationModuleFactory(
                        f"evaluate-{module_type}/{path}",
                        revision=revision,
                        download_config=download_config,
                        download_mode=download_mode,
                        dynamic_modules_path=dynamic_modules_path,
                    ).get_module()
            # load community evaluation module from hub
            elif path.count("/") == 1:
                return HubEvaluationModuleFactory(
                    path,
                    revision=revision,
                    download_config=download_config,
                    download_mode=download_mode,
                    dynamic_modules_path=dynamic_modules_path,
                ).get_module()
        except Exception as e1:  # noqa: all the attempts failed, before raising the error we should check if the module is already cached.
            # if it's a canonical module we need to check if it's any of the types
            if path.count("/") == 0:
                for current_type in ["metric", "comparison", "measurement"]:
                    try:
                        return CachedEvaluationModuleFactory(
                            f"evaluate-{current_type}--{path}", dynamic_modules_path=dynamic_modules_path
                        ).get_module()
                    except Exception as e2:  # noqa: if it's not in the cache, then it doesn't exist.
                        pass
            # if it's a community module we just need to check on path
            elif path.count("/") == 1:
                try:
                    return CachedEvaluationModuleFactory(
                        path.replace("/", "--"), dynamic_modules_path=dynamic_modules_path
                    ).get_module()
                except Exception as e2:  # noqa: if it's not in the cache, then it doesn't exist.
                    pass
            # Unexpected errors propagate unchanged; only the "not found" family is
            # translated into a single consolidated FileNotFoundError.
            if not isinstance(e1, (ConnectionError, FileNotFoundError)):
                raise e1 from None
            raise FileNotFoundError(
                f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}. "
                f"Module '{path}' doesn't exist on the Hugging Face Hub either."
            ) from None
    else:
        raise FileNotFoundError(f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}.")
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
def load(
    path: str,
    config_name: Optional[str] = None,
    module_type: Optional[str] = None,
    process_id: int = 0,
    num_process: int = 1,
    cache_dir: Optional[str] = None,
    experiment_id: Optional[str] = None,
    keep_in_memory: bool = False,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[DownloadMode] = None,
    revision: Optional[Union[str, Version]] = None,
    **init_kwargs,
) -> EvaluationModule:
    """Load a [`~evaluate.EvaluationModule`].

    Args:

        path (`str`):
            Path to the evaluation processing script with the evaluation builder. Can be either:
                - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
                    e.g. `'./metrics/rouge'` or `'./metrics/rouge/rouge.py'`
                - an evaluation module identifier on the HuggingFace evaluate repo e.g. `'rouge'` or `'bleu'` that are in either `'metrics/'`,
                    `'comparisons/'`, or `'measurements/'` depending on the provided `module_type`
        config_name (`str`, *optional*):
            Selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
        module_type (`str`, default `'metric'`):
            Type of evaluation module, can be one of `'metric'`, `'comparison'`, or `'measurement'`.
        process_id (`int`, *optional*):
            For distributed evaluation: id of the process.
        num_process (`int`, *optional*):
            For distributed evaluation: total number of processes.
        cache_dir (`str`, *optional*):
            Path to store the temporary predictions and references (default to `~/.cache/huggingface/evaluate/`).
        experiment_id (`str`):
            A specific experiment id. This is used if several distributed evaluations share the same file system.
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        keep_in_memory (`bool`):
            Whether to store the temporary results in memory (defaults to `False`).
        download_config ([`~evaluate.DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`], defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        revision (`Union[str, evaluate.Version]`, *optional*):
            If specified, the module will be loaded from the datasets repository
            at this version. By default it is set to the local version of the lib. Specifying a version that is different from
            your local version of the lib might cause compatibility issues.

    Returns:
        [`evaluate.EvaluationModule`]

    Example:

    ```py
    >>> import evaluate
    >>> accuracy = evaluate.load("accuracy")
    ```
    """
    download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
    # Resolve the script (local path, Hub repo, or cache) into an importable module.
    evaluation_module = evaluation_module_factory(
        path, module_type=module_type, revision=revision, download_config=download_config, download_mode=download_mode
    )
    evaluation_cls = import_main_class(evaluation_module.module_path)
    evaluation_instance = evaluation_cls(
        config_name=config_name,
        process_id=process_id,
        num_process=num_process,
        cache_dir=cache_dir,
        keep_in_memory=keep_in_memory,
        experiment_id=experiment_id,
        hash=evaluation_module.hash,
        **init_kwargs,
    )

    # A module of a *different* type may have been found (e.g. a measurement when a
    # metric was requested); surface that as an explicit error.
    if module_type and module_type != evaluation_instance.module_type:
        raise TypeError(
            f"No module of module type '{module_type}' found for '{path}' locally, or on the Hugging Face Hub. Found a module of module type '{evaluation_instance.module_type}' instead."
        )

    # Download and prepare resources for the metric
    evaluation_instance.download_and_prepare(download_config=download_config)

    return evaluation_instance
|
venv/lib/python3.10/site-packages/evaluate/module.py
ADDED
|
@@ -0,0 +1,1029 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Lint as: python3
|
| 16 |
+
""" EvaluationModule base class."""
|
| 17 |
+
import collections
|
| 18 |
+
import itertools
|
| 19 |
+
import os
|
| 20 |
+
import types
|
| 21 |
+
import uuid
|
| 22 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 23 |
+
|
| 24 |
+
import numpy as np
|
| 25 |
+
import pyarrow as pa
|
| 26 |
+
from datasets import DatasetInfo, DownloadConfig, DownloadManager
|
| 27 |
+
from datasets.arrow_dataset import Dataset
|
| 28 |
+
from datasets.arrow_reader import ArrowReader
|
| 29 |
+
from datasets.arrow_writer import ArrowWriter
|
| 30 |
+
from datasets.features import Features, Sequence, Value
|
| 31 |
+
from datasets.features.features import _check_non_null_non_empty_recursive
|
| 32 |
+
from datasets.utils.filelock import BaseFileLock, FileLock, Timeout
|
| 33 |
+
from datasets.utils.py_utils import copyfunc, temp_seed, zip_dict
|
| 34 |
+
|
| 35 |
+
from . import config
|
| 36 |
+
from .info import EvaluationModuleInfo
|
| 37 |
+
from .naming import camelcase_to_snakecase
|
| 38 |
+
from .utils.logging import get_logger
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
logger = get_logger(__name__)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class FileFreeLock(BaseFileLock):
    """Thread lock until a file **cannot** be locked

    Inverts the usual lock semantics: ``acquire`` succeeds only once the
    underlying file *is already locked by someone else*. This is used to wait
    until another process has taken its cache-file lock.
    """

    def __init__(self, lock_file, *args, **kwargs):
        # The real file lock we probe to see whether another process holds it.
        self.filelock = FileLock(lock_file)
        super().__init__(lock_file, *args, **kwargs)

    def _acquire(self):
        # Probe with a very short timeout; `poll_intervall` is the (historically
        # misspelled) parameter name of the vendored filelock API.
        try:
            self.filelock.acquire(timeout=0.01, poll_intervall=0.02)  # Try to lock once
        except Timeout:
            # We couldn't acquire the lock, the file is locked!
            self._lock_file_fd = self.filelock.lock_file
        else:
            # We were able to acquire the lock, the file is not yet locked!
            self.filelock.release()
            self._lock_file_fd = None

    def _release(self):
        # Nothing was actually held on disk; just mark this pseudo-lock as free.
        self._lock_file_fd = None
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# lists - summarize long lists similarly to NumPy
# arrays/tensors - let the frameworks control formatting
def summarize_if_long_list(obj):
    """Return a short string representation of ``obj``.

    Plain ``list`` instances longer than 6 elements are abbreviated to their
    first and last three items (similar to NumPy's summarized printing).
    Everything else — including list subclasses, arrays and tensors — is
    formatted by its own ``__str__`` so frameworks control their output.
    """
    # Deliberately an exact-type check (not isinstance): subclasses and
    # array-likes must keep their native formatting.
    if type(obj) is not list or len(obj) <= 6:
        return f"{obj}"

    def format_chunk(chunk):
        return ", ".join(repr(x) for x in chunk)

    return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class EvaluationModuleInfoMixin:
    """This base class exposes some attributes of EvaluationModuleInfo
    at the base level of the EvaluationModule for easy access.
    """

    def __init__(self, info: EvaluationModuleInfo):
        # The wrapped EvaluationModuleInfo object all properties delegate to.
        self._module_info = info

    @property
    def info(self):
        """:class:`evaluate.EvaluationModuleInfo` object containing all the metadata in the evaluation module."""
        return self._module_info

    @property
    def name(self) -> str:
        """Name of the module (set from the snake-cased class name in `EvaluationModule.__init__`)."""
        return self._module_info.module_name

    @property
    def experiment_id(self) -> Optional[str]:
        """Experiment id used to disambiguate cache files between runs."""
        return self._module_info.experiment_id

    @property
    def description(self) -> str:
        """Free-text description of the module."""
        return self._module_info.description

    @property
    def citation(self) -> str:
        """BibTeX/citation string for the module."""
        return self._module_info.citation

    @property
    def features(self) -> Features:
        """Expected input features (a `Features` object, or a list of candidates)."""
        return self._module_info.features

    @property
    def inputs_description(self) -> str:
        """Usage text appended to the `compute`/`add`/`add_batch` docstrings."""
        return self._module_info.inputs_description

    @property
    def homepage(self) -> Optional[str]:
        """Homepage URL of the module, if any."""
        return self._module_info.homepage

    @property
    def license(self) -> str:
        """License string of the module."""
        return self._module_info.license

    @property
    def codebase_urls(self) -> Optional[List[str]]:
        """URLs of the module's reference implementation(s)."""
        return self._module_info.codebase_urls

    @property
    def reference_urls(self) -> Optional[List[str]]:
        """URLs of reference material (papers, docs)."""
        return self._module_info.reference_urls

    @property
    def streamable(self) -> bool:
        """Whether the module supports streaming/incremental computation."""
        return self._module_info.streamable

    @property
    def format(self) -> Optional[str]:
        """Dataset format (e.g. ``"numpy"``) applied to cached data before `_compute`."""
        return self._module_info.format

    @property
    def module_type(self) -> str:
        """Kind of module (metric, comparison, or measurement)."""
        return self._module_info.module_type
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class EvaluationModule(EvaluationModuleInfoMixin):
|
| 145 |
+
"""A `EvaluationModule` is the base class and common API for metrics, comparisons, and measurements.
|
| 146 |
+
|
| 147 |
+
Args:
|
| 148 |
+
config_name (`str`):
|
| 149 |
+
This is used to define a hash specific to a module computation script and prevents the module's data
|
| 150 |
+
to be overridden when the module loading script is modified.
|
| 151 |
+
keep_in_memory (`bool`):
|
| 152 |
+
Keep all predictions and references in memory. Not possible in distributed settings.
|
| 153 |
+
cache_dir (`str`):
|
| 154 |
+
Path to a directory in which temporary prediction/references data will be stored.
|
| 155 |
+
The data directory should be located on a shared file-system in distributed setups.
|
| 156 |
+
num_process (`int`):
|
| 157 |
+
Specify the total number of nodes in a distributed settings.
|
| 158 |
+
This is useful to compute module in distributed setups (in particular non-additive modules like F1).
|
| 159 |
+
process_id (`int`):
|
| 160 |
+
Specify the id of the current process in a distributed setup (between 0 and num_process-1)
|
| 161 |
+
This is useful to compute module in distributed setups (in particular non-additive metrics like F1).
|
| 162 |
+
seed (`int`, optional):
|
| 163 |
+
If specified, this will temporarily set numpy's random seed when [`~evaluate.EvaluationModule.compute`] is run.
|
| 164 |
+
experiment_id (`str`):
|
| 165 |
+
A specific experiment id. This is used if several distributed evaluations share the same file system.
|
| 166 |
+
This is useful to compute module in distributed setups (in particular non-additive metrics like F1).
|
| 167 |
+
hash (`str`):
|
| 168 |
+
Used to identify the evaluation module according to the hashed file contents.
|
| 169 |
+
max_concurrent_cache_files (`int`):
|
| 170 |
+
Max number of concurrent module cache files (default `10000`).
|
| 171 |
+
timeout (`Union[int, float]`):
|
| 172 |
+
Timeout in second for distributed setting synchronization.
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
    def __init__(
        self,
        config_name: Optional[str] = None,
        keep_in_memory: bool = False,
        cache_dir: Optional[str] = None,
        num_process: int = 1,
        process_id: int = 0,
        seed: Optional[int] = None,
        experiment_id: Optional[str] = None,
        hash: Optional[str] = None,
        max_concurrent_cache_files: int = 10000,
        timeout: Union[int, float] = 100,
        **kwargs,
    ):
        # prepare info: subclasses provide metadata via `_info()`; module name,
        # config and experiment id are filled in here.
        self.config_name = config_name or "default"
        info = self._info()
        info.module_name = camelcase_to_snakecase(self.__class__.__name__)
        info.config_name = self.config_name
        info.experiment_id = experiment_id or "default_experiment"
        EvaluationModuleInfoMixin.__init__(self, info)  # For easy access on low level

        # Safety checks on num_process and process_id
        if not isinstance(process_id, int) or process_id < 0:
            raise ValueError("'process_id' should be a number greater than 0")
        if not isinstance(num_process, int) or num_process <= process_id:
            raise ValueError("'num_process' should be a number greater than process_id")
        if keep_in_memory and num_process != 1:
            raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")

        self.num_process = num_process
        self.process_id = process_id
        self.max_concurrent_cache_files = max_concurrent_cache_files

        self.keep_in_memory = keep_in_memory
        self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
        self.data_dir = self._build_data_dir()
        if seed is None:
            # Derive a deterministic-per-state seed from numpy's current RNG state
            # (index `pos` of the Mersenne-Twister key, guarding the 624 bound).
            _, seed, pos, *_ = np.random.get_state()
            self.seed: int = seed[pos] if pos < 624 else seed[0]
        else:
            self.seed: int = seed
        self.timeout: Union[int, float] = timeout

        # Update 'compute' and 'add' docstring
        # methods need to be copied otherwise it changes the docstrings of every instance
        self.compute = types.MethodType(copyfunc(self.compute), self)
        self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
        self.add = types.MethodType(copyfunc(self.add), self)
        self.compute.__func__.__doc__ += self.info.inputs_description
        self.add_batch.__func__.__doc__ += self.info.inputs_description
        self.add.__func__.__doc__ += self.info.inputs_description

        # self.arrow_schema = pa.schema(field for field in self.info.features.type)
        # Writer state, populated lazily on the first `add`/`add_batch` call.
        self.selected_feature_format = None
        self.buf_writer = None
        self.writer = None
        self.writer_batch_size = None
        self.data = None

        # This is the cache file we store our predictions/references in
        # Keep it None for now so we can (cloud)pickle the object
        self.cache_file_name = None
        self.filelock = None
        self.rendez_vous_lock = None

        # This is all the cache files on which we have a lock when we are in a distributed setting
        self.file_paths = None
        self.filelocks = None

        # This fingerprints the evaluation module according to the hashed contents of the module code
        self._hash = hash
|
| 247 |
+
|
| 248 |
+
def __len__(self):
|
| 249 |
+
"""Return the number of examples (predictions or predictions/references pair)
|
| 250 |
+
currently stored in the evaluation module's cache.
|
| 251 |
+
"""
|
| 252 |
+
return 0 if self.writer is None else len(self.writer)
|
| 253 |
+
|
| 254 |
+
def __repr__(self):
|
| 255 |
+
return (
|
| 256 |
+
f'EvaluationModule(name: "{self.name}", module_type: "{self.module_type}", '
|
| 257 |
+
f'features: {self.features}, usage: """{self.inputs_description}""", '
|
| 258 |
+
f"stored examples: {len(self)})"
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
def _build_data_dir(self):
|
| 262 |
+
"""Path of this evaluation module in cache_dir:
|
| 263 |
+
Will be:
|
| 264 |
+
self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
|
| 265 |
+
If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
|
| 266 |
+
"""
|
| 267 |
+
builder_data_dir = self._data_dir_root
|
| 268 |
+
builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
|
| 269 |
+
os.makedirs(builder_data_dir, exist_ok=True)
|
| 270 |
+
return builder_data_dir
|
| 271 |
+
|
| 272 |
+
    def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
        """Create a new cache file. If the default cache file is used, we generated a new hash.

        Tries to lock the default per-(experiment, num_process, process_id)
        cache file; on contention in a single-process setup, retries with a
        fresh uuid-suffixed name up to ``max_concurrent_cache_files`` times.

        Returns:
            Tuple of the chosen cache file path and the acquired `FileLock`.

        Raises:
            ValueError: On lock contention in a distributed setup (file names
                are fixed there), or after exhausting all retries.
        """
        file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
        filelock = None
        for i in range(self.max_concurrent_cache_files):
            filelock = FileLock(file_path + ".lock")
            try:
                filelock.acquire(timeout=timeout)
            except Timeout:
                # If we have reached the max number of attempts or we are not allow to find a free name (distributed setup)
                # We raise an error
                if self.num_process != 1:
                    raise ValueError(
                        f"Error in _create_cache_file: another evaluation module instance is already using the local cache file at {file_path}. "
                        f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
                        f"between distributed evaluation module instances."
                    ) from None
                if i == self.max_concurrent_cache_files - 1:
                    raise ValueError(
                        f"Cannot acquire lock, too many evaluation module instance are operating concurrently on this file system."
                        f"You should set a larger value of max_concurrent_cache_files when creating the evaluation module "
                        f"(current value is {self.max_concurrent_cache_files})."
                    ) from None
                # In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name.
                file_uuid = str(uuid.uuid4())
                file_path = os.path.join(
                    self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
                )
            else:
                break

        return file_path, filelock
|
| 304 |
+
|
| 305 |
+
    def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
        """Get a lock on all the cache files in a distributed setup.
        We wait for timeout second to let all the distributed node finish their tasks (default is 100 seconds).

        Returns:
            Tuple of the per-process cache file paths and the acquired locks
            (process 0's already-held lock is reused).

        Raises:
            ValueError: If no cache file exists yet (single-process case), or a
                process' cache file cannot be locked within ``self.timeout``.
        """
        if self.num_process == 1:
            if self.cache_file_name is None:
                raise ValueError(
                    "Evaluation module cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
                    "at least once before calling `compute`."
                )
            file_paths = [self.cache_file_name]
        else:
            # In a distributed setup file names are deterministic per process.
            file_paths = [
                os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
                for process_id in range(self.num_process)
            ]

        # Let's acquire a lock on each process files to be sure they are finished writing
        filelocks = []
        for process_id, file_path in enumerate(file_paths):
            if process_id == 0:  # process 0 already has its lock file
                filelocks.append(self.filelock)
            else:
                filelock = FileLock(file_path + ".lock")
                try:
                    filelock.acquire(timeout=self.timeout)
                except Timeout:
                    raise ValueError(
                        f"Cannot acquire lock on cached file {file_path} for process {process_id}."
                    ) from None
                else:
                    filelocks.append(filelock)

        return file_paths, filelocks
|
| 339 |
+
|
| 340 |
+
def _check_all_processes_locks(self):
|
| 341 |
+
expected_lock_file_names = [
|
| 342 |
+
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
|
| 343 |
+
for process_id in range(self.num_process)
|
| 344 |
+
]
|
| 345 |
+
for expected_lock_file_name in expected_lock_file_names:
|
| 346 |
+
nofilelock = FileFreeLock(expected_lock_file_name)
|
| 347 |
+
try:
|
| 348 |
+
nofilelock.acquire(timeout=self.timeout)
|
| 349 |
+
except Timeout:
|
| 350 |
+
raise ValueError(
|
| 351 |
+
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
|
| 352 |
+
) from None
|
| 353 |
+
else:
|
| 354 |
+
nofilelock.release()
|
| 355 |
+
|
| 356 |
+
def _check_rendez_vous(self):
|
| 357 |
+
expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
|
| 358 |
+
nofilelock = FileFreeLock(expected_lock_file_name)
|
| 359 |
+
try:
|
| 360 |
+
nofilelock.acquire(timeout=self.timeout)
|
| 361 |
+
except Timeout:
|
| 362 |
+
raise ValueError(
|
| 363 |
+
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
|
| 364 |
+
) from None
|
| 365 |
+
else:
|
| 366 |
+
nofilelock.release()
|
| 367 |
+
lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
|
| 368 |
+
rendez_vous_lock = FileLock(lock_file_name)
|
| 369 |
+
try:
|
| 370 |
+
rendez_vous_lock.acquire(timeout=self.timeout)
|
| 371 |
+
except Timeout:
|
| 372 |
+
raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
|
| 373 |
+
else:
|
| 374 |
+
rendez_vous_lock.release()
|
| 375 |
+
|
| 376 |
+
def _finalize(self):
|
| 377 |
+
"""Close all the writing process and load/gather the data
|
| 378 |
+
from all the nodes if main node or all_process is True.
|
| 379 |
+
"""
|
| 380 |
+
if self.writer is not None:
|
| 381 |
+
self.writer.finalize()
|
| 382 |
+
self.writer = None
|
| 383 |
+
# release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
|
| 384 |
+
if self.filelock is not None and self.process_id > 0:
|
| 385 |
+
self.filelock.release()
|
| 386 |
+
|
| 387 |
+
if self.keep_in_memory:
|
| 388 |
+
# Read the predictions and references
|
| 389 |
+
reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.selected_feature_format))
|
| 390 |
+
self.data = Dataset.from_buffer(self.buf_writer.getvalue())
|
| 391 |
+
|
| 392 |
+
elif self.process_id == 0:
|
| 393 |
+
# Let's acquire a lock on each node files to be sure they are finished writing
|
| 394 |
+
file_paths, filelocks = self._get_all_cache_files()
|
| 395 |
+
|
| 396 |
+
# Read the predictions and references
|
| 397 |
+
try:
|
| 398 |
+
reader = ArrowReader(path="", info=DatasetInfo(features=self.selected_feature_format))
|
| 399 |
+
self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
|
| 400 |
+
except FileNotFoundError:
|
| 401 |
+
raise ValueError(
|
| 402 |
+
"Error in finalize: another evaluation module instance is already using the local cache file. "
|
| 403 |
+
"Please specify an experiment_id to avoid collision between distributed evaluation module instances."
|
| 404 |
+
) from None
|
| 405 |
+
|
| 406 |
+
# Store file paths and locks and we will release/delete them after the computation.
|
| 407 |
+
self.file_paths = file_paths
|
| 408 |
+
self.filelocks = filelocks
|
| 409 |
+
|
| 410 |
+
    def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
        """Compute the evaluation module.

        Usage of positional arguments is not allowed to prevent mistakes.

        Args:
            predictions (`list/array/tensor`, *optional*):
                Predictions.
            references (`list/array/tensor`, *optional*):
                References.
            **kwargs (optional):
                Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
                method (see details in the docstring).

        Return:
            `dict` or `None`

            - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
            - `None` if the evaluation module is not run on the main process (`process_id != 0`).

        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
        ```
        """
        all_kwargs = {"predictions": predictions, "references": references, **kwargs}
        if predictions is None and references is None:
            # No direct inputs: compute over whatever was buffered via `add`/`add_batch`.
            missing_kwargs = {k: None for k in self._feature_names() if k not in all_kwargs}
            all_kwargs.update(missing_kwargs)
        else:
            # Direct inputs: every declared feature must be supplied.
            missing_inputs = [k for k in self._feature_names() if k not in all_kwargs]
            if missing_inputs:
                raise ValueError(
                    f"Evaluation module inputs are missing: {missing_inputs}. All required inputs are {list(self._feature_names())}"
                )
        inputs = {input_name: all_kwargs[input_name] for input_name in self._feature_names()}
        # Remaining kwargs are forwarded to `_compute` untouched.
        compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self._feature_names()}

        if any(v is not None for v in inputs.values()):
            self.add_batch(**inputs)
        self._finalize()

        # Reset per-run state so the module can be reused for another computation.
        self.cache_file_name = None
        self.filelock = None
        self.selected_feature_format = None

        if self.process_id == 0:
            self.data.set_format(type=self.info.format)

            inputs = {input_name: self.data[input_name] for input_name in self._feature_names()}
            # temp_seed makes the computation reproducible without disturbing
            # the caller's global RNG state.
            with temp_seed(self.seed):
                output = self._compute(**inputs, **compute_kwargs)

            if self.buf_writer is not None:
                # In-memory mode: just drop the buffer and data.
                self.buf_writer = None
                del self.data
                self.data = None
            else:
                # Release locks and delete all the cache files. Process 0 is released last.
                for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
                    logger.info(f"Removing {file_path}")
                    del self.data
                    self.data = None
                    del self.writer
                    self.writer = None
                    os.remove(file_path)
                    filelock.release()

            return output
        else:
            return None
|
| 482 |
+
|
| 483 |
+
    def add_batch(self, *, predictions=None, references=None, **kwargs):
        """Add a batch of predictions and references for the evaluation module's stack.

        Args:
            predictions (`list/array/tensor`, *optional*):
                Predictions.
            references (`list/array/tensor`, *optional*):
                References.

        Raises:
            ValueError: On unexpected input names, mismatched column lengths,
                or inputs that don't match the module's feature format.

        Example:

        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
        ...     accuracy.add_batch(references=refs, predictions=preds)
        ```
        """
        bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
        if bad_inputs:
            raise ValueError(
                f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
            )
        batch = {"predictions": predictions, "references": references, **kwargs}
        # Keep only the declared input columns, in declared order.
        batch = {input_name: batch[input_name] for input_name in self._feature_names()}
        if self.writer is None:
            # First batch: pick the matching feature format and open the writer.
            self.selected_feature_format = self._infer_feature_from_batch(batch)
            self._init_writer()
        try:
            for key, column in batch.items():
                if len(column) > 0:
                    # Reject non-string values for string-typed features (checked on the first row).
                    self._enforce_nested_string_type(self.selected_feature_format[key], column[0])
            batch = self.selected_feature_format.encode_batch(batch)
            self.writer.write_batch(batch)
        except (pa.ArrowInvalid, TypeError):
            # Build the most specific error message we can.
            if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
                # Columns have mismatched lengths.
                col0 = next(iter(batch))
                bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
                error_msg = (
                    f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
                )
            elif set(self.selected_feature_format) != {"references", "predictions"}:
                # Module with custom input names.
                error_msg = (
                    f"Module inputs don't match the expected format.\n"
                    f"Expected format: {self.selected_feature_format },\n"
                )
                error_msg_inputs = ",\n".join(
                    f"Input {input_name}: {summarize_if_long_list(batch[input_name])}"
                    for input_name in self.selected_feature_format
                )
                error_msg += error_msg_inputs
            else:
                error_msg = (
                    f"Predictions and/or references don't match the expected format.\n"
                    f"Expected format: {self.selected_feature_format },\n"
                    f"Input predictions: {summarize_if_long_list(predictions)},\n"
                    f"Input references: {summarize_if_long_list(references)}"
                )
            raise ValueError(error_msg) from None
|
| 542 |
+
|
| 543 |
+
    def add(self, *, prediction=None, reference=None, **kwargs):
        """Add one prediction and reference for the evaluation module's stack.

        Args:
            prediction (`list/array/tensor`, *optional*):
                Predictions.
            reference (`list/array/tensor`, *optional*):
                References.

        Raises:
            ValueError: On unexpected input names, or an example that doesn't
                match the module's feature format.

        Example:

        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> accuracy.add(references=[0,1], predictions=[1,0])
        ```
        """
        bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
        if bad_inputs:
            raise ValueError(
                f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
            )
        # The plural feature names ("predictions"/"references") may also arrive
        # via **kwargs and then take precedence over the singular parameters.
        example = {"predictions": prediction, "references": reference, **kwargs}
        example = {input_name: example[input_name] for input_name in self._feature_names()}
        if self.writer is None:
            # First example: pick the matching feature format and open the writer.
            self.selected_feature_format = self._infer_feature_from_example(example)
            self._init_writer()
        try:
            self._enforce_nested_string_type(self.selected_feature_format, example)
            example = self.selected_feature_format.encode_example(example)
            self.writer.write(example)
        except (pa.ArrowInvalid, TypeError):
            error_msg = (
                f"Evaluation module inputs don't match the expected format.\n"
                f"Expected format: {self.selected_feature_format},\n"
            )
            error_msg_inputs = ",\n".join(
                f"Input {input_name}: {summarize_if_long_list(example[input_name])}"
                for input_name in self.selected_feature_format
            )
            error_msg += error_msg_inputs
            raise ValueError(error_msg) from None
|
| 585 |
+
|
| 586 |
+
def _infer_feature_from_batch(self, batch):
|
| 587 |
+
if isinstance(self.features, Features):
|
| 588 |
+
return self.features
|
| 589 |
+
else:
|
| 590 |
+
example = dict([(k, v[0]) for k, v in batch.items()])
|
| 591 |
+
return self._infer_feature_from_example(example)
|
| 592 |
+
|
| 593 |
+
    def _infer_feature_from_example(self, example):
        """Return the first declared feature format that can encode ``example``.

        Raises:
            ValueError: If no declared format matches the example.
        """
        if isinstance(self.features, Features):
            return self.features
        else:
            # Try each candidate format in declaration order.
            for features in self.features:
                try:
                    # Both the string-type check and the encoding must succeed.
                    self._enforce_nested_string_type(features, example)
                    features.encode_example(example)
                    return features
                except (ValueError, TypeError):
                    continue
            feature_strings = "\n".join([f"Feature option {i}: {feature}" for i, feature in enumerate(self.features)])
            error_msg = (
                f"Predictions and/or references don't match the expected format.\n"
                f"Expected format:\n{feature_strings},\n"
                f"Input predictions: {summarize_if_long_list(example['predictions'])},\n"
                f"Input references: {summarize_if_long_list(example['references'])}"
            )
            raise ValueError(error_msg) from None
|
| 612 |
+
|
| 613 |
+
def _feature_names(self):
|
| 614 |
+
if isinstance(self.features, list):
|
| 615 |
+
feature_names = list(self.features[0].keys())
|
| 616 |
+
else:
|
| 617 |
+
feature_names = list(self.features.keys())
|
| 618 |
+
return feature_names
|
| 619 |
+
|
| 620 |
+
def _init_writer(self, timeout=1):
|
| 621 |
+
if self.num_process > 1:
|
| 622 |
+
if self.process_id == 0:
|
| 623 |
+
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
|
| 624 |
+
self.rendez_vous_lock = FileLock(file_path)
|
| 625 |
+
try:
|
| 626 |
+
self.rendez_vous_lock.acquire(timeout=timeout)
|
| 627 |
+
except TimeoutError:
|
| 628 |
+
raise ValueError(
|
| 629 |
+
f"Error in _init_writer: another evalution module instance is already using the local cache file at {file_path}. "
|
| 630 |
+
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
|
| 631 |
+
f"between distributed evaluation module instances."
|
| 632 |
+
) from None
|
| 633 |
+
|
| 634 |
+
if self.keep_in_memory:
|
| 635 |
+
self.buf_writer = pa.BufferOutputStream()
|
| 636 |
+
self.writer = ArrowWriter(
|
| 637 |
+
features=self.selected_feature_format, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
|
| 638 |
+
)
|
| 639 |
+
else:
|
| 640 |
+
self.buf_writer = None
|
| 641 |
+
|
| 642 |
+
# Get cache file name and lock it
|
| 643 |
+
if self.cache_file_name is None or self.filelock is None:
|
| 644 |
+
cache_file_name, filelock = self._create_cache_file() # get ready
|
| 645 |
+
self.cache_file_name = cache_file_name
|
| 646 |
+
self.filelock = filelock
|
| 647 |
+
|
| 648 |
+
self.writer = ArrowWriter(
|
| 649 |
+
features=self.selected_feature_format,
|
| 650 |
+
path=self.cache_file_name,
|
| 651 |
+
writer_batch_size=self.writer_batch_size,
|
| 652 |
+
)
|
| 653 |
+
# Setup rendez-vous here if
|
| 654 |
+
if self.num_process > 1:
|
| 655 |
+
if self.process_id == 0:
|
| 656 |
+
self._check_all_processes_locks() # wait for everyone to be ready
|
| 657 |
+
self.rendez_vous_lock.release() # let everyone go
|
| 658 |
+
else:
|
| 659 |
+
self._check_rendez_vous() # wait for master to be ready and to let everyone go
|
| 660 |
+
|
| 661 |
+
    def _info(self) -> EvaluationModuleInfo:
        """Construct the EvaluationModuleInfo object. See `EvaluationModuleInfo` for details.

        Warning: This function is only called once and the result is cached for all
        following .info() calls.

        Subclasses must override this to describe their features, citation, etc.

        Returns:
            info: (EvaluationModuleInfo) The EvaluationModule information
        """
        raise NotImplementedError
|
| 671 |
+
|
| 672 |
+
    def download_and_prepare(
        self,
        download_config: Optional[DownloadConfig] = None,
        dl_manager: Optional[DownloadManager] = None,
    ):
        """Downloads and prepares evaluation module for reading.

        Args:
            download_config ([`DownloadConfig`], *optional*):
                Specific download configuration parameters.
            dl_manager ([`DownloadManager`], *optional*):
                Specific download manager to use.

        Example:

        ```py
        >>> import evaluate
        ```
        """
        if dl_manager is None:
            if download_config is None:
                download_config = DownloadConfig()
            # Cache downloads next to the module's own cache directory and
            # reuse previously downloaded files.
            download_config.cache_dir = os.path.join(self.data_dir, "downloads")
            download_config.force_download = False

            dl_manager = DownloadManager(
                dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
            )

        self._download_and_prepare(dl_manager)
|
| 702 |
+
|
| 703 |
+
    def _download_and_prepare(self, dl_manager):
        """Downloads and prepares resources for the evaluation module.

        This is the internal implementation to overwrite called when user calls
        `download_and_prepare`. It should download all required resources for the evaluation module.
        The default implementation is a no-op for modules without external resources.

        Args:
            dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
        """
        return None
|
| 713 |
+
|
| 714 |
+
def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
|
| 715 |
+
"""This method defines the common API for all the evaluation module in the library"""
|
| 716 |
+
raise NotImplementedError
|
| 717 |
+
|
| 718 |
+
def __del__(self):
|
| 719 |
+
if hasattr(self, "filelock") and self.filelock is not None:
|
| 720 |
+
self.filelock.release()
|
| 721 |
+
if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
|
| 722 |
+
self.rendez_vous_lock.release()
|
| 723 |
+
if hasattr(self, "writer"): # in case it was already deleted
|
| 724 |
+
del self.writer
|
| 725 |
+
if hasattr(self, "data"): # in case it was already deleted
|
| 726 |
+
del self.data
|
| 727 |
+
|
| 728 |
+
def _enforce_nested_string_type(self, schema, obj):
    """
    Recursively checks if there is any Value feature of type string and throws TypeError if corresponding object is not a string.
    Since any Python object can be cast to string this avoids implicitly casting wrong input types (e.g. lists) to string without error.
    """
    # Nested structures: we allow dict, list, tuples, sequences
    if isinstance(schema, dict):
        # Walk matching (sub-schema, sub-object) pairs of the dict in lockstep.
        return [self._enforce_nested_string_type(sub_schema, o) for k, (sub_schema, o) in zip_dict(schema, obj)]

    elif isinstance(schema, (list, tuple)):
        # A list/tuple schema describes a homogeneous sequence: check every
        # element of `obj` against the single element schema.
        sub_schema = schema[0]
        return [self._enforce_nested_string_type(sub_schema, o) for o in obj]
    elif isinstance(schema, Sequence):
        # We allow to reverse list of dict => dict of list for compatiblity with tfds
        if isinstance(schema.feature, dict):
            if isinstance(obj, (list, tuple)):
                # obj is a list of dict
                for k, dict_tuples in zip_dict(schema.feature, *obj):
                    # Only the first non-null/non-empty value per key is checked,
                    # then the inner loop stops.
                    for sub_obj in dict_tuples[1:]:
                        if _check_non_null_non_empty_recursive(sub_obj, dict_tuples[0]):
                            self._enforce_nested_string_type(dict_tuples[0], sub_obj)
                            break
                return None
            else:
                # obj is a single dict
                for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
                    for sub_obj in sub_objs:
                        if _check_non_null_non_empty_recursive(sub_obj, sub_schema):
                            self._enforce_nested_string_type(sub_schema, sub_obj)
                            break
                return None
        # schema.feature is not a dict
        if isinstance(obj, str):  # don't interpret a string as a list
            raise ValueError(f"Got a string but expected a list instead: '{obj}'")
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                # Inspect only the first "meaningful" element; presumably the
                # sequence is homogeneous — TODO confirm.
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
                        break
                if not isinstance(first_elmt, list):
                    return self._enforce_nested_string_type(schema.feature, first_elmt)

    elif isinstance(schema, Value):
        # Leaf case: only string-typed Values are enforced; other dtypes pass through.
        if pa.types.is_string(schema.pa_type) and not isinstance(obj, str):
            raise TypeError(f"Expected type str but got {type(obj)}.")
# Concrete metrics provide behavior by overriding `EvaluationModule` hooks such
# as `_info` and `_compute` (which raise NotImplementedError in the base class).
class Metric(EvaluationModule):
    """A Metric is the base class and common API for all metrics.

    Args:
        config_name (`str`):
            This is used to define a hash specific to a metric computation script and prevents the metric's data
            to be overridden when the metric loading script is modified.
        keep_in_memory (`bool`):
            Keep all predictions and references in memory. Not possible in distributed settings.
        cache_dir (`str`):
            Path to a directory in which temporary prediction/references data will be stored.
            The data directory should be located on a shared file-system in distributed setups.
        num_process (`int`):
            Specify the total number of nodes in a distributed settings.
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        process_id (`int`):
            Specify the id of the current process in a distributed setup (between 0 and num_process-1)
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        seed (`int`, *optional*):
            If specified, this will temporarily set numpy's random seed when [`~evaluate.Metric.compute`] is run.
        experiment_id (`str`):
            A specific experiment id. This is used if several distributed evaluations share the same file system.
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        max_concurrent_cache_files (`int`):
            Max number of concurrent metric cache files (default `10000`).
        timeout (`Union[int, float]`):
            Timeout in second for distributed setting synchronization.
    """
# Concrete comparisons provide behavior by overriding `EvaluationModule` hooks
# such as `_info` and `_compute` (which raise NotImplementedError in the base class).
class Comparison(EvaluationModule):
    """A Comparison is the base class and common API for all comparisons.

    Args:
        config_name (`str`):
            This is used to define a hash specific to a comparison computation script and prevents the comparison's data
            to be overridden when the comparison loading script is modified.
        keep_in_memory (`bool`):
            Keep all predictions and references in memory. Not possible in distributed settings.
        cache_dir (`str`):
            Path to a directory in which temporary prediction/references data will be stored.
            The data directory should be located on a shared file-system in distributed setups.
        num_process (`int`):
            Specify the total number of nodes in a distributed settings.
            This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
        process_id (`int`):
            Specify the id of the current process in a distributed setup (between 0 and num_process-1)
            This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
        seed (`int`, *optional*):
            If specified, this will temporarily set numpy's random seed when [`~evaluate.Comparison.compute`] is run.
        experiment_id (`str`):
            A specific experiment id. This is used if several distributed evaluations share the same file system.
            This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
        max_concurrent_cache_files (`int`):
            Max number of concurrent comparison cache files (default `10000`).
        timeout (`Union[int, float]`):
            Timeout in second for distributed setting synchronization.
    """
# Concrete measurements provide behavior by overriding `EvaluationModule` hooks
# such as `_info` and `_compute` (which raise NotImplementedError in the base class).
class Measurement(EvaluationModule):
    """A Measurement is the base class and common API for all measurements.

    Args:
        config_name (`str`):
            This is used to define a hash specific to a measurement computation script and prevents the measurement's data
            to be overridden when the measurement loading script is modified.
        keep_in_memory (`bool`):
            Keep all predictions and references in memory. Not possible in distributed settings.
        cache_dir (`str`):
            Path to a directory in which temporary prediction/references data will be stored.
            The data directory should be located on a shared file-system in distributed setups.
        num_process (`int`):
            Specify the total number of nodes in a distributed settings.
            This is useful to compute measurements in distributed setups (in particular non-additive measurements).
        process_id (`int`):
            Specify the id of the current process in a distributed setup (between 0 and num_process-1)
            This is useful to compute measurements in distributed setups (in particular non-additive measurements).
        seed (`int`, *optional*):
            If specified, this will temporarily set numpy's random seed when [`~evaluate.Measurement.compute`] is run.
        experiment_id (`str`):
            A specific experiment id. This is used if several distributed evaluations share the same file system.
            This is useful to compute measurements in distributed setups (in particular non-additive measurements).
        max_concurrent_cache_files (`int`):
            Max number of concurrent measurement cache files (default `10000`).
        timeout (`Union[int, float]`):
            Timeout in second for distributed setting synchronization.
    """
class CombinedEvaluations:
    # Groups several evaluation modules behind the single-module API
    # (`add`, `add_batch`, `compute`) and merges their results into one dict.

    def __init__(self, evaluation_modules, force_prefix=False):
        from .loading import load  # avoid circular imports

        self.evaluation_module_names = None
        if isinstance(evaluation_modules, list):
            self.evaluation_modules = evaluation_modules
        elif isinstance(evaluation_modules, dict):
            # Dict input: values are the modules, keys become the result-name prefixes.
            self.evaluation_modules = list(evaluation_modules.values())
            self.evaluation_module_names = list(evaluation_modules.keys())
        # NOTE(review): inputs that are neither list nor dict leave
        # `self.evaluation_modules` unset and fail in the loop below — confirm intended.
        loaded_modules = []

        for module in self.evaluation_modules:
            # String entries are resolved to modules via `evaluate.load`.
            if isinstance(module, str):
                module = load(module)
            loaded_modules.append(module)
        self.evaluation_modules = loaded_modules

        if self.evaluation_module_names is None:
            # List input: fall back to each module's own name for prefixing.
            self.evaluation_module_names = [module.name for module in self.evaluation_modules]

        self.force_prefix = force_prefix

    def add(self, prediction=None, reference=None, **kwargs):
        """Add one prediction and reference for each evaluation module's stack.

        Args:
            predictions (`list/array/tensor`, *optional*):
                Predictions.
            references (`list/array/tensor`, *optional*):
                References.

        Example:

        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> f1 = evaluate.load("f1")
        >>> clf_metrics = combine(["accuracy", "f1"])
        >>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
        ...     clf_metrics.add(references=ref, predictions=pred)
        ```
        """
        for evaluation_module in self.evaluation_modules:
            # Forward only the inputs this module declares among its features.
            batch = {"predictions": prediction, "references": reference, **kwargs}
            batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
            evaluation_module.add(**batch)

    def add_batch(self, predictions=None, references=None, **kwargs):
        """Add a batch of predictions and references for each evaluation module's stack.

        Args:
            predictions (`list/array/tensor`, *optional*):
                Predictions.
            references (`list/array/tensor`, *optional*):
                References.

        Example:
        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> f1 = evaluate.load("f1")
        >>> clf_metrics = combine(["accuracy", "f1"])
        >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
        ...     clf_metrics.add_batch(references=refs, predictions=preds)
        ```
        """
        for evaluation_module in self.evaluation_modules:
            # Forward only the inputs this module declares among its features.
            batch = {"predictions": predictions, "references": references, **kwargs}
            batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
            evaluation_module.add_batch(**batch)

    def compute(self, predictions=None, references=None, **kwargs):
        """Compute each evaluation module.

        Usage of positional arguments is not allowed to prevent mistakes.

        Args:
            predictions (`list/array/tensor`, *optional*):
                Predictions.
            references (`list/array/tensor`, *optional*):
                References.
            **kwargs (*optional*):
                Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
                method (see details in the docstring).

        Return:
            `dict` or `None`

            - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
            - `None` if the evaluation module is not run on the main process (`process_id != 0`).

        Example:

        ```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> f1 = evaluate.load("f1")
        >>> clf_metrics = combine(["accuracy", "f1"])
        >>> clf_metrics.compute(predictions=[0,1], references=[1,1])
        {'accuracy': 0.5, 'f1': 0.6666666666666666}
        ```
        """
        results = []

        for evaluation_module in self.evaluation_modules:
            # NOTE(review): unlike add/add_batch, kwargs are not filtered by
            # `_feature_names()` here — presumably compute tolerates extras; verify.
            batch = {"predictions": predictions, "references": references, **kwargs}
            results.append(evaluation_module.compute(**batch))

        return self._merge_results(results)

    def _merge_results(self, results):
        # Merge per-module result dicts; prefix keys that clash (or always, when
        # force_prefix is set), and disambiguate duplicate module names with an
        # integer counter.
        merged_results = {}
        results_keys = list(itertools.chain.from_iterable([r.keys() for r in results]))
        duplicate_keys = {item for item, count in collections.Counter(results_keys).items() if count > 1}

        duplicate_names = [
            item for item, count in collections.Counter(self.evaluation_module_names).items() if count > 1
        ]
        duplicate_counter = {name: 0 for name in duplicate_names}

        for module_name, result in zip(self.evaluation_module_names, results):
            for k, v in result.items():
                if k not in duplicate_keys and not self.force_prefix:
                    # Unique score name and no forced prefixing: keep the bare key.
                    merged_results[f"{k}"] = v
                elif module_name in duplicate_counter:
                    # Same module used twice: include its occurrence index.
                    merged_results[f"{module_name}_{duplicate_counter[module_name]}_{k}"] = v
                else:
                    merged_results[f"{module_name}_{k}"] = v

            if module_name in duplicate_counter:
                duplicate_counter[module_name] += 1

        return merged_results
def combine(evaluations, force_prefix=False):
    """Combines several metrics, comparisons, or measurements into a single `CombinedEvaluations` object that
    can be used like a single evaluation module.

    If two scores have the same name, then they are prefixed with their module names.
    And if two modules have the same name, please use a dictionary to give them different names, otherwise an integer id is appended to the prefix.

    Args:
        evaluations (`Union[list, dict]`):
            A list or dictionary of evaluation modules. The modules can either be passed
            as strings or loaded `EvaluationModule`s. If a dictionary is passed its keys are the names used and the values the modules.
            The names are used as prefix in case there are name overlaps in the returned results of each module or if `force_prefix=True`.
        force_prefix (`bool`, *optional*, defaults to `False`):
            If `True` all scores from the modules are prefixed with their name. If
            a dictionary is passed the keys are used as name otherwise the module's name.

    Examples:

    ```py
    >>> import evaluate
    >>> accuracy = evaluate.load("accuracy")
    >>> f1 = evaluate.load("f1")
    >>> clf_metrics = combine(["accuracy", "f1"])
    ```
    """
    # Thin convenience wrapper around the CombinedEvaluations container.
    combined = CombinedEvaluations(evaluations, force_prefix=force_prefix)
    return combined
|
venv/lib/python3.10/site-packages/evaluate/naming.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Lint as: python3
|
| 16 |
+
"""Utilities for file names."""
|
| 17 |
+
|
| 18 |
+
import itertools
|
| 19 |
+
import os
|
| 20 |
+
import re
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# Pre-compiled patterns for case conversion, hoisted to module level.
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    # First break acronym runs before a capitalized word ("HTTPServer" -> "HTTP_Server"),
    # then insert an underscore at every lower/digit-to-upper boundary.
    converted = _uppercase_uppercase_re.sub(r"\1_\2", name)
    converted = _lowercase_uppercase_re.sub(r"\1_\2", converted)
    return converted.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    # Split on single underscores; keep runs of 2+ underscores as literal
    # fragments (the capturing group makes re.split retain them), then
    # capitalize every non-empty fragment.
    fragments = (
        fragment
        for part in _single_underscore_re.split(name)
        for fragment in _multiple_underscores_re.split(part)
    )
    return "".join(fragment.capitalize() for fragment in fragments if fragment != "")
def filename_prefix_for_name(name):
    """Return the snake-case file prefix for a bare dataset name.

    Raises:
        ValueError: if `name` contains path components.
    """
    is_bare_name = os.path.basename(name) == name
    if not is_bare_name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """Return the file prefix `{snake_case_name}-{split}` for a dataset split.

    Raises:
        ValueError: if `name` contains path components or `split` does not
            match the allowed split-name pattern `_split_re`.
    """
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        # Fixed: the original message had a stray doubled quote after the pattern.
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching the cached files of one dataset split."""
    stem = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        stem = f"{stem}.{filetype_suffix}"
    # Trailing "*" so callers can glob shard/part files sharing the prefix.
    return os.path.join(data_dir, stem) + "*"
def filename_for_dataset_split(dataset_name, split, filetype_suffix=None):
    """Return the file name (without directory) for one dataset split."""
    base = filename_prefix_for_split(dataset_name, split)
    return f"{base}.{filetype_suffix}" if filetype_suffix else base
def filepath_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return the full path of the cache file for one dataset split."""
    split_filename = filename_for_dataset_split(
        dataset_name=dataset_name,
        split=split,
        filetype_suffix=filetype_suffix,
    )
    return os.path.join(data_dir, split_filename)
|
venv/lib/python3.10/site-packages/evaluate/saving.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import subprocess
|
| 4 |
+
import sys
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
from datasets.utils.filelock import FileLock
|
| 9 |
+
|
| 10 |
+
from . import __version__
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def save(path_or_file, **data):
    """
    Saves results to a JSON file. Also saves system information such as current time, current commit
    hash if inside a repository, and Python system information.

    Args:
        path_or_file (`str`):
            Path or file to store the file. If only a folder is provided
            the results file will be saved in the format `"result-%Y_%m_%d-%H_%M_%S.json"`.

    Example:
    ```py
    >>> import evaluate
    >>> result = {"bleu": 0.7}
    >>> params = {"model": "gpt-2"}
    >>> evaluate.save("./results/", **result, **params)
    ```
    """
    current_time = datetime.now()
    file_path = _setup_path(path_or_file, current_time)

    # Attach provenance metadata alongside the user-provided results.
    data["_timestamp"] = current_time.isoformat()
    data["_git_commit_hash"] = _git_commit_hash()
    data["_evaluate_version"] = __version__
    data["_python_version"] = sys.version
    data["_interpreter_path"] = sys.executable

    lock_path = str(file_path) + ".lock"
    with FileLock(lock_path):
        with open(file_path, "w") as f:
            json.dump(data, f)

    # cleanup lock file
    try:
        os.remove(lock_path)
    except FileNotFoundError:
        pass

    return file_path
def _setup_path(path_or_file, current_time):
|
| 55 |
+
path_or_file = Path(path_or_file)
|
| 56 |
+
is_file = len(path_or_file.suffix) > 0
|
| 57 |
+
if is_file:
|
| 58 |
+
folder = path_or_file.parent
|
| 59 |
+
file_name = path_or_file.name
|
| 60 |
+
else:
|
| 61 |
+
folder = path_or_file
|
| 62 |
+
file_name = "result-" + current_time.strftime("%Y_%m_%d-%H_%M_%S") + ".json"
|
| 63 |
+
folder.mkdir(parents=True, exist_ok=True)
|
| 64 |
+
return folder / file_name
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _git_commit_hash():
    """Return the current git HEAD commit hash, or None when not inside a work tree."""
    probe = subprocess.run("git rev-parse --is-inside-work-tree".split(), cwd="./", stdout=subprocess.PIPE)
    if probe.stdout.decode().strip() != "true":
        return None
    head = subprocess.run("git rev-parse HEAD".split(), cwd=os.getcwd(), stdout=subprocess.PIPE)
    return head.stdout.decode().strip()
|
venv/lib/python3.10/site-packages/evaluate/utils/__init__.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# flake8: noqa
|
| 16 |
+
# Lint as: python3
|
| 17 |
+
"""Util import."""
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"disable_progress_bar",
|
| 21 |
+
"enable_progress_bar",
|
| 22 |
+
"is_progress_bar_enabled",
|
| 23 |
+
"infer_gradio_input_types",
|
| 24 |
+
"json_to_string_type",
|
| 25 |
+
"parse_readme",
|
| 26 |
+
"parse_gradio_data",
|
| 27 |
+
"parse_test_cases",
|
| 28 |
+
"launch_gradio_widget",
|
| 29 |
+
]
|
| 30 |
+
|
| 31 |
+
from .gradio import (
|
| 32 |
+
infer_gradio_input_types,
|
| 33 |
+
json_to_string_type,
|
| 34 |
+
launch_gradio_widget,
|
| 35 |
+
parse_gradio_data,
|
| 36 |
+
parse_readme,
|
| 37 |
+
parse_test_cases,
|
| 38 |
+
)
|
| 39 |
+
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
|
venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (587 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/file_utils.cpython-310.pyc
ADDED
|
Binary file (17.8 kB). View file
|
|
|
venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/gradio.cpython-310.pyc
ADDED
|
Binary file (4.51 kB). View file
|
|
|
venv/lib/python3.10/site-packages/evaluate/utils/__pycache__/logging.cpython-310.pyc
ADDED
|
Binary file (7.24 kB). View file
|
|
|
venv/lib/python3.10/site-packages/evaluate/utils/file_utils.py
ADDED
|
@@ -0,0 +1,618 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Utilities for working with the local dataset cache.
|
| 3 |
+
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
|
| 4 |
+
Copyright by the AllenNLP authors.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import copy
|
| 8 |
+
import io
|
| 9 |
+
import json
|
| 10 |
+
import os
|
| 11 |
+
import posixpath
|
| 12 |
+
import re
|
| 13 |
+
import shutil
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
import time
|
| 17 |
+
import urllib
|
| 18 |
+
from contextlib import closing, contextmanager
|
| 19 |
+
from functools import partial
|
| 20 |
+
from hashlib import sha256
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
from typing import List, Optional, Type, TypeVar, Union
|
| 23 |
+
from urllib.parse import urljoin, urlparse
|
| 24 |
+
|
| 25 |
+
import requests
|
| 26 |
+
from datasets import DownloadConfig
|
| 27 |
+
from datasets.utils.extract import ExtractManager
|
| 28 |
+
from datasets.utils.filelock import FileLock
|
| 29 |
+
|
| 30 |
+
from .. import __version__, config
|
| 31 |
+
from . import logging
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 35 |
+
|
| 36 |
+
INCOMPLETE_SUFFIX = ".incomplete"
|
| 37 |
+
|
| 38 |
+
T = TypeVar("T", str, Path)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
    """
    Add hf_modules_cache to the python path.
    By default hf_modules_cache='~/.cache/huggingface/modules'.
    It can also be set with the environment variable HF_MODULES_CACHE.
    This is used to add modules such as `datasets_modules`
    """
    cache = config.HF_MODULES_CACHE if hf_modules_cache is None else hf_modules_cache
    cache = str(cache)
    if cache not in sys.path:
        sys.path.append(cache)

    os.makedirs(cache, exist_ok=True)
    init_file = os.path.join(cache, "__init__.py")
    if not os.path.exists(init_file):
        # Create an empty __init__.py so the directory is importable as a package.
        with open(init_file, "w"):
            pass
    return cache
def is_remote_url(url_or_filename: str) -> bool:
    """Return True when ``url_or_filename`` carries a supported remote-protocol scheme."""
    scheme = urlparse(url_or_filename).scheme
    return scheme in ("http", "https", "s3", "gs", "hdfs", "ftp")
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def is_local_path(url_or_filename: str) -> bool:
    """Return True when ``url_or_filename`` refers to the local file system.

    On unix the scheme of a local path is empty (for both absolute and relative),
    while on windows the scheme is the drive name (ex: "c") for absolute paths.
    For details on the windows behavior, see https://bugs.python.org/issue42215
    """
    # Parse the URL once instead of twice as the original implementation did.
    scheme = urlparse(url_or_filename).scheme
    return scheme == "" or os.path.ismount(scheme + ":/")
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def is_relative_path(url_or_filename: str) -> bool:
    """Return True when the string has no URL scheme and is not an absolute path."""
    has_scheme = urlparse(url_or_filename).scheme != ""
    return not has_scheme and not os.path.isabs(url_or_filename)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def relative_to_absolute_path(path: T) -> T:
    """Convert relative path to absolute path, expanding ``~`` and environment variables."""
    expanded = os.path.expanduser(os.path.expandvars(str(path)))
    abs_path_str = os.path.abspath(expanded)
    # Preserve the input type: Path in -> Path out, str in -> str out.
    if isinstance(path, Path):
        return Path(abs_path_str)
    return abs_path_str
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
    """Build the S3 (or CloudFront, when ``use_cdn``) URL of a dataset or metric file."""
    if dataset:
        prefix = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
    else:
        prefix = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
    return "/".join((prefix, identifier, filename))
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def head_hf_s3(
    identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
    """Issue an HTTP HEAD request for ``filename`` of ``identifier`` on the HF bucket/CDN.

    Thin wrapper around :func:`http_head` using the URL built by :func:`hf_bucket_url`.
    """
    return http_head(
        hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
        max_retries=max_retries,
    )
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def hf_hub_url(path: str, name: str, revision: Optional[str] = None) -> str:
    """Build the hub URL of file ``name`` in repo ``path`` at ``revision`` (default branch when falsy)."""
    if not revision:
        revision = config.HUB_DEFAULT_VERSION
    return config.HUB_EVALUATE_URL.format(path=path, name=name, revision=revision)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def url_or_path_join(base_name: str, *pathnames: str) -> str:
    """Join components onto ``base_name`` using URL semantics for remote bases, POSIX paths otherwise."""
    if is_remote_url(base_name):
        # Normalize each component to forward slashes and strip leading "/".
        normalized = (str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)
        return posixpath.join(base_name, *normalized)
    return Path(base_name, *pathnames).as_posix()
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def url_or_path_parent(url_or_path: str) -> str:
    """Return the parent of a URL (text before the last "/") or of a local path."""
    if not is_remote_url(url_or_path):
        return os.path.dirname(url_or_path)
    return url_or_path[: url_or_path.rindex("/")]
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def hash_url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited by a period.
    If the url ends with ".py" the suffix is preserved on the generated
    filename so the cached file is still recognizable as a python module.
    """
    parts = [sha256(url.encode("utf-8")).hexdigest()]
    if etag:
        parts.append(sha256(etag.encode("utf-8")).hexdigest())
    filename = ".".join(parts)
    if url.endswith(".py"):
        filename += ".py"
    return filename
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def cached_path(
    url_or_filename,
    download_config=None,
    **download_kwargs,
) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
        ValueError: if it couldn't parse the url or filename correctly
        requests.exceptions.ConnectionError: in case of internet connection issue
    """
    if download_config is None:
        # Build a DownloadConfig from the loose keyword arguments when none is given.
        download_config = DownloadConfig(**download_kwargs)

    cache_dir = download_config.cache_dir or config.DOWNLOADED_EVALUATE_PATH
    # Normalize Path inputs to plain strings for the rest of the function.
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=download_config.force_download,
            proxies=download_config.proxies,
            resume_download=download_config.resume_download,
            user_agent=download_config.user_agent,
            local_files_only=download_config.local_files_only,
            use_etag=download_config.use_etag,
            max_retries=download_config.max_retries,
            use_auth_token=download_config.use_auth_token,
            ignore_url_params=download_config.ignore_url_params,
            download_desc=download_config.download_desc,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif is_local_path(url_or_filename):
        # File, but it doesn't exist.
        raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
    else:
        # Something unknown
        raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")

    if output_path is None:
        return output_path

    # Optionally decompress/unpack the cached archive and return the extracted path.
    if download_config.extract_compressed_file:
        output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
            output_path, force_extract=download_config.force_extract
        )

    return output_path
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
    """Build the user-agent string for outgoing HTTP requests.

    Includes library/python/pyarrow versions, any available frameworks
    (torch/tensorflow/jax), plus caller-supplied extra data.
    """
    ua = f"datasets/{__version__}; python/{config.PY_VERSION}"
    ua += f"; pyarrow/{config.PYARROW_VERSION}"
    if config.TORCH_AVAILABLE:
        ua += f"; torch/{config.TORCH_VERSION}"
    if config.TF_AVAILABLE:
        ua += f"; tensorflow/{config.TF_VERSION}"
    if config.JAX_AVAILABLE:
        ua += f"; jax/{config.JAX_VERSION}"
    # Extra entries: dict becomes "key/value" pairs, str is appended verbatim.
    if isinstance(user_agent, dict):
        ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}"
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict:
    """Handle the HF authentication"""
    headers = {}
    # Only attach a token for URLs on the Hugging Face endpoint.
    if url.startswith(config.HF_ENDPOINT):
        token = None
        if isinstance(use_auth_token, str):
            # An explicit token string was provided by the caller.
            token = use_auth_token
        elif bool(use_auth_token):
            # use_auth_token=True: read the token stored by `huggingface-cli login`.
            from huggingface_hub import hf_api

            token = hf_api.HfFolder.get_token()
        if token:
            headers["authorization"] = f"Bearer {token}"
    return headers
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class OfflineModeIsEnabled(ConnectionError):
    """Raised when a network access is attempted while offline mode (HF_EVALUATE_OFFLINE) is enabled."""

    pass
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
    """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_EVALUATE_OFFLINE is True."""
    if not config.HF_EVALUATE_OFFLINE:
        return
    message = "Offline mode is enabled."
    if msg is not None:
        message += " " + str(msg)
    raise OfflineModeIsEnabled(message)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def _retry(
    func,
    func_args: Optional[tuple] = None,
    func_kwargs: Optional[dict] = None,
    exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException,
    status_codes: Optional[List[int]] = None,
    max_retries: int = 0,
    base_wait_time: float = 0.5,
    max_wait_time: float = 2,
):
    """Call ``func(*func_args, **func_kwargs)``, retrying on failure with exponential backoff.

    Args:
        func: callable to invoke.
        func_args / func_kwargs: positional / keyword arguments for ``func``.
        exceptions: exception type(s) that trigger a retry.
        status_codes: when given, only errors whose HTTP response status code is in this list are retried.
        max_retries (int): maximum number of retries (0 means a single attempt).
        base_wait_time (float): wait (seconds) before the first retry; doubles each retry.
        max_wait_time (float): cap (seconds) on the wait between retries.

    Returns:
        Whatever ``func`` returns on the first successful attempt.

    Raises:
        The last caught exception once retries are exhausted or the error is not retryable.
    """
    func_args = func_args or ()
    func_kwargs = func_kwargs or {}
    retry = 0
    while True:
        try:
            return func(*func_args, **func_kwargs)
        except exceptions as err:
            # ``err.response`` can be None for pure connection errors; the original
            # code dereferenced it unconditionally and could raise AttributeError.
            # Treat such errors as non-retryable when a status-code filter is set.
            response = getattr(err, "response", None)
            if retry >= max_retries or (
                status_codes and (response is None or response.status_code not in status_codes)
            ):
                raise err
            sleep_time = min(max_wait_time, base_wait_time * 2**retry)  # Exponential backoff
            # Bug fix: the original message used {retry/max_retries}, which *divides*
            # the two integers instead of printing an "attempt/total" counter.
            logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry + 1}/{max_retries}]")
            time.sleep(sleep_time)
            retry += 1
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def _request_with_retry(
    method: str,
    url: str,
    max_retries: int = 0,
    base_wait_time: float = 0.5,
    max_wait_time: float = 2,
    timeout: float = 10.0,
    **params,
) -> requests.Response:
    """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.

    Note that if the environment variable HF_EVALUATE_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised.

    Args:
        method (str): HTTP method, such as 'GET' or 'HEAD'.
        url (str): The URL of the resource to fetch.
        max_retries (int): Maximum number of retries, defaults to 0 (no retries).
        base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
            retries then grows exponentially, capped by max_wait_time.
        max_wait_time (float): Maximum amount of time between two retries, in seconds.
        timeout (float): Per-request timeout in seconds.
        **params: Params to pass to :obj:`requests.request`.

    Returns:
        requests.Response: the first successful response.

    Raises:
        OfflineModeIsEnabled: when offline mode is active.
        requests.exceptions.ConnectTimeout / ConnectionError: once retries are exhausted.
    """
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    tries, success = 0, False
    while not success:
        tries += 1
        try:
            response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
            success = True
        except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
            if tries > max_retries:
                raise err
            # Bug fix: the original message used {tries/max_retries}, which *divides*
            # the two integers instead of printing an "attempt/total" counter.
            logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
            sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1))  # Exponential backoff
            time.sleep(sleep_time)
    return response
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def ftp_head(url, timeout=10.0):
    """Return True when the FTP ``url`` is reachable (a single byte can be read)."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
            r.read(1)
        return True
    except Exception:
        return False
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def ftp_get(url, temp_file, timeout=10.0):
    """Download the FTP ``url`` into the already-open file object ``temp_file``."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        logger.info(f"Getting through FTP {url} into {temp_file.name}")
        with closing(urllib.request.urlopen(url, timeout=timeout)) as remote:
            shutil.copyfileobj(remote, temp_file)
    except urllib.error.URLError as e:
        # Surface FTP failures as the generic ConnectionError callers expect.
        raise ConnectionError(e) from None
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def http_get(
    url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None
):
    """Stream-download ``url`` into the open file object ``temp_file``, with progress bar.

    ``resume_size`` > 0 requests a byte-range so a partial download can continue.
    """
    # Deep-copy so the caller's headers dict is not mutated below.
    headers = copy.deepcopy(headers) or {}
    headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
    if resume_size > 0:
        headers["Range"] = f"bytes={resume_size:d}-"
    response = _request_with_retry(
        method="GET",
        url=url,
        stream=True,
        proxies=proxies,
        headers=headers,
        cookies=cookies,
        max_retries=max_retries,
        timeout=timeout,
    )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    with logging.tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc=desc or "Downloading",
        disable=not logging.is_progress_bar_enabled(),
    ) as progress:
        # Write 1 KiB chunks as they arrive, updating the progress bar in lockstep.
        for chunk in response.iter_content(chunk_size=1024):
            progress.update(len(chunk))
            temp_file.write(chunk)
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def http_head(
    url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
    """Issue an HTTP HEAD request to ``url`` (with retries) and return the response."""
    # Deep-copy so the caller's headers dict is not mutated when the user-agent is set.
    headers = copy.deepcopy(headers) or {}
    headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
    response = _request_with_retry(
        method="HEAD",
        url=url,
        proxies=proxies,
        headers=headers,
        cookies=cookies,
        allow_redirects=allow_redirects,
        timeout=timeout,
        max_retries=max_retries,
    )
    return response
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def request_etag(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> Optional[str]:
    """Return the ETag header of ``url`` (None when absent); raises for HTTP error statuses."""
    headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
    response = http_head(url, headers=headers, max_retries=3)
    response.raise_for_status()
    # NOTE(review): after raise_for_status() succeeds, response.ok is always True,
    # so the `else None` branch below is unreachable; kept as-is.
    etag = response.headers.get("ETag") if response.ok else None
    return etag
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=100,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
    use_etag=True,
    max_retries=0,
    use_auth_token=None,
    ignore_url_params=False,
    download_desc=None,
) -> str:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
    """
    if cache_dir is None:
        cache_dir = config.HF_EVALUATE_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    if ignore_url_params:
        # strip all query parameters and #fragments from the URL
        cached_url = urljoin(url, urlparse(url).path)
    else:
        cached_url = url  # additional parameters may be added to the given URL

    connected = False
    response = None
    cookies = None
    etag = None
    head_error = None

    # Try a first time to file the file on the local file system without eTag (None)
    # if we don't ask for 'force_download' then we spare a request
    filename = hash_url_to_filename(cached_url, etag=None)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download and not use_etag:
        return cache_path

    # Prepare headers for authentication
    headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
    if user_agent is not None:
        headers["user-agent"] = user_agent

    # We don't have the file locally or we need an eTag
    if not local_files_only:
        if url.startswith("ftp://"):
            connected = ftp_head(url)
        try:
            # HEAD request: establishes reachability and fetches the ETag.
            response = http_head(
                url,
                allow_redirects=True,
                proxies=proxies,
                timeout=etag_timeout,
                max_retries=max_retries,
                headers=headers,
            )
            if response.status_code == 200:  # ok
                etag = response.headers.get("ETag") if use_etag else None
                for k, v in response.cookies.items():
                    # In some edge cases, we need to get a confirmation token
                    if k.startswith("download_warning") and "drive.google.com" in url:
                        url += "&confirm=" + v
                        cookies = response.cookies
                connected = True
                # Fix Google Drive URL to avoid Virus scan warning
                if "drive.google.com" in url and "confirm=" not in url:
                    url += "&confirm=t"
            # In some edge cases, head request returns 400 but the connection is actually ok
            elif (
                (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
                or (response.status_code == 405 and "drive.google.com" in url)
                or (
                    response.status_code == 403
                    and (
                        re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
                        or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
                    )
                )
                or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
            ):
                connected = True
                logger.info(f"Couldn't get ETag version for url {url}")
            elif response.status_code == 401 and config.HF_ENDPOINT in url and use_auth_token is None:
                raise ConnectionError(
                    f"Unauthorized for URL {url}. Please use the parameter ``use_auth_token=True`` after logging in with ``huggingface-cli login``"
                )
        except (OSError, requests.exceptions.Timeout) as e:
            # not connected
            head_error = e
            pass

    # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if not connected:
        if os.path.exists(cache_path) and not force_download:
            return cache_path
        if local_files_only:
            raise FileNotFoundError(
                f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
                " disabled. To enable file online look-ups, set 'local_files_only' to False."
            )
        elif response is not None and response.status_code == 404:
            raise FileNotFoundError(f"Couldn't find file at {url}")
        _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
        if head_error is not None:
            raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
        elif response is not None:
            raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
        else:
            raise ConnectionError(f"Couldn't reach {url}")

    # Try a second time
    filename = hash_url_to_filename(cached_url, etag)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # From now on, connected is True.
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):

        if resume_download:
            # Append to the partial ".incomplete" file so an interrupted
            # download can be resumed with an HTTP Range request.
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")

            # GET file object
            if url.startswith("ftp://"):
                ftp_get(url, temp_file)
            else:
                http_get(
                    url,
                    temp_file,
                    proxies=proxies,
                    resume_size=resume_size,
                    headers=headers,
                    cookies=cookies,
                    max_retries=max_retries,
                    desc=download_desc,
                )

        logger.info(f"storing {url} in cache at {cache_path}")
        shutil.move(temp_file.name, cache_path)

        logger.info(f"creating metadata file for {cache_path}")
        # Sidecar JSON recording the source url and etag of the cached file.
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w", encoding="utf-8") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
|
| 586 |
+
|
| 587 |
+
|
| 588 |
+
def add_start_docstrings(*docstr):
    """Decorator factory that prepends ``docstr`` to the decorated function's docstring."""

    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = "".join(docstr) + "\n\n" + existing
        return fn

    return docstring_decorator
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
def add_end_docstrings(*docstr):
    """Decorator factory that appends ``docstr`` to the decorated function's docstring."""

    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = existing + "\n\n" + "".join(docstr)
        return fn

    return docstring_decorator
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
def estimate_dataset_size(paths):
    """Sum the on-disk sizes in bytes of the files pointed to by ``paths`` (path-like objects with ``.stat()``)."""
    total = 0
    for path in paths:
        total += path.stat().st_size
    return total
|
| 606 |
+
|
| 607 |
+
|
| 608 |
+
def readline(f: io.RawIOBase):
    """Read bytes from ``f`` one at a time until (and including) a newline, or until EOF.

    From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
    """
    buf = bytearray()
    while not buf.endswith(b"\n"):
        byte = f.read(1)
        if not byte:
            # EOF reached before a newline.
            break
        buf += byte
    return bytes(buf)
|
venv/lib/python3.10/site-packages/evaluate/utils/gradio.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from datasets import Value
|
| 9 |
+
|
| 10 |
+
from .logging import get_logger
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
logger = get_logger(__name__)
|
| 14 |
+
|
| 15 |
+
# Matches a YAML front-matter block delimited by "---" lines (as found at the top
# of hub README.md files); group 1 captures the block's contents.
REGEX_YAML_BLOCK = re.compile(r"---[\n\r]+([\S\s]*?)[\n\r]+---[\n\r]")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def infer_gradio_input_types(feature_types):
    """
    Maps metric feature types to input types for gradio Dataframes:
    - float/int -> numbers
    - string -> strings
    - any other -> json
    Note that json is not a native gradio type but will be treated as string that
    is then parsed as a json.
    """

    def _map_type(feature_type):
        if isinstance(feature_type, Value):
            if feature_type.dtype.startswith(("int", "float")):
                return "number"
            if feature_type.dtype == "string":
                return "str"
        return "json"

    return [_map_type(feature_type) for feature_type in feature_types]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def json_to_string_type(input_types):
    """Maps json input type to str."""
    result = []
    for input_type in input_types:
        result.append("str" if input_type == "json" else input_type)
    return result
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def parse_readme(filepath):
    """Read a repository README, stripping a leading YAML front-matter block if present."""
    if not os.path.exists(filepath):
        return "No README.md found."
    with open(filepath, "r") as readme_file:
        text = readme_file.read()
    match = REGEX_YAML_BLOCK.search(text)
    if match is not None:
        # Drop everything up to and including the closing "---" of the metadata block.
        text = text[match.end() :]
    return text
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def parse_gradio_data(data, input_types):
    """Parses data from gradio Dataframe for use in metric.

    Empty cells are converted to NaN and their rows dropped (both in place,
    mutating the caller's dataframe).
    """
    data.replace("", np.nan, inplace=True)
    data.dropna(inplace=True)
    metric_inputs = {}
    for column, input_type in zip(data, input_types):
        if input_type == "json":
            metric_inputs[column] = [json.loads(cell) for cell in data[column].to_list()]
        elif input_type == "str":
            metric_inputs[column] = [cell.strip('"') for cell in data[column].to_list()]
        else:
            metric_inputs[column] = data[column]
    return metric_inputs
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def parse_test_cases(test_cases, feature_names, input_types):
    """
    Parses test cases to be used in gradio Dataframe. Note that an apostrophe is added
    to strings to follow the format in json.
    """
    if not test_cases:
        return None
    examples = []
    for case in test_cases:
        columns = []
        for feature, input_type in zip(feature_names, input_types):
            values = case[feature]
            if input_type == "json":
                columns.append([str(value) for value in values])
            elif input_type == "str":
                # Wrap strings in double quotes so they round-trip through json parsing.
                columns.append(['"' + value + '"' for value in values])
            else:
                columns.append(values)
        # Transpose column-wise data into row-wise lists for the Dataframe widget.
        examples.append([list(row) for row in zip(*columns)])
    return examples
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def launch_gradio_widget(metric):
    """Launches `metric` widget with Gradio."""

    try:
        import gradio as gr
    except ImportError as error:
        logger.error("To create a metric widget with Gradio make sure gradio is installed.")
        raise error

    # Directory of the running script; used below to locate its README.md.
    local_path = Path(sys.path[0])
    # if there are several input types, use first as default.
    if isinstance(metric.features, list):
        (feature_names, feature_types) = zip(*metric.features[0].items())
    else:
        (feature_names, feature_types) = zip(*metric.features.items())
    gradio_input_types = infer_gradio_input_types(feature_types)

    # Bridge between the gradio Dataframe payload and the metric's compute() kwargs.
    def compute(data):
        return metric.compute(**parse_gradio_data(data, gradio_input_types))

    # NOTE(review): gr.inputs / gr.outputs look like the legacy (pre-3.x) Gradio
    # namespaces — confirm the pinned gradio version before updating.
    iface = gr.Interface(
        fn=compute,
        inputs=gr.inputs.Dataframe(
            headers=feature_names,
            col_count=len(feature_names),
            row_count=1,
            datatype=json_to_string_type(gradio_input_types),
        ),
        outputs=gr.outputs.Textbox(label=metric.name),
        description=(
            metric.info.description + "\nIf this is a text-based metric, make sure to wrap you input in double quotes."
            " Alternatively you can use a JSON-formatted list as input."
        ),
        title=f"Metric: {metric.name}",
        article=parse_readme(local_path / "README.md"),
        # TODO: load test cases and use them to populate examples
        # examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)]
    )

    iface.launch()
|
venv/lib/python3.10/site-packages/evaluate/utils/logging.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 Optuna, Hugging Face
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
""" Logging utilities. """
|
| 15 |
+
|
| 16 |
+
import logging
|
| 17 |
+
import os
|
| 18 |
+
from logging import CRITICAL # NOQA
|
| 19 |
+
from logging import DEBUG # NOQA
|
| 20 |
+
from logging import ERROR # NOQA
|
| 21 |
+
from logging import FATAL # NOQA
|
| 22 |
+
from logging import INFO # NOQA
|
| 23 |
+
from logging import NOTSET # NOQA
|
| 24 |
+
from logging import WARN # NOQA
|
| 25 |
+
from logging import WARNING # NOQA
|
| 26 |
+
from typing import Optional
|
| 27 |
+
|
| 28 |
+
from tqdm import auto as tqdm_lib
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
log_levels = {
|
| 32 |
+
"debug": logging.DEBUG,
|
| 33 |
+
"info": logging.INFO,
|
| 34 |
+
"warning": logging.WARNING,
|
| 35 |
+
"error": logging.ERROR,
|
| 36 |
+
"critical": logging.CRITICAL,
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
_default_log_level = logging.WARNING
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _get_default_logging_level():
|
| 43 |
+
"""
|
| 44 |
+
If EVALUATE_VERBOSITY env var is set to one of the valid choices return that as the new default level.
|
| 45 |
+
If it is not - fall back to ``_default_log_level``
|
| 46 |
+
"""
|
| 47 |
+
env_level_str = os.getenv("EVALUATE_VERBOSITY", None)
|
| 48 |
+
if env_level_str:
|
| 49 |
+
if env_level_str in log_levels:
|
| 50 |
+
return log_levels[env_level_str]
|
| 51 |
+
else:
|
| 52 |
+
logging.getLogger().warning(
|
| 53 |
+
f"Unknown option EVALUATE_VERBOSITY={env_level_str}, "
|
| 54 |
+
f"has to be one of: { ', '.join(log_levels.keys()) }"
|
| 55 |
+
)
|
| 56 |
+
return _default_log_level
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _get_library_name() -> str:
|
| 60 |
+
return __name__.split(".")[0]
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _get_library_root_logger() -> logging.Logger:
    """Return the logger shared by the whole library (named after the package)."""
    library_name = _get_library_name()
    return logging.getLogger(library_name)
+
|
| 67 |
+
def _configure_library_root_logger() -> None:
    """Apply the library's default logging level to its root logger."""
    _get_library_root_logger().setLevel(_get_default_logging_level())
|
| 72 |
+
|
| 73 |
+
def _reset_library_root_logger() -> None:
    """Reset the library root logger's level back to ``NOTSET``."""
    _get_library_root_logger().setLevel(logging.NOTSET)
+
|
| 78 |
+
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name.

    When *name* is omitted, the library root logger is returned.
    """
    return logging.getLogger(_get_library_name() if name is None else name)
|
| 84 |
+
|
| 85 |
+
def get_verbosity() -> int:
    """Return the current level for the Hugging Face Evaluate library's root logger.

    Returns:
        Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`.

    <Tip>

    Hugging Face Evaluate library has following logging levels:
    - `evaluate.logging.CRITICAL`, `evaluate.logging.FATAL`
    - `evaluate.logging.ERROR`
    - `evaluate.logging.WARNING`, `evaluate.logging.WARN`
    - `evaluate.logging.INFO`
    - `evaluate.logging.DEBUG`

    </Tip>
    """
    root_logger = _get_library_root_logger()
    return root_logger.getEffectiveLevel()
+
|
| 103 |
+
|
| 104 |
+
def set_verbosity(verbosity: int) -> None:
    """Set the level for the Hugging Face Evaluate library's root logger.

    Args:
        verbosity:
            Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`.
    """
    root_logger = _get_library_root_logger()
    root_logger.setLevel(verbosity)
|
| 112 |
+
|
| 113 |
+
def set_verbosity_info():
    """Set the level for the Hugging Face Evaluate library's root logger to `INFO`.

    This will display most of the logging information and tqdm bars.

    Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.INFO)`.
    """
    set_verbosity(logging.INFO)
+
|
| 122 |
+
|
| 123 |
+
def set_verbosity_warning():
    """Set the level for the Hugging Face Evaluate library's root logger to `WARNING`.

    This will display only the warning and errors logging information and tqdm bars.

    Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.WARNING)`.
    """
    set_verbosity(logging.WARNING)
+
|
| 132 |
+
|
| 133 |
+
def set_verbosity_debug():
    """Set the level for the Hugging Face Evaluate library's root logger to `DEBUG`.

    This will display all the logging information and tqdm bars.

    Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.DEBUG)`.
    """
    set_verbosity(logging.DEBUG)
+
|
| 142 |
+
|
| 143 |
+
def set_verbosity_error():
    """Set the level for the Hugging Face Evaluate library's root logger to `ERROR`.

    This will display only the errors logging information and tqdm bars.

    Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.ERROR)`.
    """
    set_verbosity(logging.ERROR)
+
|
| 152 |
+
|
| 153 |
+
def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers.

    Note that log propagation is disabled by default.
    """
    root_logger = _get_library_root_logger()
    root_logger.propagate = False
+
|
| 159 |
+
|
| 160 |
+
def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers.

    Please disable the Hugging Face Evaluate library's default handler to prevent
    double logging if the root logger has been configured.
    """
    root_logger = _get_library_root_logger()
    root_logger.propagate = True
+
|
| 167 |
+
|
| 168 |
+
# Configure the library root logger at the module level (singleton-like)
# Runs once at import time, so every child logger returned by `get_logger`
# inherits the default level resolved from EVALUATE_VERBOSITY.
_configure_library_root_logger()
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class EmptyTqdm:
    """No-op stand-in for `tqdm` used when progress bars are disabled.

    Iterating it simply iterates the wrapped iterable; every other attribute
    access resolves to a function that does nothing, and it can be used as a
    context manager.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep only the iterable (first positional argument) so iteration still works.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def no_op(*args, **kwargs):  # pylint: disable=unused-argument
            return None

        return no_op

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return None
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# Module-level switch consulted by `_tqdm_cls.__call__`; toggled via
# enable_progress_bar() / disable_progress_bar().
_tqdm_active = True
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
class _tqdm_cls:
    """Callable factory that yields a real tqdm bar, or a no-op `EmptyTqdm`
    when progress bars are disabled via the module-level `_tqdm_active` flag."""

    def __call__(self, *args, **kwargs):
        if not _tqdm_active:
            return EmptyTqdm(*args, **kwargs)
        return tqdm_lib.tqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        # Mirror tqdm's lock API; only forwarded when bars are active.
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
| 214 |
+
|
| 215 |
+
|
| 216 |
+
# Public entry point: behaves like `tqdm.tqdm` unless progress bars are disabled.
tqdm = _tqdm_cls()
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    # Reading a module global needs no `global` declaration.
    return bool(_tqdm_active)
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def enable_progress_bar():
    """Turn tqdm progress bars back on for the library."""
    global _tqdm_active
    _tqdm_active = True
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def disable_progress_bar():
    """Disable tqdm progress bar."""
    # Fix: the original docstring said "Enable tqdm progress bar." — a copy-paste
    # error from enable_progress_bar(); this function turns bars OFF.
    global _tqdm_active
    _tqdm_active = False
venv/lib/python3.10/site-packages/evaluate/visualization.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import textwrap
|
| 2 |
+
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ComplexRadar:
    """Create a complex radar chart with different scales for each variable.

    One polar axes is created per variable (plus one duplicate of the first),
    so every variable can carry its own (min, max) range; data is rescaled to
    the first variable's range before plotting.

    Args:
        fig (`matplotlib.figure.Figure`): A matplotlib figure object to add the axes on.
        variables (`list`): A list of variables to plot.
        ranges (`list` of `tuple`): A list of ranges (min, max) for each variable.
        n_ring_levels (`int`): Number of ordinate or ring levels to draw.
            Default: 5.
        show_scales (`bool`): Indicates if the ranges for each variable are plotted.
            Default: True.
        format_cfg (`dict`): A dictionary with formatting configurations.
            Default: None.
            NOTE(review): `__init__` subscripts `format_cfg` unconditionally
            (e.g. `self.format_cfg["axes_args"]`), so a fully-populated dict is
            effectively required; passing None raises TypeError. `radar_plot`
            always supplies one — confirm before calling this class directly.
    Returns:
        `matplotlib.figure.Figure`: a radar plot.
    """

    def __init__(self, fig, variables, ranges, n_ring_levels=5, show_scales=True, format_cfg=None):

        self.format_cfg = format_cfg

        # Calculate angles and create for each variable an axes
        # Consider here the trick with having the first axes element twice (len+1)
        angles = np.arange(0, 360, 360.0 / len(variables))
        axes = [
            fig.add_axes([0.1, 0.1, 0.9, 0.9], polar=True, label="axes{}".format(i), **self.format_cfg["axes_args"])
            for i in range(len(variables) + 1)
        ]

        # Ensure clockwise rotation (first variable at the top N)
        for ax in axes:
            ax.set_theta_zero_location("N")
            ax.set_theta_direction(-1)
            ax.set_axisbelow(True)

        # Writing the ranges on each axes
        for i, ax in enumerate(axes):

            # Here we do the trick by repeating the first iteration:
            # axes 0 and 1 both use ranges[0], axis i>1 uses ranges[i-1]
            j = 0 if (i == 0 or i == 1) else i - 1
            ax.set_ylim(*ranges[j])
            # Set endpoint to True if you like to have values right before the last circle
            grid = np.linspace(*ranges[j], num=n_ring_levels, endpoint=self.format_cfg["incl_endpoint"])
            gridlabel = ["{}".format(round(x, 2)) for x in grid]
            gridlabel[0] = ""  # remove values from the center
            lines, labels = ax.set_rgrids(
                grid, labels=gridlabel, angle=angles[j], **self.format_cfg["rgrid_tick_lbls_args"]
            )

            # set_rgrids can reset the limits, so re-apply them
            ax.set_ylim(*ranges[j])
            ax.spines["polar"].set_visible(False)
            ax.grid(visible=False)

            if show_scales is False:
                ax.set_yticklabels([])

        # Set all axes except the first one unvisible
        for ax in axes[1:]:
            ax.patch.set_visible(False)
            ax.xaxis.set_visible(False)

        # Setting the attributes
        self.angle = np.deg2rad(np.r_[angles, angles[0]])  # closed polygon (first angle repeated)
        self.ranges = ranges
        self.ax = axes[0]
        self.ax1 = axes[1]
        self.plot_counter = 0

        # Draw (inner) circles and lines
        self.ax.yaxis.grid(**self.format_cfg["rad_ln_args"])
        # Draw outer circle
        self.ax.spines["polar"].set(**self.format_cfg["outer_ring"])
        # Draw angle lines
        self.ax.xaxis.grid(**self.format_cfg["angle_ln_args"])

        # ax1 is the duplicate of axes[0] (self.ax)
        # Remove everything from ax1 except the plot itself
        self.ax1.axis("off")
        self.ax1.set_zorder(9)

        # Create the outer labels for each variable
        l, text = self.ax.set_thetagrids(angles, labels=variables)

        # Beautify them: wrap long variable names across lines
        labels = [t.get_text() for t in self.ax.get_xticklabels()]
        labels = [
            "\n".join(
                textwrap.wrap(
                    label,
                    self.format_cfg["theta_tick_lbls_txt_wrap"],
                    break_long_words=self.format_cfg["theta_tick_lbls_brk_lng_wrds"],
                )
            )
            for label in labels
        ]
        self.ax.set_xticklabels(labels, **self.format_cfg["theta_tick_lbls"])

        # Align each label away from the plot depending on its angular position
        for t, a in zip(self.ax.get_xticklabels(), angles):
            if a == 0:
                t.set_ha("center")
            elif a > 0 and a < 180:
                t.set_ha("left")
            elif a == 180:
                t.set_ha("center")
            else:
                t.set_ha("right")

        self.ax.tick_params(axis="both", pad=self.format_cfg["theta_tick_lbls_pad"])

    def _scale_data(self, data, ranges):
        """Scales data[1:] to ranges[0]"""
        # First pass only validates that every value lies within its own range
        # (ranges may be inverted, hence the two orderings).
        for d, (y1, y2) in zip(data[1:], ranges[1:]):
            assert (y1 <= d <= y2) or (y2 <= d <= y1)
        x1, x2 = ranges[0]
        d = data[0]
        sdata = [d]
        # Linearly map each remaining value from its own range onto ranges[0]
        for d, (y1, y2) in zip(data[1:], ranges[1:]):
            sdata.append((d - y1) / (y2 - y1) * (x2 - x1) + x1)
        return sdata

    def plot(self, data, *args, **kwargs):
        """Plots a line"""
        sdata = self._scale_data(data, self.ranges)
        # Repeat the first point to close the polygon (matches self.angle)
        self.ax1.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kwargs)
        self.plot_counter = self.plot_counter + 1

    def use_legend(self, *args, **kwargs):
        """Shows a legend"""
        self.ax1.legend(*args, **kwargs)
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def radar_plot(data, model_names, invert_range=[], config=None, fig=None):
    """Create a complex radar chart with different scales for each variable
    Source: https://towardsdatascience.com/how-to-create-and-visualize-complex-radar-charts-f7764d0f3652

    Args:
        data (`List[dict]`): the results (list of metric + value pairs).
            E.g. data = [{"accuracy": 0.9, "precision":0.8},{"accuracy": 0.7, "precision":0.6}]
        model_names (`List[str]`): model names.
            E.g. model_names = ["model1", "model 2", ...]
        invert_range (`List[str]`, optional): the metrics to invert (in cases when smaller is better, e.g. speed)
            E.g. invert_range=["latency_in_seconds"]
        config (`dict`, optional) : a specification of the formatting configurations, namely:

            - rad_ln_args (`dict`, default `{"visible": True}`): The visibility of the radial (circle) lines.

            - outer_ring (`dict`, default `{"visible": True}`): The visibility of the outer ring.

            - angle_ln_args (`dict`, default `{"visible": True}`): The visibility of the angle lines.

            - rgrid_tick_lbls_args (`dict`, default `{"fontsize": 12}`): The font size of the tick labels on the scales.

            - theta_tick_lbls (`dict`, default `{"fontsize": 12}`): The font size of the variable labels on the plot.

            - theta_tick_lbls_pad (`int`, default `3`): The padding of the variable labels on the plot.

            - theta_tick_lbls_brk_lng_wrds (`bool`, default `True` ): Whether long words in the label are broken up or not.

            - theta_tick_lbls_txt_wrap (`int`, default `15`): Text wrap for tick labels

            - incl_endpoint (`bool`, default `False`): Include value endpoints on scale

            - marker (`str`, default `"o"`): the shape of the marker used in the radar plot.

            - markersize (`int`, default `3`): the size of the marker used in the radar plot.

            - legend_loc (`str`, default `"upper right"`): the location of the legend in the radar plot. Must be one of: 'upper left', 'upper right', 'lower left', 'lower right'.

            - bbox_to_anchor (`tuple`, default `(2, 1)`): anchor for the legend.
        fig (`matplotlib.figure.Figure`, optional): figure used to plot the radar plot.

    Returns:
        `matplotlib.figure.Figure`

    Raises:
        ValueError: if any metric named in `invert_range` is absent from `data`.
    """
    # NOTE(review): the mutable default `invert_range=[]` is safe here because the
    # list is only read (membership tests), never mutated.
    data = pd.DataFrame(data)
    data.index = model_names
    variables = data.keys()
    if all(x in variables for x in invert_range) is False:
        raise ValueError("All of the metrics in `invert_range` should be in the data provided.")
    # Pad each variable's range by ~10% so points don't sit on the chart edges.
    min_max_per_variable = data.describe().T[["min", "max"]]
    min_max_per_variable["min"] = min_max_per_variable["min"] - 0.1 * (
        min_max_per_variable["max"] - min_max_per_variable["min"]
    )
    # NOTE(review): this second step uses the already-padded "min", so the upper
    # padding is slightly larger than 10% of the original span — presumably
    # unintentional but purely cosmetic; confirm before changing.
    min_max_per_variable["max"] = min_max_per_variable["max"] + 0.1 * (
        min_max_per_variable["max"] - min_max_per_variable["min"]
    )

    ranges = list(min_max_per_variable.itertuples(index=False, name=None))
    # Swap (min, max) for inverted metrics so "smaller is better" points outward.
    ranges = [
        (max_value, min_value) if var in invert_range else (min_value, max_value)
        for var, (min_value, max_value) in zip(variables, ranges)
    ]
    format_cfg = {
        "axes_args": {},
        "rad_ln_args": {"visible": True},
        "outer_ring": {"visible": True},
        "angle_ln_args": {"visible": True},
        "rgrid_tick_lbls_args": {"fontsize": 12},
        "theta_tick_lbls": {"fontsize": 12},
        "theta_tick_lbls_pad": 3,
        "theta_tick_lbls_brk_lng_wrds": True,
        "theta_tick_lbls_txt_wrap": 15,
        "incl_endpoint": False,
        "marker": "o",
        "markersize": 3,
        "legend_loc": "upper right",
        "bbox_to_anchor": (2, 1),
    }
    if config is not None:
        format_cfg.update(config)
    if fig is None:
        fig = plt.figure()
    radar = ComplexRadar(
        fig,
        variables,
        ranges,
        n_ring_levels=3,
        show_scales=True,
        format_cfg=format_cfg,
    )
    # NOTE(review): `zip(data.index)` yields 1-tuples, so each `g` (and thus the
    # legend label) is a tuple like ("model1",); `data.loc[g]` still resolves the
    # row. `for g in data.index` would give plain-string labels — confirm the
    # rendered legend before changing.
    for g in zip(data.index):
        radar.plot(data.loc[g].values, label=g, marker=format_cfg["marker"], markersize=format_cfg["markersize"])
    radar.use_legend(**{"loc": format_cfg["legend_loc"], "bbox_to_anchor": format_cfg["bbox_to_anchor"]})
    return fig
|
venv/lib/python3.10/site-packages/tqdm/__init__.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ._monitor import TMonitor, TqdmSynchronisationWarning
|
| 2 |
+
from ._tqdm_pandas import tqdm_pandas
|
| 3 |
+
from .cli import main # TODO: remove in v5.0.0
|
| 4 |
+
from .gui import tqdm as tqdm_gui # TODO: remove in v5.0.0
|
| 5 |
+
from .gui import trange as tgrange # TODO: remove in v5.0.0
|
| 6 |
+
from .std import (
|
| 7 |
+
TqdmDeprecationWarning, TqdmExperimentalWarning, TqdmKeyError, TqdmMonitorWarning,
|
| 8 |
+
TqdmTypeError, TqdmWarning, tqdm, trange)
|
| 9 |
+
from .version import __version__
|
| 10 |
+
|
| 11 |
+
__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
|
| 12 |
+
'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
|
| 13 |
+
'TqdmTypeError', 'TqdmKeyError',
|
| 14 |
+
'TqdmWarning', 'TqdmDeprecationWarning',
|
| 15 |
+
'TqdmExperimentalWarning',
|
| 16 |
+
'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
|
| 17 |
+
'__version__']
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def tqdm_notebook(*args, **kwargs):  # pragma: no cover
    """See tqdm.notebook.tqdm for full documentation"""
    from warnings import warn

    from .notebook import tqdm as _tqdm_notebook
    message = ("This function will be removed in tqdm==5.0.0\n"
               "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`")
    warn(message, TqdmDeprecationWarning, stacklevel=2)
    return _tqdm_notebook(*args, **kwargs)
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def tnrange(*args, **kwargs):  # pragma: no cover
    """Shortcut for `tqdm.notebook.tqdm(range(*args), **kwargs)`."""
    from warnings import warn

    from .notebook import trange as _tnrange
    message = "Please use `tqdm.notebook.trange` instead of `tqdm.tnrange`"
    warn(message, TqdmDeprecationWarning, stacklevel=2)
    return _tnrange(*args, **kwargs)
|
venv/lib/python3.10/site-packages/tqdm/__main__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Entry point for `python -m tqdm ...`: delegate straight to the CLI.
from .cli import main

main()
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.49 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/__main__.cpython-310.pyc
ADDED
|
Binary file (212 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc
ADDED
|
Binary file (195 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc
ADDED
|
Binary file (446 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_monitor.cpython-310.pyc
ADDED
|
Binary file (2.81 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm.cpython-310.pyc
ADDED
|
Binary file (441 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_gui.cpython-310.pyc
ADDED
|
Binary file (454 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc
ADDED
|
Binary file (474 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc
ADDED
|
Binary file (962 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (827 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc
ADDED
|
Binary file (3.38 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc
ADDED
|
Binary file (1.12 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/autonotebook.cpython-310.pyc
ADDED
|
Binary file (1.01 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/cli.cpython-310.pyc
ADDED
|
Binary file (8.91 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/dask.cpython-310.pyc
ADDED
|
Binary file (2.07 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/gui.cpython-310.pyc
ADDED
|
Binary file (4.56 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/keras.cpython-310.pyc
ADDED
|
Binary file (4.94 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/notebook.cpython-310.pyc
ADDED
|
Binary file (7.44 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/rich.cpython-310.pyc
ADDED
|
Binary file (5.07 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/std.cpython-310.pyc
ADDED
|
Binary file (45.4 kB). View file
|
|
|
venv/lib/python3.10/site-packages/tqdm/__pycache__/tk.cpython-310.pyc
ADDED
|
Binary file (6.18 kB). View file
|
|
|