applied-ai-018 committed
Commit c9cfa33 · verified · 1 Parent(s): 92fc703

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50):
  1. env-llmeval/lib/python3.10/site-packages/evaluate/__init__.py +51 -0
  2. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/evaluate/commands/__init__.py +0 -0
  13. env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py +137 -0
  16. env-llmeval/lib/python3.10/site-packages/evaluate/config.py +192 -0
  17. env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py +128 -0
  18. env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__init__.py +140 -0
  20. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py +151 -0
  32. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py +112 -0
  33. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/base.py +544 -0
  34. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py +119 -0
  35. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py +239 -0
  36. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py +267 -0
  37. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py +160 -0
  38. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py +69 -0
  39. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py +278 -0
  40. env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/utils.py +84 -0
  41. env-llmeval/lib/python3.10/site-packages/evaluate/hub.py +133 -0
  42. env-llmeval/lib/python3.10/site-packages/evaluate/info.py +157 -0
  43. env-llmeval/lib/python3.10/site-packages/evaluate/inspect.py +129 -0
  44. env-llmeval/lib/python3.10/site-packages/evaluate/loading.py +771 -0
  45. env-llmeval/lib/python3.10/site-packages/evaluate/module.py +1029 -0
  46. env-llmeval/lib/python3.10/site-packages/evaluate/naming.py +82 -0
  47. env-llmeval/lib/python3.10/site-packages/evaluate/utils/__init__.py +39 -0
  48. env-llmeval/lib/python3.10/site-packages/evaluate/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/evaluate/utils/__pycache__/file_utils.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/evaluate/utils/__pycache__/gradio.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/evaluate/__init__.py ADDED
@@ -0,0 +1,51 @@
1
+ # flake8: noqa
2
+ # Copyright 2020 The HuggingFace Evaluate Authors and the TensorFlow Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ # pylint: enable=line-too-long
18
+ # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
19
+
20
+ __version__ = "0.4.1"
21
+
22
+ from packaging import version
23
+
24
+
25
+ SCRIPTS_VERSION = "main" if version.parse(__version__).is_devrelease else __version__
26
+
27
+ del version
28
+
29
+ from .evaluation_suite import EvaluationSuite
30
+ from .evaluator import (
31
+ AudioClassificationEvaluator,
32
+ AutomaticSpeechRecognitionEvaluator,
33
+ Evaluator,
34
+ ImageClassificationEvaluator,
35
+ QuestionAnsweringEvaluator,
36
+ SummarizationEvaluator,
37
+ Text2TextGenerationEvaluator,
38
+ TextClassificationEvaluator,
39
+ TextGenerationEvaluator,
40
+ TokenClassificationEvaluator,
41
+ TranslationEvaluator,
42
+ evaluator,
43
+ )
44
+ from .hub import push_to_hub
45
+ from .info import ComparisonInfo, EvaluationModuleInfo, MeasurementInfo, MetricInfo
46
+ from .inspect import inspect_evaluation_module, list_evaluation_modules
47
+ from .loading import load
48
+ from .module import CombinedEvaluations, Comparison, EvaluationModule, Measurement, Metric, combine
49
+ from .saving import save
50
+ from .utils import *
51
+ from .utils import gradio, logging
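The `__init__.py` above re-exports the package's public surface (`load`, `combine`, `evaluator`, `EvaluationSuite`, `push_to_hub`, ...). A minimal sketch of how that surface is typically used, assuming the standard `accuracy` and `f1` metric modules can be fetched (network access or a warm local cache):

```python
# Minimal usage sketch of the API re-exported above; assumes the "accuracy"
# and "f1" metric modules are reachable.
import evaluate

accuracy = evaluate.load("accuracy")                # loading.load
print(accuracy.compute(predictions=[0, 1, 1], references=[0, 1, 0]))

clf_metrics = evaluate.combine(["accuracy", "f1"])  # module.combine
print(clf_metrics.compute(predictions=[0, 1, 1], references=[0, 1, 0]))
```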
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc ADDED
Binary file (4.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc ADDED
Binary file (5.76 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc ADDED
Binary file (24.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc ADDED
Binary file (40.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc ADDED
Binary file (2.54 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc ADDED
Binary file (2.26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc ADDED
Binary file (8.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/commands/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc ADDED
Binary file (4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py ADDED
@@ -0,0 +1,137 @@
1
+ import argparse
2
+ import os
3
+ import subprocess
4
+ from pathlib import Path
5
+
6
+ from cookiecutter.main import cookiecutter
7
+ from huggingface_hub import HfApi, Repository, create_repo
8
+
9
+ from evaluate.utils.logging import get_logger
10
+
11
+
12
+ logger = get_logger(__name__)
13
+
14
+ INSTRUCTIONS = """\
15
+ A new repository for your module "{module_name}" of type "{module_type}" has been created at {output_dir} and pushed to the Hugging Face Hub: {repo_url}.
16
+
17
+ Here are the next steps:
18
+ - implement the module logic in {module_slug}/{module_slug}.py
19
+ - document your module in {module_slug}/README.md
20
+ - add test cases for your module in {module_slug}/tests.py
21
+ - if your module has any dependencies, update them in {module_slug}/requirements.txt
22
+
23
+ You can test your module's widget locally by running:
24
+
25
+ ```
26
+ python {output_dir}/{module_slug}/app.py
27
+ ```
28
+
29
+ When you are happy with your changes, you can push them to the Hugging Face Hub with the following commands:
30
+
31
+ ```
32
+ cd {output_dir}/{module_slug}
33
+ git add .
34
+ git commit -m "Updating module"
35
+ git push
36
+ ```
37
+
38
+ You should then see the updated widget on the Hugging Face Hub: {repo_url}
39
+ And you can load your module in Python with the following code:
40
+
41
+ ```
42
+ from evaluate import load
43
+ module = load("{namespace}/{module_slug}")
44
+ ```
45
+ """
46
+
47
+
48
+ def main():
49
+ parser = argparse.ArgumentParser("HuggingFace Evaluate CLI tool", usage="evaluate-cli <command> [<args>]")
50
+ subparsers = parser.add_subparsers()
51
+ parser_create = subparsers.add_parser("create", help="Create new evaluation module.")
52
+ parser_create.add_argument(
53
+ "module_name", type=str, help='Pretty name of new evaluation module, e.g. "Recall" or "Exact Match".'
54
+ )
55
+ parser_create.add_argument(
56
+ "--module_type",
57
+ default="metric",
58
+ type=str,
59
+ help="Type of module, has to be one of [metric|comparison|measurement].",
60
+ )
61
+ parser_create.add_argument(
62
+ "--dataset_name", default="", type=str, help="Name of dataset if evaluation module is dataset specific."
63
+ )
64
+ parser_create.add_argument("--module_description", type=str, help="Short description of evaluation module.")
65
+ parser_create.add_argument("--output_dir", default=Path.cwd(), type=str, help="Path to output directory.")
66
+ parser_create.add_argument(
67
+ "--organization", default=None, type=str, help="Organization on the Hub to push evaluation module to."
68
+ )
69
+ parser_create.add_argument("--private", action="store_true", help="Sets evaluation module repository to private.")
70
+ args = vars(parser.parse_args())
71
+
72
+ if args["module_type"] not in ["metric", "comparison", "measurement"]:
73
+ raise ValueError("The module_type needs to be one of metric, comparison, or measurement")
74
+
75
+ if "-" in args["module_name"]:
76
+ raise ValueError("Hyphens ('-') are not allowed in module names.")
77
+
78
+ output_dir = Path(args["output_dir"])
79
+ organization = args["organization"]
80
+ module_slug = args["module_name"].lower().replace(" ", "_")
81
+
82
+ if organization is None:
83
+ hfapi = HfApi()
84
+ namespace = hfapi.whoami()["name"]
85
+ else:
86
+ namespace = organization
87
+ args["namespace"] = namespace
88
+ repo_url = f"https://huggingface.co/spaces/{namespace}/{module_slug}"
89
+
90
+ try:
91
+ create_repo(namespace + "/" + module_slug, repo_type="space", space_sdk="gradio", private=args["private"])
92
+ except Exception as exception:
93
+ logger.error(
94
+ f"Could not create Space for module at hf.co/spaces/{namespace}/{module_slug}. Make sure this space does not exist already."
95
+ )
96
+ raise exception
97
+ subprocess.run(
98
+ f"git clone {repo_url}".split(),
99
+ stderr=subprocess.PIPE,
100
+ stdout=subprocess.PIPE,
101
+ check=True,
102
+ encoding="utf-8",
103
+ cwd=output_dir,
104
+ env=os.environ.copy(),
105
+ )
106
+
107
+ repo = Repository(
108
+ local_dir=output_dir / module_slug,
109
+ )
110
+
111
+ cookiecutter(
112
+ "https://github.com/huggingface/evaluate/",
113
+ directory="templates",
114
+ no_input=True,
115
+ extra_context=args,
116
+ output_dir=output_dir,
117
+ overwrite_if_exists=True,
118
+ )
119
+
120
+ repo.git_add()
121
+ repo.git_commit("add module default template")
122
+ repo.git_push()
123
+
124
+ print(
125
+ INSTRUCTIONS.format(
126
+ module_name=args["module_name"],
127
+ module_type=args["module_type"],
128
+ module_slug=module_slug,
129
+ namespace=namespace,
130
+ repo_url=repo_url,
131
+ output_dir=output_dir,
132
+ )
133
+ )
134
+
135
+
136
+ if __name__ == "__main__":
137
+ main()
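The CLI above exposes a single `create` subcommand, installed as the `evaluate-cli` console script. A sketch of driving the same `main()` programmatically; the module name and organization are placeholders, and a real run needs git, network access, and a logged-in Hugging Face account:

```python
# Sketch only: drives the argparse-based main() above the same way the
# `evaluate-cli` console script would. "My Metric" and "my-org" are
# placeholders; running this for real creates a Space on the Hub.
import sys

from evaluate.commands.evaluate_cli import main

sys.argv = [
    "evaluate-cli",
    "create",
    "My Metric",                  # pretty name; the slug becomes "my_metric"
    "--module_type", "metric",
    "--organization", "my-org",
]
main()
```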
env-llmeval/lib/python3.10/site-packages/evaluate/config.py ADDED
@@ -0,0 +1,192 @@
1
+ import importlib
2
+ import os
3
+ import platform
4
+ from pathlib import Path
5
+
6
+ from packaging import version
7
+
8
+ from .utils.logging import get_logger
9
+
10
+
11
+ logger = get_logger(__name__)
12
+
13
+
14
+ # Metrics
15
+ S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
16
+ CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
17
+ REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/metrics/{path}/{name}"
18
+ REPO_MEASUREMENTS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/measurements/{path}/{name}"
19
+ REPO_COMPARISONS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/comparisons/{path}/{name}"
20
+
21
+ # Evaluation module types
22
+ EVALUATION_MODULE_TYPES = ["metric", "comparison", "measurement"]
23
+
24
+ # Hub
25
+ HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
26
+ HF_LIST_ENDPOINT = HF_ENDPOINT + "/api/spaces?filter={type}"
27
+ HUB_EVALUATE_URL = HF_ENDPOINT + "/spaces/{path}/resolve/{revision}/{name}"
28
+ HUB_DEFAULT_VERSION = "main"
29
+
30
+ PY_VERSION = version.parse(platform.python_version())
31
+
32
+ if PY_VERSION < version.parse("3.8"):
33
+ import importlib_metadata
34
+ else:
35
+ import importlib.metadata as importlib_metadata
36
+
37
+ # General environment variables accepted values for booleans
38
+ ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
39
+ ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
40
+
41
+
42
+ # Imports
43
+ PANDAS_VERSION = version.parse(importlib_metadata.version("pandas"))
44
+ PYARROW_VERSION = version.parse(importlib_metadata.version("pyarrow"))
45
+
46
+ USE_TF = os.environ.get("USE_TF", "AUTO").upper()
47
+ USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
48
+ USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
49
+
50
+ TORCH_VERSION = "N/A"
51
+ TORCH_AVAILABLE = False
52
+
53
+ if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
54
+ TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
55
+ if TORCH_AVAILABLE:
56
+ try:
57
+ TORCH_VERSION = version.parse(importlib_metadata.version("torch"))
58
+ logger.info(f"PyTorch version {TORCH_VERSION} available.")
59
+ except importlib_metadata.PackageNotFoundError:
60
+ pass
61
+ else:
62
+ logger.info("Disabling PyTorch because USE_TF is set")
63
+
64
+ TF_VERSION = "N/A"
65
+ TF_AVAILABLE = False
66
+
67
+ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
68
+ TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
69
+ if TF_AVAILABLE:
70
+ # For the metadata, we have to look for both tensorflow and tensorflow-cpu
71
+ for package in [
72
+ "tensorflow",
73
+ "tensorflow-cpu",
74
+ "tensorflow-gpu",
75
+ "tf-nightly",
76
+ "tf-nightly-cpu",
77
+ "tf-nightly-gpu",
78
+ "intel-tensorflow",
79
+ "tensorflow-rocm",
80
+ "tensorflow-macos",
81
+ ]:
82
+ try:
83
+ TF_VERSION = version.parse(importlib_metadata.version(package))
84
+ except importlib_metadata.PackageNotFoundError:
85
+ continue
86
+ else:
87
+ break
88
+ else:
89
+ TF_AVAILABLE = False
90
+ if TF_AVAILABLE:
91
+ if TF_VERSION.major < 2:
92
+ logger.info(f"TensorFlow found but with version {TF_VERSION}. `evaluate` requires version 2 minimum.")
93
+ TF_AVAILABLE = False
94
+ else:
95
+ logger.info(f"TensorFlow version {TF_VERSION} available.")
96
+ else:
97
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
98
+
99
+
100
+ JAX_VERSION = "N/A"
101
+ JAX_AVAILABLE = False
102
+
103
+ if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
104
+ JAX_AVAILABLE = importlib.util.find_spec("jax") is not None
105
+ if JAX_AVAILABLE:
106
+ try:
107
+ JAX_VERSION = version.parse(importlib_metadata.version("jax"))
108
+ logger.info(f"JAX version {JAX_VERSION} available.")
109
+ except importlib_metadata.PackageNotFoundError:
110
+ pass
111
+ else:
112
+ logger.info("Disabling JAX because USE_JAX is set to False")
113
+
114
+
115
+ # Cache location
116
+ DEFAULT_XDG_CACHE_HOME = "~/.cache"
117
+ XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
118
+ DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
119
+ HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
120
+
121
+ DEFAULT_HF_EVALUATE_CACHE = os.path.join(HF_CACHE_HOME, "evaluate")
122
+ HF_EVALUATE_CACHE = Path(os.getenv("HF_EVALUATE_CACHE", DEFAULT_HF_EVALUATE_CACHE))
123
+
124
+ DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
125
+ HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
126
+
127
+ DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
128
+ HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
129
+
130
+ DOWNLOADED_DATASETS_DIR = "downloads"
131
+ DEFAULT_DOWNLOADED_EVALUATE_PATH = os.path.join(HF_EVALUATE_CACHE, DOWNLOADED_DATASETS_DIR)
132
+ DOWNLOADED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_EVALUATE_PATH", DEFAULT_DOWNLOADED_EVALUATE_PATH))
133
+
134
+ EXTRACTED_EVALUATE_DIR = "extracted"
135
+ DEFAULT_EXTRACTED_EVALUATE_PATH = os.path.join(DEFAULT_DOWNLOADED_EVALUATE_PATH, EXTRACTED_EVALUATE_DIR)
136
+ EXTRACTED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_EVALUATE_PATH", DEFAULT_EXTRACTED_EVALUATE_PATH))
137
+
138
+ # Download count for the website
139
+ HF_UPDATE_DOWNLOAD_COUNTS = (
140
+ os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
141
+ )
142
+
143
+ # Offline mode
144
+ HF_EVALUATE_OFFLINE = os.environ.get("HF_EVALUATE_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
145
+
146
+
147
+ # File names
148
+ LICENSE_FILENAME = "LICENSE"
149
+ METRIC_INFO_FILENAME = "metric_info.json"
150
+ DATASETDICT_JSON_FILENAME = "dataset_dict.json"
151
+
152
+ MODULE_NAME_FOR_DYNAMIC_MODULES = "evaluate_modules"
153
+
154
+ HF_HUB_ALLOWED_TASKS = [
155
+ "image-classification",
156
+ "translation",
157
+ "image-segmentation",
158
+ "fill-mask",
159
+ "automatic-speech-recognition",
160
+ "token-classification",
161
+ "sentence-similarity",
162
+ "audio-classification",
163
+ "question-answering",
164
+ "summarization",
165
+ "zero-shot-classification",
166
+ "table-to-text",
167
+ "feature-extraction",
168
+ "other",
169
+ "multiple-choice",
170
+ "text-classification",
171
+ "text-to-image",
172
+ "text2text-generation",
173
+ "zero-shot-image-classification",
174
+ "tabular-classification",
175
+ "tabular-regression",
176
+ "image-to-image",
177
+ "tabular-to-text",
178
+ "unconditional-image-generation",
179
+ "text-retrieval",
180
+ "text-to-speech",
181
+ "object-detection",
182
+ "audio-to-audio",
183
+ "text-generation",
184
+ "conversational",
185
+ "table-question-answering",
186
+ "visual-question-answering",
187
+ "image-to-text",
188
+ "reinforcement-learning",
189
+ "voice-activity-detection",
190
+ "time-series-forecasting",
191
+ "document-question-answering",
192
+ ]
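`config.py` resolves framework toggles, cache locations, and the offline switch from environment variables at import time, so they have to be set before the first `import evaluate`. A small sketch of that behaviour; the printed paths are whatever the defaults resolve to on the local machine:

```python
# Sketch: these values are read once, when the evaluate package is imported,
# so the environment variables must be exported beforehand.
import os

os.environ["HF_EVALUATE_OFFLINE"] = "1"   # any of {"1", "ON", "YES", "TRUE"}
os.environ["USE_TORCH"] = "1"             # prefer torch; skips TF detection

from evaluate import config

print(config.HF_EVALUATE_OFFLINE)   # True
print(config.HF_EVALUATE_CACHE)     # e.g. ~/.cache/huggingface/evaluate
print(config.TORCH_AVAILABLE)       # True only if torch is actually installed
```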
env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py ADDED
@@ -0,0 +1,128 @@
1
+ import importlib
2
+ import inspect
3
+ from dataclasses import dataclass
4
+ from pathlib import Path
5
+ from typing import Callable, Dict, Optional, Union
6
+
7
+ from datasets import Dataset, DownloadConfig, DownloadMode, load_dataset
8
+ from datasets.utils.version import Version
9
+
10
+ from ..evaluator import evaluator
11
+ from ..loading import evaluation_module_factory
12
+ from ..utils.logging import get_logger
13
+
14
+
15
+ logger = get_logger(__name__)
16
+
17
+
18
+ @dataclass
19
+ class SubTask:
20
+ task_type: str
21
+ data: Optional[Union[str, Dataset]] = None
22
+ subset: Optional[str] = None
23
+ split: Optional[str] = None
24
+ data_preprocessor: Optional[Callable] = None
25
+ args_for_task: Optional[dict] = None
26
+
27
+ def __post_init__(self):
28
+ if type(self.task_type) is not str:
29
+ raise ValueError(f"'task_type' must be type 'str', got {type(self.task_type)}")
30
+ if type(self.data) not in [Dataset, str]:
31
+ raise ValueError(
32
+ f"'data' must be an already-instantiated Dataset object or type 'str', got {type(self.data)}"
33
+ )
34
+ if self.subset and type(self.subset) is not str:
35
+ raise ValueError(f"'subset' must be type 'str', got {type(self.subset)}")
36
+ if self.split and type(self.split) is not str:
37
+ raise ValueError(f"'split' must be type 'str', got {type(self.split)}")
38
+ if self.data_preprocessor and not callable(self.data_preprocessor):
39
+ raise ValueError(f"'data_preprocessor' must be a Callable, got {self.data_preprocessor}")
40
+ if self.args_for_task and type(self.args_for_task) is not dict:
41
+ raise ValueError(f"'args_for_task' must be type 'dict', got {type(self.args_for_task)}")
42
+
43
+
44
+ def import_main_class(module_path):
45
+ """Import a module at module_path and return the EvaluationSuite class"""
46
+ module = importlib.import_module(module_path)
47
+
48
+ module_main_cls = None
49
+ for name, obj in module.__dict__.items():
50
+ if isinstance(obj, type) and obj.__name__ == "Suite":
51
+ if inspect.isabstract(obj):
52
+ continue
53
+ module_main_cls = obj
54
+ break
55
+
56
+ return module_main_cls
57
+
58
+
59
+ class EvaluationSuite:
60
+ """
61
+ This class instantiates an evaluation suite made up of multiple tasks, where each task consists of a dataset and
62
+ an associated metric, and runs evaluation on a model or pipeline. An evaluation suite can be defined in a Python script found
63
+ either locally or uploaded as a Space on the Hugging Face Hub.
64
+ Usage:
65
+ ```python
66
+ from evaluate import EvaluationSuite
67
+ suite = EvaluationSuite.load("evaluate/evaluation-suite-ci")
68
+ results = suite.run("lvwerra/distilbert-imdb")
69
+ ```
70
+ """
71
+
72
+ def __init__(self, name):
73
+ self.name = name
74
+
75
+ @staticmethod
76
+ def load(
77
+ path: str,
78
+ download_mode: Optional[DownloadMode] = None,
79
+ revision: Optional[Union[str, Version]] = None,
80
+ download_config: Optional[DownloadConfig] = None,
81
+ ):
82
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
83
+ evaluation_module = evaluation_module_factory(
84
+ path, module_type=None, revision=revision, download_config=download_config, download_mode=download_mode
85
+ )
86
+ name = Path(path).stem
87
+ evaluation_cls = import_main_class(evaluation_module.module_path)
88
+ evaluation_instance = evaluation_cls(name)
89
+
90
+ return evaluation_instance
91
+
92
+ def __repr__(self):
93
+ self.tasks = [str(task) for task in self.suite]
94
+ return f'EvaluationSuite name: "{self.name}", ' f"Tasks: {self.tasks}"
95
+
96
+ def assert_suite_nonempty(self):
97
+ if not self.suite:
98
+ raise ValueError(
99
+ "No evaluation tasks found. The EvaluationSuite must include at least one SubTask definition."
100
+ )
101
+
102
+ def run(
103
+ self, model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"] # noqa: F821
104
+ ) -> Dict[str, float]:
105
+
106
+ self.assert_suite_nonempty()
107
+
108
+ results_all = []
109
+ for task in self.suite:
110
+
111
+ task_name = task.data
112
+
113
+ if task.data_preprocessor: # task requires extra preprocessing
114
+ ds = load_dataset(task.data, name=task.subset, split=task.split)
115
+ task.data = ds.map(task.data_preprocessor)
116
+
117
+ task_evaluator = evaluator(task.task_type)
118
+ args_for_task = task.args_for_task
119
+ args_for_task["model_or_pipeline"] = model_or_pipeline
120
+ args_for_task["data"] = task.data
121
+ args_for_task["subset"] = task.subset
122
+ args_for_task["split"] = task.split
123
+ results = task_evaluator.compute(**args_for_task)
124
+
125
+ results["task_name"] = task_name + "/" + task.subset if task.subset else task_name
126
+ results["data_preprocessor"] = str(task.data_preprocessor) if task.data_preprocessor is not None else None
127
+ results_all.append(results)
128
+ return results_all
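`EvaluationSuite.load` imports the target script and, via `import_main_class` above, picks out the class literally named `Suite`, which is expected to fill `self.suite` with `SubTask` entries for `run` to iterate over. A hedged sketch of what such a suite script might look like; the dataset, split, metric, and label mapping are illustrative placeholders:

```python
# Sketch of a suite definition script. import_main_class() above finds this
# class because it is named "Suite"; dataset and metric choices are placeholders.
from evaluate.evaluation_suite import EvaluationSuite, SubTask


class Suite(EvaluationSuite):
    def __init__(self, name):
        super().__init__(name)
        self.suite = [
            SubTask(
                task_type="text-classification",
                data="imdb",                 # a dataset name (str) or a pre-loaded Dataset
                split="test[:100]",
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    "label_mapping": {"LABEL_0": 0, "LABEL_1": 1},
                },
            ),
        ]
```

Calling `suite.run("some-model")` then dispatches each `SubTask` to the matching task `evaluator` and returns one result dict per task.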
env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.91 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__init__.py ADDED
@@ -0,0 +1,140 @@
1
+ # Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ try:
17
+ from transformers.pipelines import SUPPORTED_TASKS as SUPPORTED_PIPELINE_TASKS
18
+ from transformers.pipelines import TASK_ALIASES
19
+ from transformers.pipelines import check_task as check_pipeline_task
20
+
21
+ TRANSFORMERS_AVAILABLE = True
22
+ except ImportError:
23
+ TRANSFORMERS_AVAILABLE = False
24
+
25
+ from typing import Dict, List
26
+
27
+ from .audio_classification import AudioClassificationEvaluator
28
+ from .automatic_speech_recognition import AutomaticSpeechRecognitionEvaluator
29
+ from .base import Evaluator
30
+ from .image_classification import ImageClassificationEvaluator
31
+ from .question_answering import QuestionAnsweringEvaluator
32
+ from .text2text_generation import SummarizationEvaluator, Text2TextGenerationEvaluator, TranslationEvaluator
33
+ from .text_classification import TextClassificationEvaluator
34
+ from .text_generation import TextGenerationEvaluator
35
+ from .token_classification import TokenClassificationEvaluator
36
+
37
+
38
+ SUPPORTED_EVALUATOR_TASKS = {
39
+ "text-classification": {
40
+ "implementation": TextClassificationEvaluator,
41
+ "default_metric_name": "accuracy",
42
+ },
43
+ "image-classification": {
44
+ "implementation": ImageClassificationEvaluator,
45
+ "default_metric_name": "accuracy",
46
+ },
47
+ "question-answering": {
48
+ "implementation": QuestionAnsweringEvaluator,
49
+ "default_metric_name": "squad",
50
+ },
51
+ "token-classification": {
52
+ "implementation": TokenClassificationEvaluator,
53
+ "default_metric_name": "seqeval",
54
+ },
55
+ "text-generation": {
56
+ "implementation": TextGenerationEvaluator,
57
+ "default_metric_name": "word_count",
58
+ },
59
+ "text2text-generation": {
60
+ "implementation": Text2TextGenerationEvaluator,
61
+ "default_metric_name": "bleu",
62
+ },
63
+ "summarization": {
64
+ "implementation": SummarizationEvaluator,
65
+ "default_metric_name": "rouge",
66
+ },
67
+ "translation": {
68
+ "implementation": TranslationEvaluator,
69
+ "default_metric_name": "bleu",
70
+ },
71
+ "automatic-speech-recognition": {
72
+ "implementation": AutomaticSpeechRecognitionEvaluator,
73
+ "default_metric_name": "wer",
74
+ },
75
+ "audio-classification": {
76
+ "implementation": AudioClassificationEvaluator,
77
+ "default_metric_name": "accuracy",
78
+ },
79
+ }
80
+
81
+
82
+ def get_supported_tasks() -> List[str]:
83
+ """
84
+ Returns a list of supported task strings.
85
+ """
86
+ return list(SUPPORTED_EVALUATOR_TASKS.keys())
87
+
88
+
89
+ def check_task(task: str) -> Dict:
90
+ """
91
+ Checks an incoming task string to validate that it is correct, and returns the default Evaluator class and default metric
92
+ name. It first performs a check to validate that the string is a valid `Pipeline` task, then it checks if it's a
93
+ valid `Evaluator` task. `Evaluator` tasks are a subset of `Pipeline` tasks.
94
+ Args:
95
+ task (`str`):
96
+ The task defining which evaluator will be returned. Currently accepted tasks are:
97
+ - `"image-classification"`
98
+ - `"question-answering"`
99
+ - `"text-classification"` (alias `"sentiment-analysis"` available)
100
+ - `"token-classification"`
101
+ Returns:
102
+ task_defaults: `dict`, contains the implementation class of a given Evaluator and the default metric name.
103
+ """
104
+ if task in TASK_ALIASES:
105
+ task = TASK_ALIASES[task]
106
+ if not check_pipeline_task(task):
107
+ raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.")
108
+ if task in SUPPORTED_EVALUATOR_TASKS.keys() and task in SUPPORTED_PIPELINE_TASKS.keys():
109
+ return SUPPORTED_EVALUATOR_TASKS[task]
110
+ raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.")
111
+
112
+
113
+ def evaluator(task: str = None) -> Evaluator:
114
+ """
115
+ Utility factory method to build an [`Evaluator`].
116
+ Evaluators encapsulate a task and a default metric name. They leverage `pipeline` functionality from `transformers`
117
+ to simplify the evaluation of multiple combinations of models, datasets and metrics for a given task.
118
+ Args:
119
+ task (`str`):
120
+ The task defining which evaluator will be returned. Currently accepted tasks are:
121
+ - `"image-classification"`: will return a [`ImageClassificationEvaluator`].
122
+ - `"question-answering"`: will return a [`QuestionAnsweringEvaluator`].
123
+ - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationEvaluator`].
124
+ - `"token-classification"`: will return a [`TokenClassificationEvaluator`].
125
+ Returns:
126
+ [`Evaluator`]: An evaluator suitable for the task.
127
+ Examples:
128
+ ```python
129
+ >>> from evaluate import evaluator
130
+ >>> # Sentiment analysis evaluator
131
+ >>> evaluator("sentiment-analysis")
132
+ ```"""
133
+ if not TRANSFORMERS_AVAILABLE:
134
+ raise ImportError(
135
+ "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[transformers]`."
136
+ )
137
+ targeted_task = check_task(task)
138
+ evaluator_class = targeted_task["implementation"]
139
+ default_metric_name = targeted_task["default_metric_name"]
140
+ return evaluator_class(task=task, default_metric_name=default_metric_name)
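The `evaluator()` factory above maps a pipeline task string to its `Evaluator` implementation and default metric. A minimal end-to-end sketch (the model checkpoint and dataset slice are placeholders; `transformers` and `scipy` must be installed):

```python
# Sketch: evaluator("text-classification") returns a TextClassificationEvaluator
# whose default metric is "accuracy". Model and dataset slice are illustrative.
from datasets import load_dataset
from evaluate import evaluator

task_evaluator = evaluator("text-classification")   # alias: "sentiment-analysis"
data = load_dataset("imdb", split="test[:100]")

results = task_evaluator.compute(
    model_or_pipeline="distilbert-base-uncased-finetuned-sst-2-english",
    data=data,
    label_mapping={"NEGATIVE": 0, "POSITIVE": 1},
)
print(results)  # metric scores plus total_time_in_seconds / samples_per_second / latency_in_seconds
```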
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.32 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc ADDED
Binary file (5.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc ADDED
Binary file (4.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc ADDED
Binary file (4.64 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc ADDED
Binary file (8.25 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc ADDED
Binary file (7.72 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc ADDED
Binary file (5.71 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc ADDED
Binary file (2.81 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc ADDED
Binary file (9.78 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.27 kB). View file
 
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py ADDED
@@ -0,0 +1,151 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from numbers import Number
16
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
17
+
18
+ from datasets import Dataset
19
+ from typing_extensions import Literal
20
+
21
+ from ..module import EvaluationModule
22
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
23
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
28
+
29
+
30
+ TASK_DOCUMENTATION = r"""
31
+ Examples:
32
+
33
+ <Tip>
34
+
35
+ Remember that, in order to process audio files, you need ffmpeg installed (https://ffmpeg.org/download.html)
36
+
37
+ </Tip>
38
+
39
+ ```python
40
+ >>> from evaluate import evaluator
41
+ >>> from datasets import load_dataset
42
+
43
+ >>> task_evaluator = evaluator("audio-classification")
44
+ >>> data = load_dataset("superb", 'ks', split="test[:40]")
45
+ >>> results = task_evaluator.compute(
46
+ >>> model_or_pipeline="superb/wav2vec2-base-superb-ks",
47
+ >>> data=data,
48
+ >>> label_column="label",
49
+ >>> input_column="file",
50
+ >>> metric="accuracy",
51
+ >>> label_mapping={"yes": 0, "no": 1, "up": 2, "down": 3}
52
+ >>> )
53
+ ```
54
+
55
+ <Tip>
56
+
57
+ The evaluator supports raw audio data as well, in the form of a numpy array. However, be aware that calling
58
+ the audio column automatically decodes and resamples the audio files, which can be slow for large datasets.
59
+
60
+ </Tip>
61
+
62
+ ```python
63
+ >>> from evaluate import evaluator
64
+ >>> from datasets import load_dataset
65
+
66
+ >>> task_evaluator = evaluator("audio-classification")
67
+ >>> data = load_dataset("superb", 'ks', split="test[:40]")
68
+ >>> data = data.map(lambda example: {"audio": example["audio"]["array"]})
69
+ >>> results = task_evaluator.compute(
70
+ >>> model_or_pipeline="superb/wav2vec2-base-superb-ks",
71
+ >>> data=data,
72
+ >>> label_column="label",
73
+ >>> input_column="audio",
74
+ >>> metric="accuracy",
75
+ >>> label_mapping={"yes": 0, "no": 1, "up": 2, "down": 3}
76
+ >>> )
77
+ ```
78
+ """
79
+
80
+
81
+ class AudioClassificationEvaluator(Evaluator):
82
+ """
83
+ Audio classification evaluator.
84
+ This audio classification evaluator can currently be loaded from [`evaluator`] using the default task name
85
+ `audio-classification`.
86
+ Methods in this class assume a data format compatible with the [`transformers.AudioClassificationPipeline`].
87
+ """
88
+
89
+ PIPELINE_KWARGS = {}
90
+
91
+ def __init__(self, task="audio-classification", default_metric_name=None):
92
+ super().__init__(task, default_metric_name=default_metric_name)
93
+
94
+ def predictions_processor(self, predictions, label_mapping):
95
+ pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
96
+ pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
97
+
98
+ return {"predictions": pred_label}
99
+
100
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
101
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
102
+ def compute(
103
+ self,
104
+ model_or_pipeline: Union[
105
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
106
+ ] = None,
107
+ data: Union[str, Dataset] = None,
108
+ subset: Optional[str] = None,
109
+ split: Optional[str] = None,
110
+ metric: Union[str, EvaluationModule] = None,
111
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
112
+ feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
113
+ strategy: Literal["simple", "bootstrap"] = "simple",
114
+ confidence_level: float = 0.95,
115
+ n_resamples: int = 9999,
116
+ device: int = None,
117
+ random_state: Optional[int] = None,
118
+ input_column: str = "file",
119
+ label_column: str = "label",
120
+ label_mapping: Optional[Dict[str, Number]] = None,
121
+ ) -> Tuple[Dict[str, float], Any]:
122
+
123
+ """
124
+ input_column (`str`, defaults to `"file"`):
125
+ The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`.
126
+ label_column (`str`, defaults to `"label"`):
127
+ The name of the column containing the labels in the dataset specified by `data`.
128
+ label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
129
+ We want to map class labels defined by the model in the pipeline to values consistent with those
130
+ defined in the `label_column` of the `data` dataset.
131
+ """
132
+
133
+ result = super().compute(
134
+ model_or_pipeline=model_or_pipeline,
135
+ data=data,
136
+ subset=subset,
137
+ split=split,
138
+ metric=metric,
139
+ tokenizer=tokenizer,
140
+ feature_extractor=feature_extractor,
141
+ strategy=strategy,
142
+ confidence_level=confidence_level,
143
+ n_resamples=n_resamples,
144
+ device=device,
145
+ random_state=random_state,
146
+ input_column=input_column,
147
+ label_column=label_column,
148
+ label_mapping=label_mapping,
149
+ )
150
+
151
+ return result
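`predictions_processor` above keeps the top-scoring label per clip and, if a `label_mapping` is given, maps that string label onto the dataset's label space: per the `Dict[str, Number]` annotation, the mapping keys are the pipeline's label strings and the values are the dataset's numeric labels. A tiny sketch with fabricated pipeline output (instantiating the evaluator requires `transformers` and `scipy`):

```python
# Sketch with fabricated pipeline output, showing how label_mapping is applied.
from evaluate.evaluator import AudioClassificationEvaluator

fake_pipeline_output = [
    [{"label": "yes", "score": 0.9}, {"label": "no", "score": 0.1}],
    [{"label": "no", "score": 0.7}, {"label": "yes", "score": 0.3}],
]

processor = AudioClassificationEvaluator().predictions_processor
print(processor(fake_pipeline_output, label_mapping={"yes": 0, "no": 1}))
# {'predictions': [0, 1]}
print(processor(fake_pipeline_output, label_mapping=None))
# {'predictions': ['yes', 'no']}
```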
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py ADDED
@@ -0,0 +1,112 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
16
+
17
+ from datasets import Dataset
18
+ from typing_extensions import Literal
19
+
20
+ from ..module import EvaluationModule
21
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
22
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
23
+
24
+
25
+ if TYPE_CHECKING:
26
+ from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
27
+
28
+
29
+ TASK_DOCUMENTATION = r"""
30
+ Examples:
31
+ ```python
32
+ >>> from evaluate import evaluator
33
+ >>> from datasets import load_dataset
34
+ >>> task_evaluator = evaluator("automatic-speech-recognition")
35
+ >>> data = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="validation[:40]")
36
+ >>> results = task_evaluator.compute(
37
+ >>> model_or_pipeline="https://huggingface.co/openai/whisper-tiny.en",
38
+ >>> data=data,
39
+ >>> input_column="path",
40
+ >>> label_column="sentence",
41
+ >>> metric="wer",
42
+ >>> )
43
+ ```
44
+ """
45
+
46
+
47
+ class AutomaticSpeechRecognitionEvaluator(Evaluator):
48
+ """
49
+ Automatic speech recognition evaluator.
50
+ This automatic speech recognition evaluator can currently be loaded from [`evaluator`] using the default task name
51
+ `automatic-speech-recognition`.
52
+ Methods in this class assume a data format compatible with the [`AutomaticSpeechRecognitionPipeline`].
53
+ """
54
+
55
+ PIPELINE_KWARGS = {"truncation": True}
56
+
57
+ def __init__(self, task="automatic-speech-recognition", default_metric_name=None):
58
+ super().__init__(task, default_metric_name=default_metric_name)
59
+
60
+ def predictions_processor(self, predictions, label_mapping):
61
+ return {"predictions": [pred["text"] for pred in predictions]}
62
+
63
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
64
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
65
+ def compute(
66
+ self,
67
+ model_or_pipeline: Union[
68
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
69
+ ] = None,
70
+ data: Union[str, Dataset] = None,
71
+ subset: Optional[str] = None,
72
+ split: Optional[str] = None,
73
+ metric: Union[str, EvaluationModule] = None,
74
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
75
+ strategy: Literal["simple", "bootstrap"] = "simple",
76
+ confidence_level: float = 0.95,
77
+ n_resamples: int = 9999,
78
+ device: int = None,
79
+ random_state: Optional[int] = None,
80
+ input_column: str = "path",
81
+ label_column: str = "sentence",
82
+ generation_kwargs: dict = None,
83
+ ) -> Tuple[Dict[str, float], Any]:
84
+ """
85
+ input_column (`str`, defaults to `"path"`):
86
+ the name of the column containing the input audio path in the dataset specified by `data`.
87
+ label_column (`str`, defaults to `"sentence"`):
88
+ the name of the column containing the labels in the dataset specified by `data`.
89
+ generation_kwargs (`Dict`, *optional*, defaults to `None`):
90
+ The generation kwargs are passed to the pipeline and set the text generation strategy.
91
+ """
92
+
93
+ if generation_kwargs is not None:
94
+ self.PIPELINE_KWARGS.update(generation_kwargs)
95
+
96
+ result = super().compute(
97
+ model_or_pipeline=model_or_pipeline,
98
+ data=data,
99
+ subset=subset,
100
+ split=split,
101
+ metric=metric,
102
+ tokenizer=tokenizer,
103
+ strategy=strategy,
104
+ confidence_level=confidence_level,
105
+ n_resamples=n_resamples,
106
+ device=device,
107
+ random_state=random_state,
108
+ input_column=input_column,
109
+ label_column=label_column,
110
+ )
111
+
112
+ return result
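The ASR evaluator's `predictions_processor` simply collects the `"text"` field of each pipeline prediction, and any `generation_kwargs` passed to `compute` are merged into the class-level `PIPELINE_KWARGS` dict (so they persist across instances) before being forwarded to the pipeline. A tiny sketch with fabricated pipeline output; no model or audio is loaded, but `transformers` and `scipy` must be installed to instantiate the class:

```python
# Sketch with fabricated pipeline output; demonstrates the pieces shown above.
from evaluate.evaluator import AutomaticSpeechRecognitionEvaluator

asr_evaluator = AutomaticSpeechRecognitionEvaluator()
print(asr_evaluator.PIPELINE_KWARGS)   # {'truncation': True}

fake_pipeline_output = [{"text": "hello world"}, {"text": "good morning"}]
print(asr_evaluator.predictions_processor(fake_pipeline_output, label_mapping=None))
# {'predictions': ['hello world', 'good morning']}
```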
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/base.py ADDED
@@ -0,0 +1,544 @@
1
+ # Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from abc import ABC, abstractmethod
16
+ from numbers import Number
17
+ from typing import Any, Callable, Dict, List, Optional, Union
18
+
19
+ # Lint as: python3
20
+ from datasets import Dataset, load_dataset
21
+
22
+ from evaluate.evaluator.utils import choose_split
23
+
24
+
25
+ try:
26
+ from scipy.stats import bootstrap
27
+
28
+ SCIPY_AVAILABLE = True
29
+ except ImportError:
30
+ SCIPY_AVAILABLE = False
31
+
32
+ try:
33
+ import transformers
34
+ from transformers import Pipeline, pipeline
35
+
36
+ TRANSFORMERS_AVAILABLE = True
37
+ except ImportError:
38
+ TRANSFORMERS_AVAILABLE = False
39
+
40
+ from time import perf_counter
41
+
42
+ from typing_extensions import Literal
43
+
44
+ from ..loading import load
45
+ from ..module import EvaluationModule
46
+ from ..utils.logging import get_logger
47
+ from .utils import DatasetColumn
48
+
49
+
50
+ logger = get_logger(__name__)
51
+
52
+
53
+ EVALUTOR_COMPUTE_START_DOCSTRING = r"""
54
+ Compute the metric for a given pipeline and dataset combination.
55
+ Args:
56
+ model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
57
+ If the argument is not specified, we initialize the default pipeline for the task (in this case
58
+ `text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or
59
+ is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
60
+ argument specifies a pre-initialized pipeline.
61
+ data (`str` or `Dataset`, defaults to `None`):
62
+ Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
63
+ name, and load it. Otherwise we assume it represents a pre-loaded dataset.
64
+ subset (`str`, defaults to `None`):
65
+ Defines which dataset subset to load. If `None` is passed the default subset is loaded.
66
+ split (`str`, defaults to `None`):
67
+ Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
68
+ metric (`str` or `EvaluationModule`, defaults to `None`):
69
+ Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
70
+ load it. Otherwise we assume it represents a pre-loaded metric.
71
+ tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
72
+ Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
73
+ which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
74
+ this argument.
75
+ strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
76
+ specifies the evaluation strategy. Possible values are:
77
+ - `"simple"` - we evaluate the metric and return the scores.
78
+ - `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
79
+ of the returned metric keys, using `scipy`'s `bootstrap` method
80
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
81
+ confidence_level (`float`, defaults to `0.95`):
82
+ The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
83
+ n_resamples (`int`, defaults to `9999`):
84
+ The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
85
+ device (`int`, defaults to `None`):
86
+ Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
87
+ integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
88
+ CUDA:0 used if available, CPU otherwise.
89
+ random_state (`int`, *optional*, defaults to `None`):
90
+ The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
91
+ debugging.
92
+ """
93
+
94
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING = r"""
95
+ Return:
96
+ A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
97
+ `"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
98
+ containing the score, the confidence interval and the standard error calculated for each metric key.
99
+ """
100
+
101
+
102
+ class Evaluator(ABC):
103
+ """
104
+ The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across
105
+ different evaluators.
106
+ Base class implementing evaluator operations.
107
+ """
108
+
109
+ PIPELINE_KWARGS = {}
110
+ METRIC_KWARGS = {}
111
+
112
+ def __init__(self, task: str, default_metric_name: str = None):
113
+ if not TRANSFORMERS_AVAILABLE:
114
+ raise ImportError(
115
+ "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`."
116
+ )
117
+ if not SCIPY_AVAILABLE:
118
+ raise ImportError(
119
+ "If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`."
120
+ )
121
+ self.task = task
122
+ self.default_metric_name = default_metric_name
123
+
124
+ @staticmethod
125
+ def _compute_confidence_interval(
126
+ metric,
127
+ metric_inputs,
128
+ metric_keys: List[str],
129
+ confidence_level: float = 0.95,
130
+ n_resamples: int = 9999,
131
+ random_state: Optional[int] = None,
132
+ ) -> Dict[str, Any]:
133
+ """
134
+ A utility function enabling the confidence interval calculation for metrics computed
135
+ by the evaluator based on `scipy`'s `bootstrap` method.
136
+ """
137
+
138
+ # bootstrap only works with functions that use args and no kwargs
139
+ def build_args_metric(metric, key, **kwargs):
140
+ def args_metric(*args):
141
+ return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key]
142
+
143
+ return args_metric
144
+
145
+ bootstrap_dict = {}
146
+ for key in metric_keys:
147
+ bs = bootstrap(
148
+ data=list(metric_inputs.values()),
149
+ statistic=build_args_metric(metric, key, **metric_inputs),
150
+ paired=True,
151
+ vectorized=False,
152
+ confidence_level=confidence_level,
153
+ n_resamples=n_resamples,
154
+ random_state=random_state,
155
+ )
156
+ bootstrap_dict[key] = {
157
+ "confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high),
158
+ "standard_error": bs.standard_error,
159
+ }
160
+ return bootstrap_dict
161
+
162
+ @staticmethod
163
+ def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]:
164
+ """
165
+ A utility function computing time performance metrics:
166
+ - `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds,
167
+ - `samples_per_second` - pipeline throughput in the number of samples per second.
168
+ - `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample,
169
+
170
+ """
171
+ latency = end_time - start_time
172
+ throughput = num_samples / latency
173
+ latency_sample = 1.0 / throughput
174
+
175
+ return {
176
+ "total_time_in_seconds": latency,
177
+ "samples_per_second": throughput,
178
+ "latency_in_seconds": latency_sample,
179
+ }
180
+
181
+ @staticmethod
182
+ def _infer_device() -> int:
183
+ """Helper function to check if GPU or CPU is available for inference."""
184
+ # try infer with torch first
185
+ try:
186
+ import torch
187
+
188
+ if torch.cuda.is_available():
189
+ device = 0 # first GPU
190
+ else:
191
+ device = -1 # CPU
192
+ except ImportError:
193
+ # if not available try TF
194
+ try:
195
+ import tensorflow as tf
196
+
197
+ if len(tf.config.list_physical_devices("GPU")) > 0:
198
+ device = 0 # first GPU
199
+ else:
200
+ device = -1 # CPU
201
+ except ImportError:
202
+ device = -1
203
+
204
+ if device == -1:
205
+ logger.info("No GPU found. The default device for pipeline inference is set to CPU.")
206
+ else:
207
+ logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).")
208
+
209
+ return device
210
+
211
+ @abstractmethod
212
+ def predictions_processor(self, *args, **kwargs):
213
+ """
214
+ A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric.
215
+ """
216
+ raise NotImplementedError()
217
+
218
+ def compute(
219
+ self,
220
+ model_or_pipeline: Union[
221
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
222
+ ] = None,
223
+ data: Union[str, Dataset] = None,
224
+ subset: Optional[str] = None,
225
+ split: Optional[str] = None,
226
+ metric: Union[str, EvaluationModule] = None,
227
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
228
+ feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
229
+ strategy: Literal["simple", "bootstrap"] = "simple",
230
+ confidence_level: float = 0.95,
231
+ n_resamples: int = 9999,
232
+ device: int = None,
233
+ random_state: Optional[int] = None,
234
+ input_column: str = "text",
235
+ label_column: str = "label",
236
+ label_mapping: Optional[Dict[str, Number]] = None,
237
+ ) -> Dict[str, float]:
238
+
239
+ result = {}
240
+
241
+ self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
242
+
243
+ # Prepare inputs
244
+ data = self.load_data(data=data, subset=subset, split=split)
245
+ metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column)
246
+ pipe = self.prepare_pipeline(
247
+ model_or_pipeline=model_or_pipeline,
248
+ tokenizer=tokenizer,
249
+ feature_extractor=feature_extractor,
250
+ device=device,
251
+ )
252
+ metric = self.prepare_metric(metric)
253
+
254
+ # Compute predictions
255
+ predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
256
+ predictions = self.predictions_processor(predictions, label_mapping)
257
+
258
+ metric_inputs.update(predictions)
259
+
260
+ # Compute metrics from references and predictions
261
+ metric_results = self.compute_metric(
262
+ metric=metric,
263
+ metric_inputs=metric_inputs,
264
+ strategy=strategy,
265
+ confidence_level=confidence_level,
266
+ n_resamples=n_resamples,
267
+ random_state=random_state,
268
+ )
269
+
270
+ # TODO: To clarify why `wer` and `cer` return float
271
+ # even though metric.compute contract says that it
272
+ # returns Optional[dict].
273
+ if type(metric_results) == float:
274
+ metric_results = {metric.name: metric_results}
275
+
276
+ result.update(metric_results)
277
+ result.update(perf_results)
278
+
279
+ return result
280
+
281
+ @staticmethod
282
+ def check_for_mismatch_in_device_setup(device, model_or_pipeline):
283
+ if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline):
284
+ if model_or_pipeline.device.type == "cpu":
285
+ raise ValueError(
286
+ "The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an "
287
+ "accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during "
288
+ "initialization to use an accelerator, or pass `device=None` to `compute`. "
289
+ )
290
+ elif device != model_or_pipeline.device.index:
291
+ raise ValueError(
292
+ f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`."
293
+ )
294
+
295
+ def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]):
296
+ """
297
+ Ensure the columns required for the evaluation are present in the dataset.
298
+
299
+ Args:
300
+ data (`str` or [`Dataset`]):
301
+ Specifies the dataset we will run evaluation on.
302
+ columns_names (`Dict[str, str]`):
303
+ Dictionary of columns to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method,
304
+ while the values are the column names to check.
305
+
306
+ Example:
307
+
308
+ ```py
309
+ >>> from datasets import load_dataset
310
+ >>> from evaluate import evaluator
311
+ >>> data = load_dataset("rotten_tomatoes", split="train")
312
+ >>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
313
+ ```
314
+ """
315
+ for input_name, column_name in columns_names.items():
316
+ if column_name not in data.column_names:
317
+ raise ValueError(
318
+ f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}."
319
+ )
320
+
321
+ @staticmethod
322
+ def get_dataset_split(data, subset=None, split=None):
323
+ """
324
+ Infers which split to use if `None` is given.
325
+
326
+ Args:
327
+ data (`str`):
328
+ Name of dataset.
329
+ subset (`str`):
330
+ Name of config for datasets with multiple configurations (e.g. 'glue/cola').
331
+ split (`str`, defaults to `None`):
332
+ Split to use.
333
+ Returns:
334
+ `split`: `str` containing which split to use
335
+
336
+ Example:
337
+
338
+ ```py
339
+ >>> from evaluate import evaluator
340
+ >>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
341
+ WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
342
+ 'test'
343
+ ```
344
+ """
345
+ if split is None:
346
+ split = choose_split(data, subset)
347
+ logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}")
348
+ return split
349
+
350
+ def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None):
351
+ """
352
+ Load dataset with given subset and split.
353
+ Args:
354
+ data ([`Dataset`] or `str`, defaults to `None`):
355
+ Specifies the dataset we will run evaluation on. If it is of
356
+ type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
357
+ subset (`str`, defaults to `None`):
358
+ Specifies dataset subset to be passed to `name` in `load_dataset`. To be
359
+ used with datasets with several configurations (e.g. glue/sst2).
360
+ split (`str`, defaults to `None`):
361
+ User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
362
+ If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
363
+ Returns:
364
+ data ([`Dataset`]): Loaded dataset which will be used for evaluation.
365
+
366
+ Example:
367
+
368
+ ```py
369
+ >>> from evaluate import evaluator
370
+ >>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
371
+ Dataset({
372
+ features: ['text', 'label'],
373
+ num_rows: 8530
374
+ })
375
+ ```
376
+ """
377
+ if isinstance(data, str):
378
+ split = self.get_dataset_split(data, subset, split)
379
+ data = load_dataset(data, name=subset, split=split)
380
+ return data
381
+ elif isinstance(data, Dataset):
382
+ if split is not None or subset is not None:
383
+ logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.")
384
+ return data
385
+ else:
386
+ raise ValueError(
387
+ "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
388
+ )
389
+
390
+ def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
391
+ """
392
+ Prepare data.
393
+
394
+ Args:
395
+ data ([`Dataset`]):
396
+ Specifies the dataset we will run evaluation on.
397
+ input_column (`str`, defaults to `"text"`):
398
+ The name of the column containing the text feature in the dataset specified by `data`.
399
+ second_input_column (`str`, *optional*):
400
+ The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
401
+ label_column (`str`, defaults to `"label"`):
402
+ The name of the column containing the labels in the dataset specified by `data`.
403
+ Returns:
404
+ `dict`: metric inputs.
405
+ `list`: pipeline inputs.
406
+
407
+ Example:
408
+
409
+ ```py
410
+ >>> from evaluate import evaluator
411
+ >>> from datasets import load_dataset
412
+
413
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
414
+ >>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label")
415
+ ```
416
+ """
417
+
418
+ self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
419
+
420
+ return {"references": data[label_column]}, DatasetColumn(data, input_column)
421
+
422
+ def prepare_pipeline(
423
+ self,
424
+ model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
425
+ tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
426
+ feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
427
+ device: int = None,
428
+ ):
429
+ """
430
+ Prepare pipeline.
431
+
432
+ Args:
433
+ model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`):
434
+ If the argument is not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
435
+ is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the
436
+ argument specifies a pre-initialized pipeline.
437
+ preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`):
438
+ Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
439
+ which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
440
+ this argument.
441
+ Returns:
442
+ The initialized pipeline.
443
+
444
+ Example:
445
+
446
+ ```py
447
+ >>> from evaluate import evaluator
448
+ >>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased")
449
+ ```
450
+ """
451
+
452
+ if device is None:
453
+ device = self._infer_device()
454
+
455
+ if (
456
+ isinstance(model_or_pipeline, str)
457
+ or isinstance(model_or_pipeline, transformers.PreTrainedModel)
458
+ or isinstance(model_or_pipeline, transformers.TFPreTrainedModel)
459
+ ):
460
+ pipe = pipeline(
461
+ self.task,
462
+ model=model_or_pipeline,
463
+ tokenizer=tokenizer,
464
+ feature_extractor=feature_extractor,
465
+ device=device,
466
+ )
467
+ else:
468
+ if model_or_pipeline is None:
469
+ pipe = pipeline(self.task, device=device)
470
+ else:
471
+ pipe = model_or_pipeline
472
+ if tokenizer is not None and feature_extractor is not None:
473
+ logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).")
474
+ if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")):
475
+ raise ValueError(
476
+ f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task."
477
+ )
478
+ return pipe
479
+
480
+ def prepare_metric(self, metric: Union[str, EvaluationModule]):
481
+ """
482
+ Prepare metric.
483
+
484
+ Args:
485
+ metric (`str` or [`EvaluationModule`], defaults to `None`):
486
+ Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
487
+ load it. Otherwise we assume it represents a pre-loaded metric.
488
+
489
+ Returns:
490
+ The loaded metric.
491
+
492
+ Example:
493
+
494
+ ```py
495
+ >>> from evaluate import evaluator
496
+ >>> evaluator("text-classification").prepare_metric("accuracy")
497
+ ```
498
+ """
499
+ # Prepare metric.
500
+ if metric is None:
501
+ if self.default_metric_name is None:
502
+ raise ValueError(
503
+ "`Evaluator` doesn't specify a default metric. Please specify a valid `metric` argument."
504
+ )
505
+ metric = load(self.default_metric_name)
506
+ elif isinstance(metric, str):
507
+ metric = load(metric)
508
+
509
+ return metric
510
+
511
+ def call_pipeline(self, pipe, *args, **kwargs):
512
+ start_time = perf_counter()
513
+ pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
514
+ end_time = perf_counter()
515
+ return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output))
516
+
517
+ def compute_metric(
518
+ self,
519
+ metric: EvaluationModule,
520
+ metric_inputs: Dict,
521
+ strategy: Literal["simple", "bootstrap"] = "simple",
522
+ confidence_level: float = 0.95,
523
+ n_resamples: int = 9999,
524
+ random_state: Optional[int] = None,
525
+ ):
526
+ """Compute and return metrics."""
527
+ result = metric.compute(**metric_inputs, **self.METRIC_KWARGS)
528
+
529
+ if strategy == "bootstrap":
530
+ metric_keys = result.keys()
531
+ bootstrap_dict = self._compute_confidence_interval(
532
+ metric,
533
+ metric_inputs,
534
+ metric_keys,
535
+ confidence_level,
536
+ n_resamples,
537
+ random_state,
538
+ )
539
+ for key in metric_keys:
540
+ bootstrap_dict[key]["score"] = result[key]
541
+
542
+ return bootstrap_dict
543
+
544
+ return result
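The `compute` flow implemented above (load the data, build the pipeline and metric, run inference, post-process predictions, then score) can be exercised end to end through a concrete task evaluator. A minimal sketch under stated assumptions: the model checkpoint, dataset slice, and label mapping below are illustrative choices, not anything this file prescribes, and `strategy="bootstrap"` simply triggers the `_compute_confidence_interval` path shown above.

```python
from datasets import load_dataset
from evaluate import evaluator

# A concrete subclass of the abstract Evaluator defined above.
task_evaluator = evaluator("text-classification")

# Small slice so the bootstrap resampling stays cheap.
data = load_dataset("rotten_tomatoes", split="test[:32]")

results = task_evaluator.compute(
    model_or_pipeline="distilbert-base-uncased-finetuned-sst-2-english",  # illustrative checkpoint
    data=data,
    metric="accuracy",
    label_mapping={"NEGATIVE": 0, "POSITIVE": 1},
    strategy="bootstrap",  # adds confidence intervals via scipy.stats.bootstrap
    n_resamples=100,
    random_state=0,
)

# The result mixes metric scores with the timing keys from _compute_time_perf:
# total_time_in_seconds, samples_per_second, latency_in_seconds.
print(results)
```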
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py ADDED
@@ -0,0 +1,119 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from numbers import Number
16
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
17
+
18
+ from datasets import Dataset
19
+ from typing_extensions import Literal
20
+
21
+ from ..module import EvaluationModule
22
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
23
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
28
+
29
+
30
+ TASK_DOCUMENTATION = r"""
31
+ Examples:
32
+ ```python
33
+ >>> from evaluate import evaluator
34
+ >>> from datasets import load_dataset
35
+ >>> task_evaluator = evaluator("image-classification")
36
+ >>> data = load_dataset("beans", split="test[:40]")
37
+ >>> results = task_evaluator.compute(
38
+ >>> model_or_pipeline="nateraw/vit-base-beans",
39
+ >>> data=data,
40
+ >>> label_column="labels",
41
+ >>> metric="accuracy",
42
+ >>> label_mapping={'angular_leaf_spot': 0, 'bean_rust': 1, 'healthy': 2},
43
+ >>> strategy="bootstrap"
44
+ >>> )
45
+ ```
46
+ """
47
+
48
+
49
+ class ImageClassificationEvaluator(Evaluator):
50
+ """
51
+ Image classification evaluator.
52
+ This image classification evaluator can currently be loaded from [`evaluator`] using the default task name
53
+ `image-classification`.
54
+ Methods in this class assume a data format compatible with the [`ImageClassificationPipeline`].
55
+ """
56
+
57
+ PIPELINE_KWARGS = {}
58
+
59
+ def __init__(self, task="image-classification", default_metric_name=None):
60
+ super().__init__(task, default_metric_name=default_metric_name)
61
+
62
+ def predictions_processor(self, predictions, label_mapping):
63
+ pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
64
+ pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
65
+
66
+ return {"predictions": pred_label}
67
+
68
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
69
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
70
+ def compute(
71
+ self,
72
+ model_or_pipeline: Union[
73
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
74
+ ] = None,
75
+ data: Union[str, Dataset] = None,
76
+ subset: Optional[str] = None,
77
+ split: Optional[str] = None,
78
+ metric: Union[str, EvaluationModule] = None,
79
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
80
+ feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
81
+ strategy: Literal["simple", "bootstrap"] = "simple",
82
+ confidence_level: float = 0.95,
83
+ n_resamples: int = 9999,
84
+ device: int = None,
85
+ random_state: Optional[int] = None,
86
+ input_column: str = "image",
87
+ label_column: str = "label",
88
+ label_mapping: Optional[Dict[str, Number]] = None,
89
+ ) -> Tuple[Dict[str, float], Any]:
90
+
91
+ """
92
+ input_column (`str`, defaults to `"image"`):
93
+ The name of the column containing the images (as PIL `ImageFile` objects) in the dataset specified by `data`.
94
+ label_column (`str`, defaults to `"label"`):
95
+ The name of the column containing the labels in the dataset specified by `data`.
96
+ label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
97
+ We want to map class labels defined by the model in the pipeline to values consistent with those
98
+ defined in the `label_column` of the `data` dataset.
99
+ """
100
+
101
+ result = super().compute(
102
+ model_or_pipeline=model_or_pipeline,
103
+ data=data,
104
+ subset=subset,
105
+ split=split,
106
+ metric=metric,
107
+ tokenizer=tokenizer,
108
+ feature_extractor=feature_extractor,
109
+ strategy=strategy,
110
+ confidence_level=confidence_level,
111
+ n_resamples=n_resamples,
112
+ device=device,
113
+ random_state=random_state,
114
+ input_column=input_column,
115
+ label_column=label_column,
116
+ label_mapping=label_mapping,
117
+ )
118
+
119
+ return result
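To see what `predictions_processor` does in isolation, it can be fed hand-written pipeline output instead of a real model. A minimal sketch; the label names follow the `beans` example in the docstring above, and the fake scores are arbitrary.

```python
from evaluate import evaluator

task_evaluator = evaluator("image-classification")

# One list of {"label", "score"} dicts per image, as the pipeline would return.
fake_pipeline_output = [
    [{"label": "bean_rust", "score": 0.7}, {"label": "healthy", "score": 0.3}],
    [{"label": "healthy", "score": 0.9}, {"label": "bean_rust", "score": 0.1}],
]

label_mapping = {"angular_leaf_spot": 0, "bean_rust": 1, "healthy": 2}

# The highest-scoring label per image is kept and mapped to the dataset's integer ids.
print(task_evaluator.predictions_processor(fake_pipeline_output, label_mapping))
# {'predictions': [1, 2]}
```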
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py ADDED
@@ -0,0 +1,239 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
16
+
17
+ # Lint as: python3
18
+ from datasets import Dataset
19
+
20
+
21
+ try:
22
+ TRANSFORMERS_AVAILABLE = True
23
+ except ImportError:
24
+ TRANSFORMERS_AVAILABLE = False
25
+
26
+ from typing_extensions import Literal
27
+
28
+ from ..module import EvaluationModule
29
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
30
+ from ..utils.logging import get_logger
31
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
32
+ from .utils import DatasetColumn
33
+
34
+
35
+ if TYPE_CHECKING:
36
+ from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
37
+
38
+
39
+ logger = get_logger(__name__)
40
+
41
+
42
+ TASK_DOCUMENTATION = r"""
43
+ Examples:
44
+ ```python
45
+ >>> from evaluate import evaluator
46
+ >>> from datasets import load_dataset
47
+ >>> task_evaluator = evaluator("question-answering")
48
+ >>> data = load_dataset("squad", split="validation[:2]")
49
+ >>> results = task_evaluator.compute(
50
+ >>> model_or_pipeline="sshleifer/tiny-distilbert-base-cased-distilled-squad",
51
+ >>> data=data,
52
+ >>> metric="squad",
53
+ >>> )
54
+ ```
55
+
56
+ <Tip>
57
+
58
+ Datasets where the answer may be missing in the context are supported, for example the SQuAD v2 dataset. In this case, it is safer to pass `squad_v2_format=True` to
59
+ the compute() call.
60
+
61
+ </Tip>
62
+
63
+ ```python
64
+ >>> from evaluate import evaluator
65
+ >>> from datasets import load_dataset
66
+ >>> task_evaluator = evaluator("question-answering")
67
+ >>> data = load_dataset("squad_v2", split="validation[:2]")
68
+ >>> results = task_evaluator.compute(
69
+ >>> model_or_pipeline="mrm8488/bert-tiny-finetuned-squadv2",
70
+ >>> data=data,
71
+ >>> metric="squad_v2",
72
+ >>> squad_v2_format=True,
73
+ >>> )
74
+ ```
75
+ """
76
+
77
+
78
+ class QuestionAnsweringEvaluator(Evaluator):
79
+ """
80
+ Question answering evaluator. This evaluator handles
81
+ [**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering),
82
+ where the answer to the question is extracted from a context.
83
+
84
+ This question answering evaluator can currently be loaded from [`evaluator`] using the default task name
85
+ `question-answering`.
86
+
87
+ Methods in this class assume a data format compatible with the
88
+ [`~transformers.QuestionAnsweringPipeline`].
89
+ """
90
+
91
+ PIPELINE_KWARGS = {}
92
+
93
+ def __init__(self, task="question-answering", default_metric_name=None):
94
+ super().__init__(task, default_metric_name=default_metric_name)
95
+
96
+ def prepare_data(
97
+ self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str
98
+ ):
99
+ """Prepare data."""
100
+ if data is None:
101
+ raise ValueError(
102
+ "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
103
+ )
104
+ self.check_required_columns(
105
+ data,
106
+ {
107
+ "question_column": question_column,
108
+ "context_column": context_column,
109
+ "id_column": id_column,
110
+ "label_column": label_column,
111
+ },
112
+ )
113
+
114
+ metric_inputs = dict()
115
+ metric_inputs["references"] = [
116
+ {"id": element[id_column], "answers": element[label_column]} for element in data
117
+ ]
118
+
119
+ return metric_inputs, {
120
+ "question": DatasetColumn(data, question_column),
121
+ "context": DatasetColumn(data, context_column),
122
+ }
123
+
124
+ def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"):
125
+ """
126
+ Check if the provided dataset follows the squad v2 data schema, namely possible samples where the answer is not in the context.
127
+ In this case, the answer text list should be `[]`.
128
+ """
129
+ original_num_rows = data.num_rows
130
+ nonempty_num_rows = data.filter(
131
+ lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False
132
+ ).num_rows
133
+ if original_num_rows > nonempty_num_rows:
134
+ return True
135
+ else:
136
+ return False
137
+
138
+ def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List):
139
+ result = []
140
+ for i in range(len(predictions)):
141
+ pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]}
142
+ if squad_v2_format:
143
+ pred["no_answer_probability"] = predictions[i]["score"]
144
+ result.append(pred)
145
+ return {"predictions": result}
146
+
147
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
148
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
149
+ def compute(
150
+ self,
151
+ model_or_pipeline: Union[
152
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
153
+ ] = None,
154
+ data: Union[str, Dataset] = None,
155
+ subset: Optional[str] = None,
156
+ split: Optional[str] = None,
157
+ metric: Union[str, EvaluationModule] = None,
158
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
159
+ strategy: Literal["simple", "bootstrap"] = "simple",
160
+ confidence_level: float = 0.95,
161
+ n_resamples: int = 9999,
162
+ device: int = None,
163
+ random_state: Optional[int] = None,
164
+ question_column: str = "question",
165
+ context_column: str = "context",
166
+ id_column: str = "id",
167
+ label_column: str = "answers",
168
+ squad_v2_format: Optional[bool] = None,
169
+ ) -> Tuple[Dict[str, float], Any]:
170
+ """
171
+ question_column (`str`, defaults to `"question"`):
172
+ The name of the column containing the question in the dataset specified by `data`.
173
+ context_column (`str`, defaults to `"context"`):
174
+ The name of the column containing the context in the dataset specified by `data`.
175
+ id_column (`str`, defaults to `"id"`):
176
+ The name of the column containing the identification field of the question and answer pair in the
177
+ dataset specified by `data`.
178
+ label_column (`str`, defaults to `"answers"`):
179
+ The name of the column containing the answers in the dataset specified by `data`.
180
+ squad_v2_format (`bool`, *optional*, defaults to `None`):
181
+ Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset
182
+ has questions where the answer is not in the context, more specifically when the answers are given as
183
+ `{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter
184
+ should be set to `False`. If this parameter is not provided, the format will be automatically inferred.
185
+ """
186
+ result = {}
187
+ self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
188
+
189
+ data = self.load_data(data=data, subset=subset, split=split)
190
+ metric_inputs, pipe_inputs = self.prepare_data(
191
+ data=data,
192
+ question_column=question_column,
193
+ context_column=context_column,
194
+ id_column=id_column,
195
+ label_column=label_column,
196
+ )
197
+
198
+ if squad_v2_format is None:
199
+ squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column)
200
+ logger.warning(
201
+ f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}."
202
+ )
203
+ pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
204
+
205
+ metric = self.prepare_metric(metric)
206
+
207
+ if squad_v2_format and metric.name == "squad":
208
+ logger.warning(
209
+ "The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric."
210
+ )
211
+ if not squad_v2_format and metric.name == "squad_v2":
212
+ logger.warning(
213
+ "The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric."
214
+ )
215
+
216
+ if squad_v2_format:
217
+ self.PIPELINE_KWARGS["handle_impossible_answer"] = True
218
+ else:
219
+ self.PIPELINE_KWARGS["handle_impossible_answer"] = False
220
+
221
+ # Compute predictions
222
+ predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs)
223
+ predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column])
224
+ metric_inputs.update(predictions)
225
+
226
+ # Compute metrics from references and predictions
227
+ metric_results = self.compute_metric(
228
+ metric=metric,
229
+ metric_inputs=metric_inputs,
230
+ strategy=strategy,
231
+ confidence_level=confidence_level,
232
+ n_resamples=n_resamples,
233
+ random_state=random_state,
234
+ )
235
+
236
+ result.update(metric_results)
237
+ result.update(perf_results)
238
+
239
+ return result
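The SQuAD v2 auto-detection above only inspects the `answers` column, so it can be checked on a toy in-memory dataset without running a model. A minimal sketch; the two rows are made up for illustration.

```python
from datasets import Dataset
from evaluate import evaluator

task_evaluator = evaluator("question-answering")

# Two toy rows: one answerable, one with an empty answer list (SQuAD v2 style).
data = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "question": ["Where is Paris?", "Who wrote nothing?"],
        "context": ["Paris is in France.", "This context has no answer."],
        "answers": [
            {"text": ["France"], "answer_start": [12]},
            {"text": [], "answer_start": []},
        ],
    }
)

# The empty answer list makes the schema check return True, so compute() would
# infer squad_v2_format=True and enable handle_impossible_answer in the pipeline.
print(task_evaluator.is_squad_v2_format(data))  # True
```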
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py ADDED
@@ -0,0 +1,267 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
16
+
17
+ from datasets import Dataset
18
+ from typing_extensions import Literal
19
+
20
+ from ..module import EvaluationModule
21
+ from ..utils.file_utils import add_start_docstrings
22
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
23
+
24
+
25
+ if TYPE_CHECKING:
26
+ from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
27
+
28
+
29
+ TASK_DOCUMENTATION_KWARGS = r"""
30
+ input_column (`str`, defaults to `"text"`):
31
+ the name of the column containing the input text in the dataset specified by `data`.
32
+ label_column (`str`, defaults to `"label"`):
33
+ the name of the column containing the labels in the dataset specified by `data`.
34
+ generation_kwargs (`Dict`, *optional*, defaults to `None`):
35
+ The generation kwargs are passed to the pipeline and set the text generation strategy.
36
+ """
37
+
38
+ TEXT2TEXT_TASK_DOCSTRING_EXAMPLE = r"""
39
+ Examples:
40
+ ```python
41
+ >>> from evaluate import evaluator
42
+ >>> from datasets import load_dataset
43
+ >>> task_evaluator = evaluator("text2text-generation")
44
+ >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
45
+ >>> results = task_evaluator.compute(
46
+ >>> model_or_pipeline="facebook/bart-large-cnn",
47
+ >>> data=data,
48
+ >>> input_column="article",
49
+ >>> label_column="highlights",
50
+ >>> metric="rouge",
51
+ >>> )
52
+ ```
53
+ """
54
+
55
+ SUMMARIZATION_TASK_DOCSTRING_EXAMPLE = r"""
56
+ Examples:
57
+ ```python
58
+ >>> from evaluate import evaluator
59
+ >>> from datasets import load_dataset
60
+ >>> task_evaluator = evaluator("summarization")
61
+ >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
62
+ >>> results = task_evaluator.compute(
63
+ >>> model_or_pipeline="facebook/bart-large-cnn",
64
+ >>> data=data,
65
+ >>> input_column="article",
66
+ >>> label_column="highlights",
67
+ >>> )
68
+ ```
69
+ """
70
+
71
+
72
+ TRANSLATION_TASK_DOCSTRING_EXAMPLE = r"""
73
+ Examples:
74
+ ```python
75
+ >>> from evaluate import evaluator
76
+ >>> from datasets import load_dataset
77
+ >>> task_evaluator = evaluator("translation")
78
+ >>> data = load_dataset("wmt19", "fr-de", split="validation[:40]")
79
+ >>> data = data.map(lambda x: {"text": x["translation"]["de"], "label": x["translation"]["fr"]})
80
+ >>> results = task_evaluator.compute(
81
+ >>> model_or_pipeline="Helsinki-NLP/opus-mt-de-fr",
82
+ >>> data=data,
83
+ >>> )
84
+ ```
85
+ """
86
+
87
+
88
+ class Text2TextGenerationEvaluator(Evaluator):
89
+ """
90
+ Text2Text generation evaluator.
91
+ This Text2Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
92
+ `text2text-generation`.
93
+ Methods in this class assume a data format compatible with the [`~transformers.Text2TextGenerationPipeline`].
94
+ """
95
+
96
+ PREDICTION_PREFIX = "generated"
97
+ PIPELINE_KWARGS = {"truncation": True}
98
+
99
+ def __init__(self, task="text2text-generation", default_metric_name=None):
100
+ super().__init__(task, default_metric_name=default_metric_name)
101
+
102
+ def predictions_processor(self, predictions, label_mapping):
103
+ return {"predictions": [pred[f"{self.PREDICTION_PREFIX}_text"] for pred in predictions]}
104
+
105
+ @add_start_docstrings(
106
+ EVALUTOR_COMPUTE_START_DOCSTRING,
107
+ TASK_DOCUMENTATION_KWARGS,
108
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING,
109
+ TEXT2TEXT_TASK_DOCSTRING_EXAMPLE,
110
+ )
111
+ def compute(
112
+ self,
113
+ model_or_pipeline: Union[
114
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
115
+ ] = None,
116
+ data: Union[str, Dataset] = None,
117
+ subset: Optional[str] = None,
118
+ split: Optional[str] = None,
119
+ metric: Union[str, EvaluationModule] = None,
120
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
121
+ strategy: Literal["simple", "bootstrap"] = "simple",
122
+ confidence_level: float = 0.95,
123
+ n_resamples: int = 9999,
124
+ device: int = None,
125
+ random_state: Optional[int] = None,
126
+ input_column: str = "text",
127
+ label_column: str = "label",
128
+ generation_kwargs: dict = None,
129
+ ) -> Tuple[Dict[str, float], Any]:
130
+ if generation_kwargs is not None:
131
+ self.PIPELINE_KWARGS.update(generation_kwargs)
132
+
133
+ result = super().compute(
134
+ model_or_pipeline=model_or_pipeline,
135
+ data=data,
136
+ subset=subset,
137
+ split=split,
138
+ metric=metric,
139
+ tokenizer=tokenizer,
140
+ strategy=strategy,
141
+ confidence_level=confidence_level,
142
+ n_resamples=n_resamples,
143
+ device=device,
144
+ random_state=random_state,
145
+ input_column=input_column,
146
+ label_column=label_column,
147
+ )
148
+
149
+ return result
150
+
151
+
152
+ class SummarizationEvaluator(Text2TextGenerationEvaluator):
153
+ """
154
+ Text summarization evaluator.
155
+ This text summarization evaluator can currently be loaded from [`evaluator`] using the default task name
156
+ `summarization`.
157
+ Methods in this class assume a data format compatible with the [`~transformers.SummarizationPipeline`].
158
+ """
159
+
160
+ PREDICTION_PREFIX = "summary"
161
+ PIPELINE_KWARGS = {"truncation": True}
162
+
163
+ def __init__(self, task="summarization", default_metric_name=None):
164
+ super().__init__(task, default_metric_name=default_metric_name)
165
+
166
+ @add_start_docstrings(
167
+ EVALUTOR_COMPUTE_START_DOCSTRING,
168
+ TASK_DOCUMENTATION_KWARGS,
169
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING,
170
+ SUMMARIZATION_TASK_DOCSTRING_EXAMPLE,
171
+ )
172
+ def compute(
173
+ self,
174
+ model_or_pipeline: Union[
175
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
176
+ ] = None,
177
+ data: Union[str, Dataset] = None,
178
+ subset: Optional[str] = None,
179
+ split: Optional[str] = None,
180
+ metric: Union[str, EvaluationModule] = None,
181
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
182
+ strategy: Literal["simple", "bootstrap"] = "simple",
183
+ confidence_level: float = 0.95,
184
+ n_resamples: int = 9999,
185
+ device: int = None,
186
+ random_state: Optional[int] = None,
187
+ input_column: str = "text",
188
+ label_column: str = "label",
189
+ generation_kwargs: dict = None,
190
+ ) -> Tuple[Dict[str, float], Any]:
191
+ result = super().compute(
192
+ model_or_pipeline=model_or_pipeline,
193
+ data=data,
194
+ subset=subset,
195
+ split=split,
196
+ metric=metric,
197
+ tokenizer=tokenizer,
198
+ strategy=strategy,
199
+ confidence_level=confidence_level,
200
+ n_resamples=n_resamples,
201
+ device=device,
202
+ random_state=random_state,
203
+ input_column=input_column,
204
+ label_column=label_column,
205
+ generation_kwargs=generation_kwargs,
206
+ )
207
+
208
+ return result
209
+
210
+
211
+ class TranslationEvaluator(Text2TextGenerationEvaluator):
212
+ """
213
+ Translation evaluator.
214
+ This translation generation evaluator can currently be loaded from [`evaluator`] using the default task name
215
+ `translation`.
216
+ Methods in this class assume a data format compatible with the [`~transformers.TranslationPipeline`].
217
+ """
218
+
219
+ PREDICTION_PREFIX = "translation"
220
+ PIPELINE_KWARGS = {"truncation": True}
221
+
222
+ def __init__(self, task="translation", default_metric_name=None):
223
+ super().__init__(task, default_metric_name=default_metric_name)
224
+
225
+ @add_start_docstrings(
226
+ EVALUTOR_COMPUTE_START_DOCSTRING,
227
+ TASK_DOCUMENTATION_KWARGS,
228
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING,
229
+ TRANSLATION_TASK_DOCSTRING_EXAMPLE,
230
+ )
231
+ def compute(
232
+ self,
233
+ model_or_pipeline: Union[
234
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
235
+ ] = None,
236
+ data: Union[str, Dataset] = None,
237
+ subset: Optional[str] = None,
238
+ split: Optional[str] = None,
239
+ metric: Union[str, EvaluationModule] = None,
240
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
241
+ strategy: Literal["simple", "bootstrap"] = "simple",
242
+ confidence_level: float = 0.95,
243
+ n_resamples: int = 9999,
244
+ device: int = None,
245
+ random_state: Optional[int] = None,
246
+ input_column: str = "text",
247
+ label_column: str = "label",
248
+ generation_kwargs: dict = None,
249
+ ) -> Tuple[Dict[str, float], Any]:
250
+ result = super().compute(
251
+ model_or_pipeline=model_or_pipeline,
252
+ data=data,
253
+ subset=subset,
254
+ split=split,
255
+ metric=metric,
256
+ tokenizer=tokenizer,
257
+ strategy=strategy,
258
+ confidence_level=confidence_level,
259
+ n_resamples=n_resamples,
260
+ device=device,
261
+ random_state=random_state,
262
+ input_column=input_column,
263
+ label_column=label_column,
264
+ generation_kwargs=generation_kwargs,
265
+ )
266
+
267
+ return result
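Because `generation_kwargs` are merged into `PIPELINE_KWARGS`, they apply to every pipeline call made during a run. A minimal sketch for the summarization subclass, following the `cnn_dailymail` example from the docstrings above; the generation settings are illustrative and assume the underlying `transformers` pipeline accepts them.

```python
from datasets import load_dataset
from evaluate import evaluator

task_evaluator = evaluator("summarization")
data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:4]")

results = task_evaluator.compute(
    model_or_pipeline="facebook/bart-large-cnn",
    data=data,
    input_column="article",
    label_column="highlights",
    metric="rouge",
    # Forwarded to the pipeline on every call via PIPELINE_KWARGS.
    generation_kwargs={"max_new_tokens": 60, "do_sample": False},
)
print(results)
```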
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py ADDED
@@ -0,0 +1,160 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from numbers import Number
16
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
17
+
18
+ from datasets import Dataset, load_dataset
19
+ from typing_extensions import Literal
20
+
21
+ from ..module import EvaluationModule
22
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
23
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
24
+ from .utils import DatasetColumnPair
25
+
26
+
27
+ if TYPE_CHECKING:
28
+ from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
29
+
30
+
31
+ TASK_DOCUMENTATION = r"""
32
+ Examples:
33
+ ```python
34
+ >>> from evaluate import evaluator
35
+ >>> from datasets import load_dataset
36
+ >>> task_evaluator = evaluator("text-classification")
37
+ >>> data = load_dataset("imdb", split="test[:2]")
38
+ >>> results = task_evaluator.compute(
39
+ >>> model_or_pipeline="huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli",
40
+ >>> data=data,
41
+ >>> metric="accuracy",
42
+ >>> label_mapping={"LABEL_0": 0.0, "LABEL_1": 1.0},
43
+ >>> strategy="bootstrap",
44
+ >>> n_resamples=10,
45
+ >>> random_state=0
46
+ >>> )
47
+ ```
48
+ """
49
+
50
+
51
+ class TextClassificationEvaluator(Evaluator):
52
+ """
53
+ Text classification evaluator.
54
+ This text classification evaluator can currently be loaded from [`evaluator`] using the default task name
55
+ `text-classification` or with a `"sentiment-analysis"` alias.
56
+ Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual
57
+ feature as input and a categorical label as output.
58
+ """
59
+
60
+ PIPELINE_KWARGS = {"truncation": True}
61
+
62
+ def __init__(self, task="text-classification", default_metric_name=None):
63
+ super().__init__(task, default_metric_name=default_metric_name)
64
+
65
+ def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str):
66
+ if data is None:
67
+ raise ValueError(
68
+ "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
69
+ )
70
+
71
+ self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
72
+
73
+ if second_input_column is not None:
74
+ self.check_required_columns(data, {"second_input_column": second_input_column})
75
+
76
+ data = load_dataset(data) if isinstance(data, str) else data
77
+
78
+ return {"references": data[label_column]}, DatasetColumnPair(
79
+ data, input_column, second_input_column, "text", "text_pair"
80
+ )
81
+
82
+ def predictions_processor(self, predictions, label_mapping):
83
+ predictions = [
84
+ label_mapping[element["label"]] if label_mapping is not None else element["label"]
85
+ for element in predictions
86
+ ]
87
+ return {"predictions": predictions}
88
+
89
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
90
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
91
+ def compute(
92
+ self,
93
+ model_or_pipeline: Union[
94
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
95
+ ] = None,
96
+ data: Union[str, Dataset] = None,
97
+ subset: Optional[str] = None,
98
+ split: Optional[str] = None,
99
+ metric: Union[str, EvaluationModule] = None,
100
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
101
+ feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
102
+ strategy: Literal["simple", "bootstrap"] = "simple",
103
+ confidence_level: float = 0.95,
104
+ n_resamples: int = 9999,
105
+ device: int = None,
106
+ random_state: Optional[int] = None,
107
+ input_column: str = "text",
108
+ second_input_column: Optional[str] = None,
109
+ label_column: str = "label",
110
+ label_mapping: Optional[Dict[str, Number]] = None,
111
+ ) -> Tuple[Dict[str, float], Any]:
112
+ """
113
+ input_column (`str`, *optional*, defaults to `"text"`):
114
+ The name of the column containing the text feature in the dataset specified by `data`.
115
+ second_input_column (`str`, *optional*, defaults to `None`):
116
+ The name of the second column containing the text features. This may be useful for classification tasks
117
+ as MNLI, where two columns are used.
118
+ label_column (`str`, defaults to `"label"`):
119
+ The name of the column containing the labels in the dataset specified by `data`.
120
+ label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
121
+ We want to map class labels defined by the model in the pipeline to values consistent with those
122
+ defined in the `label_column` of the `data` dataset.
123
+ """
124
+
125
+ result = {}
126
+
127
+ self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
128
+
129
+ # Prepare inputs
130
+ data = self.load_data(data=data, subset=subset, split=split)
131
+ metric_inputs, pipe_inputs = self.prepare_data(
132
+ data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column
133
+ )
134
+ pipe = self.prepare_pipeline(
135
+ model_or_pipeline=model_or_pipeline,
136
+ tokenizer=tokenizer,
137
+ feature_extractor=feature_extractor,
138
+ device=device,
139
+ )
140
+ metric = self.prepare_metric(metric)
141
+
142
+ # Compute predictions
143
+ predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
144
+ predictions = self.predictions_processor(predictions, label_mapping)
145
+ metric_inputs.update(predictions)
146
+
147
+ # Compute metrics from references and predictions
148
+ metric_results = self.compute_metric(
149
+ metric=metric,
150
+ metric_inputs=metric_inputs,
151
+ strategy=strategy,
152
+ confidence_level=confidence_level,
153
+ n_resamples=n_resamples,
154
+ random_state=random_state,
155
+ )
156
+
157
+ result.update(metric_results)
158
+ result.update(perf_results)
159
+
160
+ return result
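For pair-classification tasks, `second_input_column` makes `prepare_data` build a `DatasetColumnPair`, so each example reaches the pipeline as `{"text": ..., "text_pair": ...}`. A minimal sketch; the MNLI checkpoint, split, and upper-case label spelling are assumptions to verify against the model card rather than anything fixed by this file.

```python
from datasets import load_dataset
from evaluate import evaluator

task_evaluator = evaluator("text-classification")

data = load_dataset("glue", "mnli", split="validation_matched[:16]")

results = task_evaluator.compute(
    model_or_pipeline="roberta-large-mnli",  # assumed NLI checkpoint
    data=data,
    input_column="premise",
    second_input_column="hypothesis",
    label_column="label",
    metric="accuracy",
    # Map the pipeline's string labels onto glue/mnli's integer ids.
    label_mapping={"ENTAILMENT": 0, "NEUTRAL": 1, "CONTRADICTION": 2},
)
print(results)
```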
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict, Tuple
16
+
17
+ from datasets import Dataset
18
+
19
+ from .base import Evaluator
20
+ from .utils import DatasetColumn
21
+
22
+
23
+ TASK_DOCUMENTATION_KWARGS = r"""
24
+ input_column (`str`, defaults to `"text"`):
25
+ the name of the column containing the input text in the dataset specified by `data`.
26
+ generation_kwargs (`Dict`, *optional*, defaults to `None`):
27
+ The generation kwargs are passed to the pipeline and set the text generation strategy.
28
+ """
29
+
30
+
31
+ class TextGenerationEvaluator(Evaluator):
32
+ """
33
+ Text generation evaluator.
34
+ This Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
35
+ `text-generation`.
36
+ Methods in this class assume a data format compatible with the [`~transformers.TextGenerationPipeline`].
37
+ """
38
+
39
+ def predictions_processor(self, predictions, *args, **kwargs):
40
+ """
41
+ Args:
42
+ predictions: A list of lists of dicts
43
+
44
+ Returns:
45
+ `dict`: All the generated texts are flattened and stored under the "data" key.
46
+ """
47
+ return {"data": [pred[f"{self.predictions_prefix}_text"] for pred_list in predictions for pred in pred_list]}
48
+
49
+ def __init__(self, task="text-generation", default_metric_name=None, predictions_prefix: str = "generated"):
50
+ super().__init__(task=task, default_metric_name=default_metric_name)
51
+ self.predictions_prefix = predictions_prefix
52
+
53
+ def prepare_data(self, data: Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, DatasetColumn]:
54
+ """
55
+ Prepare data.
56
+
57
+ Args:
58
+ data ([`Dataset`]):
59
+ Specifies the dataset we will run evaluation on.
60
+ input_column (`str`, defaults to `"text"`):
61
+ The name of the column containing the text feature in the dataset specified by `data`.
62
+ Returns:
63
+ `dict`: metric inputs.
64
+ `list`: pipeline inputs.
65
+ """
66
+
67
+ self.check_required_columns(data, {"input_column": input_column})
68
+
69
+ return {}, DatasetColumn(data, input_column)
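Since the text-generation pipeline returns a list of candidate dicts per prompt, `predictions_processor` flattens them under a single `"data"` key using the `generated_text` field (the default `predictions_prefix`). A minimal sketch with hand-written pipeline output, assuming the class is re-exported from `evaluate.evaluator`.

```python
from evaluate.evaluator import TextGenerationEvaluator

task_evaluator = TextGenerationEvaluator()  # predictions_prefix defaults to "generated"

# One list of candidate generations per prompt, as the pipeline would return.
fake_pipeline_output = [
    [{"generated_text": "Hello world"}],
    [{"generated_text": "Foo"}, {"generated_text": "Bar"}],
]

print(task_evaluator.predictions_processor(fake_pipeline_output))
# {'data': ['Hello world', 'Foo', 'Bar']}
```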
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py ADDED
@@ -0,0 +1,278 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
16
+
17
+ from datasets import ClassLabel, Dataset, Sequence
18
+ from typing_extensions import Literal
19
+
20
+ from ..module import EvaluationModule
21
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
22
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
23
+ from .utils import DatasetColumn
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
28
+
29
+
30
+ TASK_DOCUMENTATION = r"""
31
+ The dataset input and label columns are expected to be formatted as a list of words and a list of labels respectively, following the [conll2003 dataset](https://huggingface.co/datasets/conll2003). Datasets whose inputs are single strings and whose labels are lists of character offsets are not supported.
32
+
33
+ Examples:
34
+ ```python
35
+ >>> from evaluate import evaluator
36
+ >>> from datasets import load_dataset
37
+ >>> task_evaluator = evaluator("token-classification")
38
+ >>> data = load_dataset("conll2003", split="validation[:2]")
39
+ >>> results = task_evaluator.compute(
40
+ >>> model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english",
41
+ >>> data=data,
42
+ >>> metric="seqeval",
43
+ >>> )
44
+ ```
45
+
46
+ <Tip>
47
+
48
+ For example, the following dataset format is accepted by the evaluator:
49
+
50
+ ```python
51
+ dataset = Dataset.from_dict(
52
+ mapping={
53
+ "tokens": [["New", "York", "is", "a", "city", "and", "Felix", "a", "person", "."]],
54
+ "ner_tags": [[1, 2, 0, 0, 0, 0, 3, 0, 0, 0]],
55
+ },
56
+ features=Features({
57
+ "tokens": Sequence(feature=Value(dtype="string")),
58
+ "ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC", "B-PER", "I-PER"])),
59
+ }),
60
+ )
61
+ ```
62
+
63
+ </Tip>
64
+
65
+ <Tip warning={true}>
66
+
67
+ For example, the following dataset format is **not** accepted by the evaluator:
68
+
69
+ ```python
70
+ dataset = Dataset.from_dict(
71
+ mapping={
72
+ "tokens": [["New York is a city and Felix a person."]],
73
+ "starts": [[0, 23]],
74
+ "ends": [[7, 27]],
75
+ "ner_tags": [["LOC", "PER"]],
76
+ },
77
+ features=Features({
78
+ "tokens": Value(dtype="string"),
79
+ "starts": Sequence(feature=Value(dtype="int32")),
80
+ "ends": Sequence(feature=Value(dtype="int32")),
81
+ "ner_tags": Sequence(feature=Value(dtype="string")),
82
+ }),
83
+ )
84
+ ```
85
+
86
+ </Tip>
87
+ """
88
+
89
+
90
+ class TokenClassificationEvaluator(Evaluator):
91
+ """
92
+ Token classification evaluator.
93
+
94
+ This token classification evaluator can currently be loaded from [`evaluator`] using the default task name
95
+ `token-classification`.
96
+
97
+ Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`].
98
+ """
99
+
100
+ PIPELINE_KWARGS = {"ignore_labels": []}
101
+
102
+ def __init__(self, task="token-classification", default_metric_name=None):
103
+ super().__init__(task, default_metric_name=default_metric_name)
104
+
105
+ def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str):
106
+ """
107
+ Transform the pipeline predictions into a list of predicted labels of the same length as the true labels.
108
+
109
+ Args:
110
+ predictions (`List[List[Dict]]`):
111
+ List of pipeline predictions, where each token has been labeled.
112
+ words (`List[List[str]]`):
113
+ Original input data to the pipeline, used to build predicted labels of the same length.
114
+ join_by (`str`):
115
+ String to use to join two words. In English, it will typically be " ".
116
+
117
+ Returns:
118
+ `dict`: a dictionary holding the predictions
119
+ """
120
+ preds = []
121
+
122
+ # iterate over the data rows
123
+ for i, prediction in enumerate(predictions):
124
+ pred_processed = []
125
+
126
+ # get a list of tuples giving the indexes of the start and end character of each word
127
+ words_offsets = self.words_to_offsets(words[i], join_by)
128
+
129
+ token_index = 0
130
+ for word_offset in words_offsets:
131
+ # for each word, keep only the predicted label of its first token and discard the others
132
+ while prediction[token_index]["start"] < word_offset[0]:
133
+ token_index += 1
134
+
135
+ if prediction[token_index]["start"] > word_offset[0]: # bad indexing
136
+ pred_processed.append("O")
137
+ elif prediction[token_index]["start"] == word_offset[0]:
138
+ pred_processed.append(prediction[token_index]["entity"])
139
+
140
+ preds.append(pred_processed)
141
+
142
+ return {"predictions": preds}
143
+
144
+ def words_to_offsets(self, words: List[str], join_by: str):
145
+ """
146
+ Convert a list of words to a list of offsets, where the words are joined by `join_by`.
147
+
148
+ Args:
149
+ words (`List[str]`):
150
+ List of words to get offsets from.
151
+ join_by (`str`):
152
+ String to insert between words.
153
+
154
+ Returns:
155
+ `List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words.
156
+ """
157
+ offsets = []
158
+
159
+ start = 0
160
+ for word in words:
161
+ end = start + len(word) - 1
162
+ offsets.append((start, end))
163
+ start = end + len(join_by) + 1
164
+
165
+ return offsets
166
+
167
+ def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str):
168
+ super().prepare_data(data, input_column, label_column)
169
+
170
+ if not isinstance(data.features[input_column], Sequence) or not isinstance(
171
+ data.features[label_column], Sequence
172
+ ):
173
+ raise ValueError(
174
+ "TokenClassificationEvaluator expects the input and label columns to be provided as lists."
175
+ )
176
+
177
+ # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
178
+ # Otherwise, we have to get the list of labels manually.
179
+ labels_are_int = isinstance(data.features[label_column].feature, ClassLabel)
180
+ if labels_are_int:
181
+ label_list = data.features[label_column].feature.names # list of string labels
182
+ id_to_label = {i: label for i, label in enumerate(label_list)}
183
+ references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]]
184
+ elif data.features[label_column].feature.dtype.startswith("int"):
185
+ raise NotImplementedError(
186
+ "References provided as integers, but the reference column is not a Sequence of ClassLabels."
187
+ )
188
+ else:
189
+ # In the event the labels are not a `Sequence[ClassLabel]`, we already have the labels as strings
190
+ # An example is labels as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in polyglot_ner dataset
191
+ references = data[label_column]
192
+
193
+ metric_inputs = {"references": references}
194
+ data = data.map(lambda x: {input_column: join_by.join(x[input_column])})
195
+ pipeline_inputs = DatasetColumn(data, input_column)
196
+
197
+ return metric_inputs, pipeline_inputs
198
+
199
+ def prepare_pipeline(
200
+ self,
201
+ model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
202
+ tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
203
+ feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
204
+ device: int = None,
205
+ ):
206
+ pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device)
207
+
208
+ # check the pipeline outputs start characters in its predictions
209
+ dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS)
210
+ if dummy_output[0][0]["start"] is None:
211
+ raise ValueError(
212
+ "TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). "
213
+ "Transformers pipelines with a slow tokenizer will raise this error."
214
+ )
215
+
216
+ return pipe
217
+
218
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
219
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
220
+ def compute(
221
+ self,
222
+ model_or_pipeline: Union[
223
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
224
+ ] = None,
225
+ data: Union[str, Dataset] = None,
226
+ subset: Optional[str] = None,
227
+ split: str = None,
228
+ metric: Union[str, EvaluationModule] = None,
229
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
230
+ strategy: Literal["simple", "bootstrap"] = "simple",
231
+ confidence_level: float = 0.95,
232
+ n_resamples: int = 9999,
233
+ device: Optional[int] = None,
234
+ random_state: Optional[int] = None,
235
+ input_column: str = "tokens",
236
+ label_column: str = "ner_tags",
237
+ join_by: Optional[str] = " ",
238
+ ) -> Tuple[Dict[str, float], Any]:
239
+ """
240
+ input_column (`str`, defaults to `"tokens"`):
241
+ The name of the column containing the tokens feature in the dataset specified by `data`.
242
+ label_column (`str`, defaults to `"ner_tags"`):
243
+ The name of the column containing the labels in the dataset specified by `data`.
244
+ join_by (`str`, *optional*, defaults to `" "`):
245
+ This evaluator supports datasets whose input column is a list of words. This parameter specifies how to join
246
+ words to generate a string input. This is especially useful for languages that do not separate words by a space.
247
+ """
248
+ result = {}
249
+
250
+ self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
251
+
252
+ # Prepare inputs
253
+ data = self.load_data(data=data, subset=subset, split=split)
254
+ metric_inputs, pipe_inputs = self.prepare_data(
255
+ data=data, input_column=input_column, label_column=label_column, join_by=join_by
256
+ )
257
+ pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
258
+ metric = self.prepare_metric(metric)
259
+
260
+ # Compute predictions
261
+ predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
262
+ predictions = self.predictions_processor(predictions, data[input_column], join_by)
263
+ metric_inputs.update(predictions)
264
+
265
+ # Compute metrics from references and predictions
266
+ metric_results = self.compute_metric(
267
+ metric=metric,
268
+ metric_inputs=metric_inputs,
269
+ strategy=strategy,
270
+ confidence_level=confidence_level,
271
+ n_resamples=n_resamples,
272
+ random_state=random_state,
273
+ )
274
+
275
+ result.update(metric_results)
276
+ result.update(perf_results)
277
+
278
+ return result
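The `compute` method above ties together data preparation, pipeline inference, and metric computation. A minimal usage sketch follows; the model checkpoint, dataset, and result keys are illustrative assumptions (any token-classification model with a fast tokenizer and any dataset exposing `tokens`/`ner_tags` columns should fit the same pattern):

```python
from datasets import load_dataset
from evaluate import evaluator

# Hypothetical inputs, chosen only for illustration.
task_evaluator = evaluator("token-classification")
data = load_dataset("conll2003", split="validation[:100]")

results = task_evaluator.compute(
    model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english",
    data=data,
    metric="seqeval",  # seqeval consumes the string labels built in prepare_data
    join_by=" ",       # words are joined with spaces before being fed to the pipeline
)
print(results["overall_f1"], results["total_time_in_seconds"])
```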
env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/utils.py ADDED
@@ -0,0 +1,84 @@
1
+ from datasets import Dataset, get_dataset_split_names
2
+
3
+
4
+ class DatasetColumn(list):
5
+ """Helper class to avoid loading a dataset column into memory when accessing it."""
6
+
7
+ def __init__(self, dataset: Dataset, key: str):
8
+ self.dataset = dataset
9
+ self.key = key
10
+
11
+ def __len__(self):
12
+ return len(self.dataset)
13
+
14
+ def __getitem__(self, i):
15
+ return self.dataset[i][self.key]
16
+
17
+ def __iter__(self):
18
+ return (self.dataset[i][self.key] for i in range(len(self)))
19
+
20
+
21
+ def choose_split(data, subset=None):
22
+ available_splits = get_dataset_split_names(data, subset)
23
+ preferred_split_order = [
24
+ "test",
25
+ "testing",
26
+ "eval",
27
+ "evaluation",
28
+ "validation",
29
+ "val",
30
+ "valid",
31
+ "dev",
32
+ "train",
33
+ "training",
34
+ ]
35
+ for split in preferred_split_order:
36
+ if split in available_splits:
37
+ return split
38
+ raise ValueError("No dataset split defined! Pass an explicit value to the `split` kwarg.")
39
+
40
+
41
+ class DatasetColumnPair(list):
42
+ """Helper class to avoid loading two dataset columns into memory when accessing them."""
43
+
44
+ def __init__(
45
+ self,
46
+ dataset: Dataset,
47
+ first_col: str,
48
+ second_col: str,
49
+ first_key: str,
50
+ second_key: str,
51
+ ):
52
+ """
53
+ Args:
54
+ dataset (Dataset): dataset to build an iterator on
55
+ first_col (str): first column name to use in the dataset
56
+ second_col (str): second column name to use in the dataset
57
+ first_key (str): key name used for the first column in the returned dictionary
58
+ second_key (str): key name used for the second column in the returned dictionary
59
+ """
60
+ self.dataset = dataset
61
+
62
+ self.first_col = first_col
63
+ self.second_col = second_col
64
+
65
+ self.first_key = first_key
66
+ self.second_key = second_key
67
+
68
+ def __len__(self):
69
+ return len(self.dataset)
70
+
71
+ def __getitem__(self, i):
72
+ return {
73
+ self.first_key: self.dataset[i][self.first_col],
74
+ self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
75
+ }
76
+
77
+ def __iter__(self):
78
+ return (
79
+ {
80
+ self.first_key: self.dataset[i][self.first_col],
81
+ self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
82
+ }
83
+ for i in range(len(self))
84
+ )
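Both helpers wrap a `Dataset` so a pipeline can iterate over one (or two) columns lazily instead of materializing them as Python lists. A small sketch, using a toy in-memory dataset purely for illustration:

```python
from datasets import Dataset
from evaluate.evaluator.utils import DatasetColumn, DatasetColumnPair

# Toy dataset used only to demonstrate lazy column access.
ds = Dataset.from_dict({"question": ["Who?", "Why?"], "context": ["Bob did.", "Because."]})

questions = DatasetColumn(ds, "question")
print(len(questions), questions[0])  # 2 "Who?"

pairs = DatasetColumnPair(ds, "question", "context", first_key="question", second_key="context")
print(pairs[1])                      # {"question": "Why?", "context": "Because."}
```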
env-llmeval/lib/python3.10/site-packages/evaluate/hub.py ADDED
@@ -0,0 +1,133 @@
1
+ from typing import Dict
2
+
3
+ import requests
4
+ from huggingface_hub import dataset_info, model_info
5
+ from huggingface_hub.repocard import metadata_update
6
+
7
+ from .config import HF_HUB_ALLOWED_TASKS
8
+ from .utils.logging import get_logger
9
+
10
+
11
+ logger = get_logger(__name__)
12
+
13
+
14
+ def push_to_hub(
15
+ model_id: str,
16
+ task_type: str,
17
+ dataset_type: str,
18
+ dataset_name: str,
19
+ metric_type: str,
20
+ metric_name: str,
21
+ metric_value: float,
22
+ task_name: str = None,
23
+ dataset_config: str = None,
24
+ dataset_split: str = None,
25
+ dataset_revision: str = None,
26
+ dataset_args: Dict[str, int] = None,
27
+ metric_config: str = None,
28
+ metric_args: Dict[str, int] = None,
29
+ overwrite: bool = False,
30
+ ):
31
+ r"""
32
+ Pushes the result of a metric to the metadata of a model repository in the Hub.
33
+
34
+ Args:
35
+ model_id (`str`):
36
+ Model id from https://hf.co/models.
37
+ task_type (`str`):
38
+ Task id, refer to the [Hub allowed tasks](https://github.com/huggingface/evaluate/blob/main/src/evaluate/config.py#L154) for allowed values.
39
+ dataset_type (`str`):
40
+ Dataset id from https://hf.co/datasets.
41
+ dataset_name (`str`):
42
+ Pretty name for the dataset.
43
+ metric_type (`str`):
44
+ Metric id from https://hf.co/metrics.
45
+ metric_name (`str`):
46
+ Pretty name for the metric.
47
+ metric_value (`float`):
48
+ Computed metric value.
49
+ task_name (`str`, *optional*):
50
+ Pretty name for the task.
51
+ dataset_config (`str`, *optional*):
52
+ Dataset configuration used in [`~datasets.load_dataset`].
53
+ See [`~datasets.load_dataset`] for more info.
54
+ dataset_split (`str`, *optional*):
55
+ Name of split used for metric computation.
56
+ dataset_revision (`str`, *optional*):
57
+ Git hash for the specific version of the dataset.
58
+ dataset_args (`dict[str, int]`, *optional*):
59
+ Additional arguments passed to [`~datasets.load_dataset`].
60
+ metric_config (`str`, *optional*):
61
+ Configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
62
+ metric_args (`dict[str, int]`, *optional*):
63
+ Arguments passed during [`~evaluate.EvaluationModule.compute`].
64
+ overwrite (`bool`, *optional*, defaults to `False`):
65
+ If set to `True` an existing metric field can be overwritten, otherwise
66
+ attempting to overwrite any existing fields will cause an error.
67
+
68
+ Example:
69
+
70
+ ```python
71
+ >>> push_to_hub(
72
+ ... model_id="huggingface/gpt2-wikitext2",
73
+ ... metric_value=0.5,
74
+ ... metric_type="bleu",
75
+ ... metric_name="BLEU",
76
+ ... dataset_name="WikiText",
77
+ ... dataset_type="wikitext",
78
+ ... dataset_split="test",
79
+ ... task_type="text-generation",
80
+ ... task_name="Text Generation"
81
+ ... )
82
+ ```"""
83
+ if task_type not in HF_HUB_ALLOWED_TASKS:
84
+ raise ValueError(f"Task type not supported. Task has to be one of {HF_HUB_ALLOWED_TASKS}")
85
+
86
+ try:
87
+ dataset_info(dataset_type)
88
+ except requests.exceptions.HTTPError:
89
+ logger.warning(f"Dataset {dataset_type} not found on the Hub at hf.co/datasets/{dataset_type}")
90
+
91
+ try:
92
+ model_info(model_id)
93
+ except requests.exceptions.HTTPError:
94
+ raise ValueError(f"Model {model_id} not found on the Hub at hf.co/{model_id}")
95
+
96
+ result = {
97
+ "task": {
98
+ "type": task_type,
99
+ },
100
+ "dataset": {
101
+ "type": dataset_type,
102
+ "name": dataset_name,
103
+ },
104
+ "metrics": [
105
+ {
106
+ "type": metric_type,
107
+ "value": metric_value,
108
+ },
109
+ ],
110
+ }
111
+
112
+ if dataset_config is not None:
113
+ result["dataset"]["config"] = dataset_config
114
+ if dataset_split is not None:
115
+ result["dataset"]["split"] = dataset_split
116
+ if dataset_revision is not None:
117
+ result["dataset"]["revision"] = dataset_revision
118
+ if dataset_args is not None:
119
+ result["dataset"]["args"] = dataset_args
120
+
121
+ if task_name is not None:
122
+ result["task"]["name"] = task_name
123
+
124
+ if metric_name is not None:
125
+ result["metrics"][0]["name"] = metric_name
126
+ if metric_config is not None:
127
+ result["metrics"][0]["config"] = metric_config
128
+ if metric_args is not None:
129
+ result["metrics"][0]["args"] = metric_args
130
+
131
+ metadata = {"model-index": [{"results": [result]}]}
132
+
133
+ return metadata_update(repo_id=model_id, metadata=metadata, overwrite=overwrite)
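For reference, the `result` dictionary assembled above is nested under a `model-index` entry in the model card metadata before being pushed. Roughly, using the placeholder values from the docstring example:

```python
# Sketch of the metadata written by push_to_hub (values are placeholders).
metadata = {
    "model-index": [
        {
            "results": [
                {
                    "task": {"type": "text-generation", "name": "Text Generation"},
                    "dataset": {"type": "wikitext", "name": "WikiText", "split": "test"},
                    "metrics": [{"type": "bleu", "value": 0.5, "name": "BLEU"}],
                }
            ]
        }
    ]
}
```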
env-llmeval/lib/python3.10/site-packages/evaluate/info.py ADDED
@@ -0,0 +1,157 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """ EvaluationModuleInfo records information we know about a dataset and a metric.
17
+ """
18
+
19
+ import dataclasses
20
+ import json
21
+ import os
22
+ from dataclasses import asdict, dataclass, field
23
+ from typing import List, Optional, Union
24
+
25
+ from datasets.features import Features, Value
26
+
27
+ from . import config
28
+ from .utils.logging import get_logger
29
+
30
+
31
+ logger = get_logger(__name__)
32
+
33
+
34
+ @dataclass
35
+ class EvaluationModuleInfo:
36
+ """Base class to store information about an evaluation used for `MetricInfo`, `ComparisonInfo`,
37
+ and `MeasurementInfo`.
38
+
39
+ `EvaluationModuleInfo` documents an evaluation, including its name, version, and features.
40
+ See the constructor arguments and properties for a full list.
41
+
42
+ Note: Not all fields are known on construction and may be updated later.
43
+ """
44
+
45
+ # Set in the dataset scripts
46
+ description: str
47
+ citation: str
48
+ features: Union[Features, List[Features]]
49
+ inputs_description: str = field(default_factory=str)
50
+ homepage: str = field(default_factory=str)
51
+ license: str = field(default_factory=str)
52
+ codebase_urls: List[str] = field(default_factory=list)
53
+ reference_urls: List[str] = field(default_factory=list)
54
+ streamable: bool = False
55
+ format: Optional[str] = None
56
+ module_type: str = "metric" # deprecate this in the future
57
+
58
+ # Set later by the builder
59
+ module_name: Optional[str] = None
60
+ config_name: Optional[str] = None
61
+ experiment_id: Optional[str] = None
62
+
63
+ def __post_init__(self):
64
+ if self.format is not None:
65
+ for key, value in self.features.items():
66
+ if not isinstance(value, Value):
67
+ raise ValueError(
68
+ f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
69
+ f"Here {key} is an instance of {value.__class__.__name__}"
70
+ )
71
+
72
+ def write_to_directory(self, metric_info_dir):
73
+ """Write `EvaluationModuleInfo` as JSON to `metric_info_dir`.
74
+ Also save the license separately in LICENSE.
75
+
76
+ Args:
77
+ metric_info_dir (`str`):
78
+ The directory to save `metric_info_dir` to.
79
+
80
+ Example:
81
+
82
+ ```py
83
+ >>> my_metric.info.write_to_directory("/path/to/directory/")
84
+ ```
85
+ """
86
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
87
+ json.dump(asdict(self), f)
88
+
89
+ with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
90
+ f.write(self.license)
91
+
92
+ @classmethod
93
+ def from_directory(cls, metric_info_dir) -> "EvaluationModuleInfo":
94
+ """Create `EvaluationModuleInfo` from the JSON file in `metric_info_dir`.
95
+
96
+ Args:
97
+ metric_info_dir (`str`):
98
+ The directory containing the `metric_info` JSON file. This
99
+ should be the root directory of a specific metric version.
100
+
101
+ Example:
102
+
103
+ ```py
104
+ >>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/")
105
+ ```
106
+ """
107
+ logger.info(f"Loading Metric info from {metric_info_dir}")
108
+ if not metric_info_dir:
109
+ raise ValueError("Calling EvaluationModuleInfo.from_directory() with undefined metric_info_dir.")
110
+
111
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
112
+ metric_info_dict = json.load(f)
113
+ return cls.from_dict(metric_info_dict)
114
+
115
+ @classmethod
116
+ def from_dict(cls, metric_info_dict: dict) -> "EvaluationModuleInfo":
117
+ field_names = {f.name for f in dataclasses.fields(cls)}
118
+ return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
119
+
120
+
121
+ @dataclass
122
+ class MetricInfo(EvaluationModuleInfo):
123
+ """Information about a metric.
124
+
125
+ `EvaluationModuleInfo` documents a metric, including its name, version, and features.
126
+ See the constructor arguments and properties for a full list.
127
+
128
+ Note: Not all fields are known on construction and may be updated later.
129
+ """
130
+
131
+ module_type: str = "metric"
132
+
133
+
134
+ @dataclass
135
+ class ComparisonInfo(EvaluationModuleInfo):
136
+ """Information about a comparison.
137
+
138
+ `EvaluationModuleInfo` documents a comparison, including its name, version, and features.
139
+ See the constructor arguments and properties for a full list.
140
+
141
+ Note: Not all fields are known on construction and may be updated later.
142
+ """
143
+
144
+ module_type: str = "comparison"
145
+
146
+
147
+ @dataclass
148
+ class MeasurementInfo(EvaluationModuleInfo):
149
+ """Information about a measurement.
150
+
151
+ `EvaluationModuleInfo` documents a measurement, including its name, version, and features.
152
+ See the constructor arguments and properties for a full list.
153
+
154
+ Note: Not all fields are known on construction and may be updated later.
155
+ """
156
+
157
+ module_type: str = "measurement"
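A short round-trip sketch of the serialization helpers above; the feature schema and output directory are arbitrary examples, not part of this module:

```python
import os

from datasets import Features, Value
from evaluate.info import MetricInfo

info = MetricInfo(
    description="Toy accuracy-style metric (illustrative only).",
    citation="",
    features=Features({"predictions": Value("int32"), "references": Value("int32")}),
)

out_dir = "/tmp/toy_metric_info"  # hypothetical path
os.makedirs(out_dir, exist_ok=True)
info.write_to_directory(out_dir)  # writes the metric info JSON plus a LICENSE file
restored = MetricInfo.from_directory(out_dir)
assert restored.module_type == "metric"
```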
env-llmeval/lib/python3.10/site-packages/evaluate/inspect.py ADDED
@@ -0,0 +1,129 @@
1
+ # Copyright 2020 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """ List and inspect metrics."""
17
+
18
+ from typing import Optional
19
+
20
+ import requests
21
+ from datasets import DownloadConfig
22
+
23
+ from .config import EVALUATION_MODULE_TYPES, HF_LIST_ENDPOINT
24
+ from .loading import evaluation_module_factory
25
+ from .utils.logging import get_logger
26
+
27
+
28
+ logger = get_logger(__name__)
29
+
30
+
31
+ class SplitsNotFoundError(ValueError):
32
+ pass
33
+
34
+
35
+ def list_evaluation_modules(module_type=None, include_community=True, with_details=False):
36
+ """List all evaluation modules available on the Hugging Face Hub.
37
+
38
+ Args:
39
+ module_type (`str`, *optional*, defaults to `None`):
40
+ Type of evaluation modules to list. Has to be one of `'metric'`, `'comparison'`, or `'measurement'`. If `None`, all types are listed.
41
+ include_community (`bool`, *optional*, defaults to `True`):
42
+ Include community modules in the list.
43
+ with_details (`bool`, *optional*, defaults to `False`):
44
+ Return the full details on the metrics instead of only the ID.
45
+
46
+ Returns:
47
+ `List[Union[str, dict]]`
48
+
49
+ Example:
50
+
51
+ ```py
52
+ >>> from evaluate import list_evaluation_modules
53
+ >>> list_evaluation_modules(module_type="metric")
54
+ ```
55
+ """
56
+
57
+ if module_type is None:
58
+ evaluations_list = []
59
+ for module_type in EVALUATION_MODULE_TYPES:
60
+ evaluations_list.extend(
61
+ _list_evaluation_modules_type(
62
+ module_type, include_community=include_community, with_details=with_details
63
+ )
64
+ )
65
+ else:
66
+ if module_type not in EVALUATION_MODULE_TYPES:
67
+ raise ValueError(f"Invalid module type '{module_type}'. Has to be one of {EVALUATION_MODULE_TYPES}.")
68
+ evaluations_list = _list_evaluation_modules_type(
69
+ module_type, include_community=include_community, with_details=with_details
70
+ )
71
+ return evaluations_list
72
+
73
+
74
+ def _list_evaluation_modules_type(module_type, include_community=True, with_details=False):
75
+
76
+ r = requests.get(HF_LIST_ENDPOINT.format(type=module_type))
77
+ r.raise_for_status()
78
+ d = r.json()
79
+
80
+ if not include_community:
81
+ d = [element for element in d if element["id"].split("/")[0] == f"evaluate-{module_type}"]
82
+
83
+ # remove namespace for canonical modules and add community tag
84
+ for element in d:
85
+ if element["id"].split("/")[0] == f"evaluate-{module_type}":
86
+ element["id"] = element["id"].split("/")[1]
87
+ element["community"] = False
88
+ else:
89
+ element["community"] = True
90
+
91
+ if with_details:
92
+ return [
93
+ {
94
+ "name": element["id"],
95
+ "type": module_type,
96
+ "community": element["community"],
97
+ "likes": element.get("likes", 0),
98
+ }
99
+ for element in d
100
+ ]
101
+ else:
102
+ return [element["id"] for element in d]
103
+
104
+
105
+ def inspect_evaluation_module(
106
+ path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs
107
+ ):
108
+ r"""
109
+ Allow inspection/modification of an evaluation script by copying it to the local drive at local_path.
110
+
111
+ Args:
112
+ path (``str``): path to the evaluation script. Can be either:
113
+
114
+ - a local path to script or the directory containing the script (if the script has the same name as the directory),
115
+ e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``
116
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``evaluate.list_evaluation_modules()``)
117
+ e.g. ``'accuracy'``, ``'bleu'`` or ``'word_length'``
118
+ local_path (``str``): path to the local folder to copy the evaluation script to.
119
+ download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
120
+ **download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
121
+ """
122
+ evaluation_module = evaluation_module_factory(
123
+ path, download_config=download_config, force_local_path=local_path, **download_kwargs
124
+ )
125
+ print(
126
+ f"The processing scripts for metric {path} can be inspected at {local_path}. "
127
+ f"The main class is in {evaluation_module.module_path}. "
128
+ f"You can modify this processing script and use it with `evaluate.load({local_path})`."
129
+ )
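Typical usage of the two helpers above; the module names and target folder are arbitrary examples, and network access to the Hub is assumed:

```python
from evaluate import inspect_evaluation_module, list_evaluation_modules

# IDs only
print(list_evaluation_modules(module_type="measurement"))

# Full details, canonical modules only
details = list_evaluation_modules(module_type="metric", include_community=False, with_details=True)
print(details[0])  # e.g. {"name": ..., "type": "metric", "community": False, "likes": ...}

# Copy a loading script locally so it can be edited and re-loaded
inspect_evaluation_module("accuracy", local_path="./inspected_accuracy")
```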
env-llmeval/lib/python3.10/site-packages/evaluate/loading.py ADDED
@@ -0,0 +1,771 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Access datasets."""
17
+ import filecmp
18
+ import importlib
19
+ import inspect
20
+ import json
21
+ import os
22
+ import re
23
+ import shutil
24
+ import time
25
+ from dataclasses import dataclass
26
+ from pathlib import Path
27
+ from typing import List, Optional, Tuple, Type, Union
28
+ from urllib.parse import urlparse
29
+
30
+ from datasets import DownloadConfig, DownloadMode
31
+ from datasets.builder import DatasetBuilder
32
+ from datasets.packaged_modules import _EXTENSION_TO_MODULE, _hash_python_lines
33
+ from datasets.utils.filelock import FileLock
34
+ from datasets.utils.version import Version
35
+
36
+ from . import SCRIPTS_VERSION, config
37
+ from .module import EvaluationModule
38
+ from .utils.file_utils import (
39
+ cached_path,
40
+ head_hf_s3,
41
+ hf_hub_url,
42
+ init_hf_modules,
43
+ is_relative_path,
44
+ relative_to_absolute_path,
45
+ url_or_path_join,
46
+ )
47
+ from .utils.logging import get_logger
48
+
49
+
50
+ logger = get_logger(__name__)
51
+
52
+
53
+ ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + ["zip"]
54
+
55
+
56
+ def init_dynamic_modules(
57
+ name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
58
+ ):
59
+ """
60
+ Create a module with name `name` in which you can add dynamic modules
61
+ such as metrics or datasets. The module can be imported using its name.
62
+ The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can
63
+ be overridden by specifying a path to another directory in `hf_modules_cache`.
64
+ """
65
+ hf_modules_cache = init_hf_modules(hf_modules_cache)
66
+ dynamic_modules_path = os.path.join(hf_modules_cache, name)
67
+ os.makedirs(dynamic_modules_path, exist_ok=True)
68
+ if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
69
+ with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
70
+ pass
71
+ return dynamic_modules_path
72
+
73
+
74
+ def import_main_class(module_path) -> Optional[Union[Type[DatasetBuilder], Type[EvaluationModule]]]:
75
+ """Import a module at module_path and return its main class, a Metric by default"""
76
+ module = importlib.import_module(module_path)
77
+ main_cls_type = EvaluationModule
78
+
79
+ # Find the main class in our imported module
80
+ module_main_cls = None
81
+ for name, obj in module.__dict__.items():
82
+ if isinstance(obj, type) and issubclass(obj, main_cls_type):
83
+ if inspect.isabstract(obj):
84
+ continue
85
+ module_main_cls = obj
86
+ break
87
+
88
+ return module_main_cls
89
+
90
+
91
+ def files_to_hash(file_paths: List[str]) -> str:
92
+ """
93
+ Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
94
+ """
95
+ # List all python files in directories if directories are supplied as part of external imports
96
+ to_use_files: List[Union[Path, str]] = []
97
+ for file_path in file_paths:
98
+ if os.path.isdir(file_path):
99
+ to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
100
+ else:
101
+ to_use_files.append(file_path)
102
+
103
+ # Get the code from all these files
104
+ lines = []
105
+ for file_path in to_use_files:
106
+ with open(file_path, encoding="utf-8") as f:
107
+ lines.extend(f.readlines())
108
+ return _hash_python_lines(lines)
109
+
110
+
111
+ def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
112
+ """Convert a link to a file on a github repo in a link to the raw github object."""
113
+ parsed = urlparse(url_path)
114
+ sub_directory = None
115
+ if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
116
+ if "blob" in url_path:
117
+ if not url_path.endswith(".py"):
118
+ raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
119
+ url_path = url_path.replace("blob", "raw") # Point to the raw file
120
+ else:
121
+ # Parse github url to point to zip
122
+ github_path = parsed.path[1:]
123
+ repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
124
+ repo_owner, repo_name = repo_info.split("/")
125
+ url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
126
+ sub_directory = f"{repo_name}-{branch}"
127
+ return url_path, sub_directory
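To illustrate the two branches above (the repository name and branch are placeholders):

```python
from evaluate.loading import convert_github_url

# "blob" links to a single .py file are rewritten to point at the raw file:
convert_github_url("https://github.com/user/repo/blob/main/utils.py")
# -> ("https://github.com/user/repo/raw/main/utils.py", None)

# Repository (or tree) links are rewritten to a zip archive of the branch,
# together with the sub-directory created when the archive is extracted:
convert_github_url("https://github.com/user/repo/tree/main")
# -> ("https://github.com/user/repo/archive/main.zip", "repo-main")
```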
128
+
129
+
130
+ def increase_load_count(name: str, resource_type: str):
131
+ """Update the download count of a dataset or metric."""
132
+ if not config.HF_EVALUATE_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
133
+ try:
134
+ head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
135
+ except Exception:
136
+ pass
137
+
138
+
139
+ def get_imports(file_path: str) -> Tuple[str, str, str, str]:
140
+ """Find whether we should import or clone additional files for a given processing script.
141
+ It also lists the imports.
142
+
143
+ We allow:
144
+ - library dependencies,
145
+ - local dependencies and
146
+ - external dependencies whose URL is specified with a comment starting with "# From:" followed by the raw URL to a file, an archive, or a GitHub repository.
147
+ External dependencies will be downloaded (and extracted if needed) in the dataset folder.
148
+ We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
149
+
150
+ Note that only direct imports in the dataset processing script will be handled.
151
+ We don't recursively explore the additional imports to download further files.
152
+
153
+ Example::
154
+
155
+ import tensorflow
156
+ import .c4_utils
157
+ import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
158
+ """
159
+ lines = []
160
+ with open(file_path, encoding="utf-8") as f:
161
+ lines.extend(f.readlines())
162
+
163
+ logger.debug(f"Checking {file_path} for additional imports.")
164
+ imports: List[Tuple[str, str, str, Optional[str]]] = []
165
+ is_in_docstring = False
166
+ for line in lines:
167
+ docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
168
+
169
+ if len(docstr_start_match) == 1:
170
+ # flip True <=> False only if docstring
171
+ # starts at line without finishing
172
+ is_in_docstring = not is_in_docstring
173
+
174
+ if is_in_docstring:
175
+ # import statements in docstrings should
176
+ # not be added as required dependencies
177
+ continue
178
+
179
+ match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
180
+ if match is None:
181
+ match = re.match(
182
+ r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
183
+ line,
184
+ flags=re.MULTILINE,
185
+ )
186
+ if match is None:
187
+ continue
188
+ if match.group(1):
189
+ # The import starts with a '.', we will download the relevant file
190
+ if any(imp[1] == match.group(2) for imp in imports):
191
+ # We already have this import
192
+ continue
193
+ if match.group(3):
194
+ # The import has a comment with 'From:', we'll retrieve it from the given url
195
+ url_path = match.group(3)
196
+ url_path, sub_directory = convert_github_url(url_path)
197
+ imports.append(("external", match.group(2), url_path, sub_directory))
198
+ elif match.group(2):
199
+ # The import should be at the same place as the file
200
+ imports.append(("internal", match.group(2), match.group(2), None))
201
+ else:
202
+ if match.group(3):
203
+ # The import has a comment with `From: git+https:...`, asks user to pip install from git.
204
+ url_path = match.group(3)
205
+ imports.append(("library", match.group(2), url_path, None))
206
+ else:
207
+ imports.append(("library", match.group(2), match.group(2), None))
208
+
209
+ return imports
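A minimal sketch of what the parser above returns for a small, made-up script (the file contents are assumptions chosen only for illustration):

```python
import tempfile

from evaluate.loading import get_imports

script = "import numpy\nfrom .local_utils import helper\n"
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write(script)

print(get_imports(f.name))
# Expected, given the parsing rules above:
# [('library', 'numpy', 'numpy', None), ('internal', 'local_utils', 'local_utils', None)]
```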
210
+
211
+
212
+ def _download_additional_modules(
213
+ name: str, base_path: str, imports: Tuple[str, str, str, str], download_config: Optional[DownloadConfig]
214
+ ) -> List[Tuple[str, str]]:
215
+ """
216
+ Download additional module for a module <name>.py at URL (or local path) <base_path>/<name>.py
217
+ The imports must have been parsed first using ``get_imports``.
218
+
219
+ If some modules need to be installed with pip, an error is raised showing how to install them.
220
+ This function return the list of downloaded modules as tuples (import_name, module_file_path).
221
+
222
+ The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
223
+ """
224
+ local_imports = []
225
+ library_imports = []
226
+ download_config = download_config.copy()
227
+ if download_config.download_desc is None:
228
+ download_config.download_desc = "Downloading extra modules"
229
+ for import_type, import_name, import_path, sub_directory in imports:
230
+ if import_type == "library":
231
+ library_imports.append((import_name, import_path)) # Import from a library
232
+ continue
233
+
234
+ if import_name == name:
235
+ raise ValueError(
236
+ f"Error in the {name} script, importing relative {import_name} module "
237
+ f"but {import_name} is the name of the script. "
238
+ f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
239
+ f"comment pointing to the original relative import file path."
240
+ )
241
+ if import_type == "internal":
242
+ url_or_filename = url_or_path_join(base_path, import_path + ".py")
243
+ elif import_type == "external":
244
+ url_or_filename = import_path
245
+ else:
246
+ raise ValueError("Wrong import_type")
247
+
248
+ local_import_path = cached_path(
249
+ url_or_filename,
250
+ download_config=download_config,
251
+ )
252
+ if sub_directory is not None:
253
+ local_import_path = os.path.join(local_import_path, sub_directory)
254
+ local_imports.append((import_name, local_import_path))
255
+
256
+ # Check library imports
257
+ needs_to_be_installed = set()
258
+ for library_import_name, library_import_path in library_imports:
259
+ try:
260
+ lib = importlib.import_module(library_import_name) # noqa F841
261
+ except ImportError:
262
+ library_import_name = "scikit-learn" if library_import_name == "sklearn" else library_import_name
263
+ needs_to_be_installed.add((library_import_name, library_import_path))
264
+ if needs_to_be_installed:
265
+ raise ImportError(
266
+ f"To be able to use {name}, you need to install the following dependencies: "
267
+ f"{[lib_name for lib_name, lib_path in needs_to_be_installed]}. You can install them with 'pip install "
268
+ f"{' '.join([lib_path for lib_name, lib_path in needs_to_be_installed])}'."
269
+ )
270
+ return local_imports
271
+
272
+
273
+ def _copy_script_and_other_resources_in_importable_dir(
274
+ name: str,
275
+ importable_directory_path: str,
276
+ subdirectory_name: str,
277
+ original_local_path: str,
278
+ local_imports: List[Tuple[str, str]],
279
+ additional_files: List[Tuple[str, str]],
280
+ download_mode: Optional[DownloadMode],
281
+ ) -> str:
282
+ """Copy a script and its required imports to an importable directory
283
+
284
+ Args:
285
+ name (str): name of the resource to load
286
+ importable_directory_path (str): path to the loadable folder in the dynamic modules directory
287
+ subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
288
+ original_local_path (str): local path to the resource script
289
+ local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
290
+ additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
291
+ download_mode (Optional[DownloadMode]): download mode
292
+
293
+ Return:
294
+ importable_local_file: path to an importable module with importlib.import_module
295
+ """
296
+
297
+ # Define a directory with a unique name in our dataset or metric folder
298
+ # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
299
+ # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
300
+ importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
301
+ importable_local_file = os.path.join(importable_subdirectory, name + ".py")
302
+
303
+ # Prevent parallel disk operations
304
+ lock_path = importable_directory_path + ".lock"
305
+ with FileLock(lock_path):
306
+ # Create main dataset/metrics folder if needed
307
+ if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
308
+ shutil.rmtree(importable_directory_path)
309
+ os.makedirs(importable_directory_path, exist_ok=True)
310
+
311
+ # add an __init__ file to the main dataset folder if needed
312
+ init_file_path = os.path.join(importable_directory_path, "__init__.py")
313
+ if not os.path.exists(init_file_path):
314
+ with open(init_file_path, "w"):
315
+ pass
316
+
317
+ # Create hash dataset folder if needed
318
+ os.makedirs(importable_subdirectory, exist_ok=True)
319
+ # add an __init__ file to the hash dataset folder if needed
320
+ init_file_path = os.path.join(importable_subdirectory, "__init__.py")
321
+ if not os.path.exists(init_file_path):
322
+ with open(init_file_path, "w"):
323
+ pass
324
+
325
+ # Copy dataset.py file in hash folder if needed
326
+ if not os.path.exists(importable_local_file):
327
+ shutil.copyfile(original_local_path, importable_local_file)
328
+
329
+ # Record metadata associating original dataset path with local unique folder
330
+ meta_path = importable_local_file.split(".py")[0] + ".json"
331
+ if not os.path.exists(meta_path):
332
+ meta = {"original file path": original_local_path, "local file path": importable_local_file}
333
+ # the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
334
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
335
+ json.dump(meta, meta_file)
336
+
337
+ # Copy all the additional imports
338
+ for import_name, import_path in local_imports:
339
+ if os.path.isfile(import_path):
340
+ full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
341
+ if not os.path.exists(full_path_local_import):
342
+ shutil.copyfile(import_path, full_path_local_import)
343
+ elif os.path.isdir(import_path):
344
+ full_path_local_import = os.path.join(importable_subdirectory, import_name)
345
+ if not os.path.exists(full_path_local_import):
346
+ shutil.copytree(import_path, full_path_local_import)
347
+ else:
348
+ raise OSError(f"Error with local import at {import_path}")
349
+
350
+ # Copy additional files like the dataset infos file if needed
351
+ for file_name, original_path in additional_files:
352
+ destination_additional_path = os.path.join(importable_subdirectory, file_name)
353
+ if not os.path.exists(destination_additional_path) or not filecmp.cmp(
354
+ original_path, destination_additional_path
355
+ ):
356
+ shutil.copyfile(original_path, destination_additional_path)
357
+ return importable_local_file
358
+
359
+
360
+ def _create_importable_file(
361
+ local_path: str,
362
+ local_imports: List[Tuple[str, str]],
363
+ additional_files: List[Tuple[str, str]],
364
+ dynamic_modules_path: str,
365
+ module_namespace: str,
366
+ name: str,
367
+ download_mode: DownloadMode,
368
+ ) -> Tuple[str, str]:
369
+ importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
370
+ Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
371
+ (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
372
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
373
+ importable_local_file = _copy_script_and_other_resources_in_importable_dir(
374
+ name=name.split("/")[-1],
375
+ importable_directory_path=importable_directory_path,
376
+ subdirectory_name=hash,
377
+ original_local_path=local_path,
378
+ local_imports=local_imports,
379
+ additional_files=additional_files,
380
+ download_mode=download_mode,
381
+ )
382
+ logger.debug(f"Created importable dataset file at {importable_local_file}")
383
+ module_path = ".".join(
384
+ [os.path.basename(dynamic_modules_path), module_namespace, name.replace("/", "--"), hash, name.split("/")[-1]]
385
+ )
386
+ return module_path, hash
387
+
388
+
389
+ @dataclass
390
+ class ImportableModule:
391
+ module_path: str
392
+ hash: str
393
+
394
+
395
+ class _EvaluationModuleFactory:
396
+ def get_module(self) -> ImportableModule:
397
+ raise NotImplementedError
398
+
399
+
400
+ class LocalEvaluationModuleFactory(_EvaluationModuleFactory):
401
+ """Get the module of a local metric. The metric script is loaded from a local script."""
402
+
403
+ def __init__(
404
+ self,
405
+ path: str,
406
+ module_type: str = "metrics",
407
+ download_config: Optional[DownloadConfig] = None,
408
+ download_mode: Optional[DownloadMode] = None,
409
+ dynamic_modules_path: Optional[str] = None,
410
+ ):
411
+ self.path = path
412
+ self.module_type = module_type
413
+ self.name = Path(path).stem
414
+ self.download_config = download_config or DownloadConfig()
415
+ self.download_mode = download_mode
416
+ self.dynamic_modules_path = dynamic_modules_path
417
+
418
+ def get_module(self) -> ImportableModule:
419
+ # get script and other files
420
+ imports = get_imports(self.path)
421
+ local_imports = _download_additional_modules(
422
+ name=self.name,
423
+ base_path=str(Path(self.path).parent),
424
+ imports=imports,
425
+ download_config=self.download_config,
426
+ )
427
+ # copy the script and the files in an importable directory
428
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
429
+ module_path, hash = _create_importable_file(
430
+ local_path=self.path,
431
+ local_imports=local_imports,
432
+ additional_files=[],
433
+ dynamic_modules_path=dynamic_modules_path,
434
+ module_namespace=self.module_type,
435
+ name=self.name,
436
+ download_mode=self.download_mode,
437
+ )
438
+ # make sure the new module is noticed by the import system
439
+ importlib.invalidate_caches()
440
+ return ImportableModule(module_path, hash)
441
+
442
+
443
+ class HubEvaluationModuleFactory(_EvaluationModuleFactory):
444
+ """Get the module of a metric from a metric repository on the Hub."""
445
+
446
+ def __init__(
447
+ self,
448
+ name: str,
449
+ module_type: str = "metrics",
450
+ revision: Optional[Union[str, Version]] = None,
451
+ download_config: Optional[DownloadConfig] = None,
452
+ download_mode: Optional[DownloadMode] = None,
453
+ dynamic_modules_path: Optional[str] = None,
454
+ ):
455
+ self.name = name
456
+ self.module_type = module_type
457
+ self.revision = revision
458
+ self.download_config = download_config or DownloadConfig()
459
+ self.download_mode = download_mode
460
+ self.dynamic_modules_path = dynamic_modules_path
461
+ assert self.name.count("/") == 1
462
+ increase_load_count(name, resource_type="metric")
463
+
464
+ def download_loading_script(self, revision) -> str:
465
+ file_path = hf_hub_url(path=self.name, name=self.name.split("/")[1] + ".py", revision=revision)
466
+ download_config = self.download_config.copy()
467
+ if download_config.download_desc is None:
468
+ download_config.download_desc = "Downloading builder script"
469
+ return cached_path(file_path, download_config=download_config)
470
+
471
+ def get_module(self) -> ImportableModule:
472
+ revision = self.revision or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION)
473
+
474
+ if re.match(r"\d*\.\d*\.\d*", revision): # revision is version number (three digits separated by full stops)
475
+ revision = "v" + revision # tagging convention on evaluate repository starts with v
476
+
477
+ # get script and other files
478
+ try:
479
+ local_path = self.download_loading_script(revision)
480
+ except FileNotFoundError as err:
481
+ # if there is no file found with current revision tag try to load main
482
+ if self.revision is None and os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION) != "main":
483
+ revision = "main"
484
+ local_path = self.download_loading_script(revision)
485
+ else:
486
+ raise err
487
+
488
+ imports = get_imports(local_path)
489
+ local_imports = _download_additional_modules(
490
+ name=self.name,
491
+ base_path=hf_hub_url(path=self.name, name="", revision=revision),
492
+ imports=imports,
493
+ download_config=self.download_config,
494
+ )
495
+ # copy the script and the files in an importable directory
496
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
497
+ module_path, hash = _create_importable_file(
498
+ local_path=local_path,
499
+ local_imports=local_imports,
500
+ additional_files=[],
501
+ dynamic_modules_path=dynamic_modules_path,
502
+ module_namespace=self.module_type,
503
+ name=self.name,
504
+ download_mode=self.download_mode,
505
+ )
506
+ # make sure the new module is noticed by the import system
507
+ importlib.invalidate_caches()
508
+ return ImportableModule(module_path, hash)
509
+
510
+
511
+ class CachedEvaluationModuleFactory(_EvaluationModuleFactory):
512
+ """
513
+ Get the module of a metric that has been loaded once already and cached.
514
+ The script that is loaded from the cache is the most recent one with a matching name.
515
+ """
516
+
517
+ def __init__(
518
+ self,
519
+ name: str,
520
+ module_type: str = "metrics",
521
+ dynamic_modules_path: Optional[str] = None,
522
+ ):
523
+ self.name = name
524
+ self.module_type = module_type
525
+ self.dynamic_modules_path = dynamic_modules_path
526
+ assert self.name.count("/") == 0
527
+
528
+ def get_module(self) -> ImportableModule:
529
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
530
+ importable_directory_path = os.path.join(dynamic_modules_path, self.module_type, self.name)
531
+ hashes = (
532
+ [h for h in os.listdir(importable_directory_path) if len(h) == 64]
533
+ if os.path.isdir(importable_directory_path)
534
+ else None
535
+ )
536
+ if not hashes:
537
+ raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}")
538
+ # get most recent
539
+
540
+ def _get_modification_time(module_hash):
541
+ return (
542
+ (Path(importable_directory_path) / module_hash / (self.name.split("--")[-1] + ".py")).stat().st_mtime
543
+ )
544
+
545
+ hash = sorted(hashes, key=_get_modification_time)[-1]
546
+ logger.warning(
547
+ f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
548
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
549
+ f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub."
550
+ )
551
+ # make sure the new module is noticed by the import system
552
+ module_path = ".".join(
553
+ [os.path.basename(dynamic_modules_path), self.module_type, self.name, hash, self.name.split("--")[-1]]
554
+ )
555
+ importlib.invalidate_caches()
556
+ return ImportableModule(module_path, hash)
557
+
558
+
559
+ def evaluation_module_factory(
560
+ path: str,
561
+ module_type: Optional[str] = None,
562
+ revision: Optional[Union[str, Version]] = None,
563
+ download_config: Optional[DownloadConfig] = None,
564
+ download_mode: Optional[DownloadMode] = None,
565
+ force_local_path: Optional[str] = None,
566
+ dynamic_modules_path: Optional[str] = None,
567
+ **download_kwargs,
568
+ ) -> ImportableModule:
569
+ """
570
+ Download/extract/cache a metric module.
571
+
572
+ Metric code is cached inside the dynamic modules cache to allow easy imports (avoiding ugly sys.path tweaks).
573
+
574
+ Args:
575
+
576
+ path (str): Path or name of the metric script.
577
+
578
+ - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory):
579
+ -> load the module from the metric script
580
+ e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``.
581
+ - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`)
582
+ -> load the module from the metric script hosted on the Hugging Face Hub (e.g. under the `evaluate-metric` namespace)
583
+ e.g. ``'accuracy'`` or ``'rouge'``.
584
+
585
+ revision (Optional ``Union[str, datasets.Version]``):
586
+ If specified, the module will be loaded from the datasets repository at this version.
587
+ By default:
588
+ - it is set to the local version of the lib.
589
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
590
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
591
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
592
+ download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
593
+ force_local_path (Optional str): Optional path to a local path to download and prepare the script to.
594
+ Used to inspect or modify the script folder.
595
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
596
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
597
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
598
+ download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
599
+
600
+ Returns:
601
+ ImportableModule
602
+ """
603
+ if download_config is None:
604
+ download_config = DownloadConfig(**download_kwargs)
605
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
606
+ download_config.extract_compressed_file = True
607
+ download_config.force_extract = True
608
+
609
+ filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
610
+ if not filename.endswith(".py"):
611
+ filename = filename + ".py"
612
+ combined_path = os.path.join(path, filename)
613
+ # Try locally
614
+ if path.endswith(filename):
615
+ if os.path.isfile(path):
616
+ return LocalEvaluationModuleFactory(
617
+ path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
618
+ ).get_module()
619
+ else:
620
+ raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}")
621
+ elif os.path.isfile(combined_path):
622
+ return LocalEvaluationModuleFactory(
623
+ combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
624
+ ).get_module()
625
+ elif is_relative_path(path) and path.count("/") <= 1 and not force_local_path:
626
+ try:
627
+ # load a canonical evaluation module from hub
628
+ if path.count("/") == 0:
629
+ # if no type provided look through all possible modules
630
+ if module_type is None:
631
+ for current_type in ["metric", "comparison", "measurement"]:
632
+ try:
633
+ return HubEvaluationModuleFactory(
634
+ f"evaluate-{current_type}/{path}",
635
+ revision=revision,
636
+ download_config=download_config,
637
+ download_mode=download_mode,
638
+ dynamic_modules_path=dynamic_modules_path,
639
+ ).get_module()
640
+ except ConnectionError:
641
+ pass
642
+ raise FileNotFoundError
643
+ # if module_type provided load specific module_type
644
+ else:
645
+ return HubEvaluationModuleFactory(
646
+ f"evaluate-{module_type}/{path}",
647
+ revision=revision,
648
+ download_config=download_config,
649
+ download_mode=download_mode,
650
+ dynamic_modules_path=dynamic_modules_path,
651
+ ).get_module()
652
+ # load community evaluation module from hub
653
+ elif path.count("/") == 1:
654
+ return HubEvaluationModuleFactory(
655
+ path,
656
+ revision=revision,
657
+ download_config=download_config,
658
+ download_mode=download_mode,
659
+ dynamic_modules_path=dynamic_modules_path,
660
+ ).get_module()
661
+ except Exception as e1: # noqa: all the attempts failed, before raising the error we should check if the module is already cached.
662
+ # if it's a canonical module we need to check if it's any of the types
663
+ if path.count("/") == 0:
664
+ for current_type in ["metric", "comparison", "measurement"]:
665
+ try:
666
+ return CachedEvaluationModuleFactory(
667
+ f"evaluate-{current_type}--{path}", dynamic_modules_path=dynamic_modules_path
668
+ ).get_module()
669
+ except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist.
670
+ pass
671
+ # if it's a community module we just need to check on path
672
+ elif path.count("/") == 1:
673
+ try:
674
+ return CachedEvaluationModuleFactory(
675
+ path.replace("/", "--"), dynamic_modules_path=dynamic_modules_path
676
+ ).get_module()
677
+ except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist.
678
+ pass
679
+ if not isinstance(e1, (ConnectionError, FileNotFoundError)):
680
+ raise e1 from None
681
+ raise FileNotFoundError(
682
+ f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}. "
683
+ f"Module '{path}' doesn't exist on the Hugging Face Hub either."
684
+ ) from None
685
+ else:
686
+ raise FileNotFoundError(f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}.")
687
+
688
+
689
+ def load(
690
+ path: str,
691
+ config_name: Optional[str] = None,
692
+ module_type: Optional[str] = None,
693
+ process_id: int = 0,
694
+ num_process: int = 1,
695
+ cache_dir: Optional[str] = None,
696
+ experiment_id: Optional[str] = None,
697
+ keep_in_memory: bool = False,
698
+ download_config: Optional[DownloadConfig] = None,
699
+ download_mode: Optional[DownloadMode] = None,
700
+ revision: Optional[Union[str, Version]] = None,
701
+ **init_kwargs,
702
+ ) -> EvaluationModule:
703
+ """Load a [`~evaluate.EvaluationModule`].
704
+
705
+ Args:
706
+
707
+ path (`str`):
708
+ Path to the evaluation processing script with the evaluation builder. Can be either:
709
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
710
+ e.g. `'./metrics/rouge'` or `'./metrics/rouge/rouge.py'`
711
+ - a evaluation module identifier on the HuggingFace evaluate repo e.g. `'rouge'` or `'bleu'` that are in either `'metrics/'`,
712
+ `'comparisons/'`, or `'measurements/'` depending on the provided `module_type`
713
+ config_name (`str`, *optional*):
714
+ Selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
715
+ module_type (`str`, default `'metric'`):
716
+ Type of evaluation module, can be one of `'metric'`, `'comparison'`, or `'measurement'`.
717
+ process_id (`int`, *optional*):
718
+ For distributed evaluation: id of the process.
719
+ num_process (`int`, *optional*):
720
+ For distributed evaluation: total number of processes.
721
+ cache_dir (`str`, *optional*):
722
+ Path to store the temporary predictions and references (default to `~/.cache/huggingface/evaluate/`).
723
+ experiment_id (`str`):
724
+ A specific experiment id. This is used if several distributed evaluations share the same file system.
725
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
726
+ keep_in_memory (`bool`):
727
+ Whether to store the temporary results in memory (defaults to `False`).
728
+ download_config ([`~evaluate.DownloadConfig`], *optional*):
729
+ Specific download configuration parameters.
730
+ download_mode ([`DownloadMode`], defaults to `REUSE_DATASET_IF_EXISTS`):
731
+ Download/generate mode.
732
+ revision (`Union[str, evaluate.Version]`, *optional*):
733
+ If specified, the module will be loaded from the datasets repository
734
+ at this version. By default it is set to the local version of the lib. Specifying a version that is different from
735
+ your local version of the lib might cause compatibility issues.
736
+
737
+ Returns:
738
+ [`evaluate.EvaluationModule`]
739
+
740
+ Example:
741
+
742
+ ```py
743
+ >>> from evaluate import load
744
+ >>> accuracy = load("accuracy")
745
+ ```
746
+ """
747
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
748
+ evaluation_module = evaluation_module_factory(
749
+ path, module_type=module_type, revision=revision, download_config=download_config, download_mode=download_mode
750
+ )
751
+ evaluation_cls = import_main_class(evaluation_module.module_path)
752
+ evaluation_instance = evaluation_cls(
753
+ config_name=config_name,
754
+ process_id=process_id,
755
+ num_process=num_process,
756
+ cache_dir=cache_dir,
757
+ keep_in_memory=keep_in_memory,
758
+ experiment_id=experiment_id,
759
+ hash=evaluation_module.hash,
760
+ **init_kwargs,
761
+ )
762
+
763
+ if module_type and module_type != evaluation_instance.module_type:
764
+ raise TypeError(
765
+ f"No module of module type '{module_type}' found for '{path}' locally or on the Hugging Face Hub. Found a module of module type '{evaluation_instance.module_type}' instead."
766
+ )
767
+
768
+ # Download and prepare resources for the metric
769
+ evaluation_instance.download_and_prepare(download_config=download_config)
770
+
771
+ return evaluation_instance
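End to end, the factory plus `load` give the familiar entry point. A short sketch, assuming the corresponding modules are reachable on the Hub or already in the local cache:

```python
import evaluate

accuracy = evaluate.load("accuracy")                                  # canonical metric
word_length = evaluate.load("word_length", module_type="measurement") # canonical measurement

print(accuracy.compute(references=[0, 1, 1], predictions=[0, 1, 0]))
# {'accuracy': 0.6666666666666666}
```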
env-llmeval/lib/python3.10/site-packages/evaluate/module.py ADDED
@@ -0,0 +1,1029 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """ EvaluationModule base class."""
17
+ import collections
18
+ import itertools
19
+ import os
20
+ import types
21
+ import uuid
22
+ from typing import Any, Dict, List, Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import pyarrow as pa
26
+ from datasets import DatasetInfo, DownloadConfig, DownloadManager
27
+ from datasets.arrow_dataset import Dataset
28
+ from datasets.arrow_reader import ArrowReader
29
+ from datasets.arrow_writer import ArrowWriter
30
+ from datasets.features import Features, Sequence, Value
31
+ from datasets.features.features import _check_non_null_non_empty_recursive
32
+ from datasets.utils.filelock import BaseFileLock, FileLock, Timeout
33
+ from datasets.utils.py_utils import copyfunc, temp_seed, zip_dict
34
+
35
+ from . import config
36
+ from .info import EvaluationModuleInfo
37
+ from .naming import camelcase_to_snakecase
38
+ from .utils.logging import get_logger
39
+
40
+
41
+ logger = get_logger(__name__)
42
+
43
+
44
+ class FileFreeLock(BaseFileLock):
45
+ """Thread lock until a file **cannot** be locked"""
46
+
47
+ def __init__(self, lock_file, *args, **kwargs):
48
+ self.filelock = FileLock(lock_file)
49
+ super().__init__(lock_file, *args, **kwargs)
50
+
51
+ def _acquire(self):
52
+ try:
53
+ self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
54
+ except Timeout:
55
+ # We couldn't acquire the lock, the file is locked!
56
+ self._lock_file_fd = self.filelock.lock_file
57
+ else:
58
+ # We were able to acquire the lock, the file is not yet locked!
59
+ self.filelock.release()
60
+ self._lock_file_fd = None
61
+
62
+ def _release(self):
63
+ self._lock_file_fd = None
64
+
65
+
66
+ # lists - summarize long lists similarly to NumPy
67
+ # arrays/tensors - let the frameworks control formatting
68
+ def summarize_if_long_list(obj):
69
+ if not type(obj) == list or len(obj) <= 6:
70
+ return f"{obj}"
71
+
72
+ def format_chunk(chunk):
73
+ return ", ".join(repr(x) for x in chunk)
74
+
75
+ return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
76
+
77
+
78
+ class EvaluationModuleInfoMixin:
79
+ """This base class exposes some attributes of EvaluationModuleInfo
80
+ at the base level of the EvaluationModule for easy access.
81
+ """
82
+
83
+ def __init__(self, info: EvaluationModuleInfo):
84
+ self._module_info = info
85
+
86
+ @property
87
+ def info(self):
88
+ """:class:`evaluate.EvaluationModuleInfo` object containing all the metadata in the evaluation module."""
89
+ return self._module_info
90
+
91
+ @property
92
+ def name(self) -> str:
93
+ return self._module_info.module_name
94
+
95
+ @property
96
+ def experiment_id(self) -> Optional[str]:
97
+ return self._module_info.experiment_id
98
+
99
+ @property
100
+ def description(self) -> str:
101
+ return self._module_info.description
102
+
103
+ @property
104
+ def citation(self) -> str:
105
+ return self._module_info.citation
106
+
107
+ @property
108
+ def features(self) -> Features:
109
+ return self._module_info.features
110
+
111
+ @property
112
+ def inputs_description(self) -> str:
113
+ return self._module_info.inputs_description
114
+
115
+ @property
116
+ def homepage(self) -> Optional[str]:
117
+ return self._module_info.homepage
118
+
119
+ @property
120
+ def license(self) -> str:
121
+ return self._module_info.license
122
+
123
+ @property
124
+ def codebase_urls(self) -> Optional[List[str]]:
125
+ return self._module_info.codebase_urls
126
+
127
+ @property
128
+ def reference_urls(self) -> Optional[List[str]]:
129
+ return self._module_info.reference_urls
130
+
131
+ @property
132
+ def streamable(self) -> bool:
133
+ return self._module_info.streamable
134
+
135
+ @property
136
+ def format(self) -> Optional[str]:
137
+ return self._module_info.format
138
+
139
+ @property
140
+ def module_type(self) -> str:
141
+ return self._module_info.module_type
142
+
143
+
144
+ class EvaluationModule(EvaluationModuleInfoMixin):
145
+ """An `EvaluationModule` is the base class and common API for metrics, comparisons, and measurements.
146
+
147
+ Args:
148
+ config_name (`str`):
149
+ This is used to define a hash specific to a module computation script and prevents the module's data
150
+ from being overridden when the module loading script is modified.
151
+ keep_in_memory (`bool`):
152
+ Keep all predictions and references in memory. Not possible in distributed settings.
153
+ cache_dir (`str`):
154
+ Path to a directory in which temporary prediction/references data will be stored.
155
+ The data directory should be located on a shared file-system in distributed setups.
156
+ num_process (`int`):
157
+ Specify the total number of nodes in a distributed setting.
158
+ This is useful to compute the module in distributed setups (in particular non-additive modules like F1).
159
+ process_id (`int`):
160
+ Specify the id of the current process in a distributed setup (between 0 and num_process-1)
161
+ This is useful to compute the module in distributed setups (in particular non-additive metrics like F1).
162
+ seed (`int`, optional):
163
+ If specified, this will temporarily set numpy's random seed when [`~evaluate.EvaluationModule.compute`] is run.
164
+ experiment_id (`str`):
165
+ A specific experiment id. This is used if several distributed evaluations share the same file system.
166
+ This is useful to compute the module in distributed setups (in particular non-additive metrics like F1).
167
+ hash (`str`):
168
+ Used to identify the evaluation module according to the hashed file contents.
169
+ max_concurrent_cache_files (`int`):
170
+ Max number of concurrent module cache files (default `10000`).
171
+ timeout (`Union[int, float]`):
172
+ Timeout in seconds for distributed setting synchronization.
173
+ """
174
+
175
+ def __init__(
176
+ self,
177
+ config_name: Optional[str] = None,
178
+ keep_in_memory: bool = False,
179
+ cache_dir: Optional[str] = None,
180
+ num_process: int = 1,
181
+ process_id: int = 0,
182
+ seed: Optional[int] = None,
183
+ experiment_id: Optional[str] = None,
184
+ hash: str = None,
185
+ max_concurrent_cache_files: int = 10000,
186
+ timeout: Union[int, float] = 100,
187
+ **kwargs,
188
+ ):
189
+ # prepare info
190
+ self.config_name = config_name or "default"
191
+ info = self._info()
192
+ info.module_name = camelcase_to_snakecase(self.__class__.__name__)
193
+ info.config_name = self.config_name
194
+ info.experiment_id = experiment_id or "default_experiment"
195
+ EvaluationModuleInfoMixin.__init__(self, info) # For easy access on low level
196
+
197
+ # Safety checks on num_process and process_id
198
+ if not isinstance(process_id, int) or process_id < 0:
199
+ raise ValueError("'process_id' should be an integer greater than or equal to 0")
200
+ if not isinstance(num_process, int) or num_process <= process_id:
201
+ raise ValueError("'num_process' should be a number greater than process_id")
202
+ if keep_in_memory and num_process != 1:
203
+ raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
204
+
205
+ self.num_process = num_process
206
+ self.process_id = process_id
207
+ self.max_concurrent_cache_files = max_concurrent_cache_files
208
+
209
+ self.keep_in_memory = keep_in_memory
210
+ self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
211
+ self.data_dir = self._build_data_dir()
212
+ if seed is None:
213
+ _, seed, pos, *_ = np.random.get_state()
214
+ self.seed: int = seed[pos] if pos < 624 else seed[0]
215
+ else:
216
+ self.seed: int = seed
217
+ self.timeout: Union[int, float] = timeout
218
+
219
+ # Update 'compute' and 'add' docstring
220
+ # methods need to be copied otherwise it changes the docstrings of every instance
221
+ self.compute = types.MethodType(copyfunc(self.compute), self)
222
+ self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
223
+ self.add = types.MethodType(copyfunc(self.add), self)
224
+ self.compute.__func__.__doc__ += self.info.inputs_description
225
+ self.add_batch.__func__.__doc__ += self.info.inputs_description
226
+ self.add.__func__.__doc__ += self.info.inputs_description
227
+
228
+ # self.arrow_schema = pa.schema(field for field in self.info.features.type)
229
+ self.selected_feature_format = None
230
+ self.buf_writer = None
231
+ self.writer = None
232
+ self.writer_batch_size = None
233
+ self.data = None
234
+
235
+ # This is the cache file we store our predictions/references in
236
+ # Keep it None for now so we can (cloud)pickle the object
237
+ self.cache_file_name = None
238
+ self.filelock = None
239
+ self.rendez_vous_lock = None
240
+
241
+ # This is all the cache files on which we have a lock when we are in a distributed setting
242
+ self.file_paths = None
243
+ self.filelocks = None
244
+
245
+ # This fingerprints the evaluation module according to the hashed contents of the module code
246
+ self._hash = hash
247
+
248
+ def __len__(self):
249
+ """Return the number of examples (predictions or predictions/references pairs)
250
+ currently stored in the evaluation module's cache.
251
+ """
252
+ return 0 if self.writer is None else len(self.writer)
253
+
254
+ def __repr__(self):
255
+ return (
256
+ f'EvaluationModule(name: "{self.name}", module_type: "{self.module_type}", '
257
+ f'features: {self.features}, usage: """{self.inputs_description}""", '
258
+ f"stored examples: {len(self)})"
259
+ )
260
+
261
+ def _build_data_dir(self):
262
+ """Path of this evaluation module in cache_dir:
263
+ Will be:
264
+ self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
265
+ If any of these elements is missing, the corresponding subfolder is dropped.
266
+ """
267
+ builder_data_dir = self._data_dir_root
268
+ builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
269
+ os.makedirs(builder_data_dir, exist_ok=True)
270
+ return builder_data_dir
271
+
272
+ def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
273
+ """Create a new cache file. If the default cache file is already in use, we generate a new one with a random uuid."""
274
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
275
+ filelock = None
276
+ for i in range(self.max_concurrent_cache_files):
277
+ filelock = FileLock(file_path + ".lock")
278
+ try:
279
+ filelock.acquire(timeout=timeout)
280
+ except Timeout:
281
+ # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup),
282
+ # We raise an error
283
+ if self.num_process != 1:
284
+ raise ValueError(
285
+ f"Error in _create_cache_file: another evaluation module instance is already using the local cache file at {file_path}. "
286
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
287
+ f"between distributed evaluation module instances."
288
+ ) from None
289
+ if i == self.max_concurrent_cache_files - 1:
290
+ raise ValueError(
291
+ f"Cannot acquire lock, too many evaluation module instance are operating concurrently on this file system."
292
+ f"You should set a larger value of max_concurrent_cache_files when creating the evaluation module "
293
+ f"(current value is {self.max_concurrent_cache_files})."
294
+ ) from None
295
+ # In other cases (allowed to find a new file name and not yet at the max number of attempts) we try to sample a new file name.
296
+ file_uuid = str(uuid.uuid4())
297
+ file_path = os.path.join(
298
+ self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
299
+ )
300
+ else:
301
+ break
302
+
303
+ return file_path, filelock
304
+
305
+ def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
306
+ """Get a lock on all the cache files in a distributed setup.
307
+ We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
308
+ """
309
+ if self.num_process == 1:
310
+ if self.cache_file_name is None:
311
+ raise ValueError(
312
+ "Evaluation module cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
313
+ "at least once before calling `compute`."
314
+ )
315
+ file_paths = [self.cache_file_name]
316
+ else:
317
+ file_paths = [
318
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
319
+ for process_id in range(self.num_process)
320
+ ]
321
+
322
+ # Let's acquire a lock on each process files to be sure they are finished writing
323
+ filelocks = []
324
+ for process_id, file_path in enumerate(file_paths):
325
+ if process_id == 0: # process 0 already has its lock file
326
+ filelocks.append(self.filelock)
327
+ else:
328
+ filelock = FileLock(file_path + ".lock")
329
+ try:
330
+ filelock.acquire(timeout=self.timeout)
331
+ except Timeout:
332
+ raise ValueError(
333
+ f"Cannot acquire lock on cached file {file_path} for process {process_id}."
334
+ ) from None
335
+ else:
336
+ filelocks.append(filelock)
337
+
338
+ return file_paths, filelocks
339
+
340
+ def _check_all_processes_locks(self):
341
+ expected_lock_file_names = [
342
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
343
+ for process_id in range(self.num_process)
344
+ ]
345
+ for expected_lock_file_name in expected_lock_file_names:
346
+ nofilelock = FileFreeLock(expected_lock_file_name)
347
+ try:
348
+ nofilelock.acquire(timeout=self.timeout)
349
+ except Timeout:
350
+ raise ValueError(
351
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
352
+ ) from None
353
+ else:
354
+ nofilelock.release()
355
+
356
+ def _check_rendez_vous(self):
357
+ expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
358
+ nofilelock = FileFreeLock(expected_lock_file_name)
359
+ try:
360
+ nofilelock.acquire(timeout=self.timeout)
361
+ except Timeout:
362
+ raise ValueError(
363
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
364
+ ) from None
365
+ else:
366
+ nofilelock.release()
367
+ lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
368
+ rendez_vous_lock = FileLock(lock_file_name)
369
+ try:
370
+ rendez_vous_lock.acquire(timeout=self.timeout)
371
+ except Timeout:
372
+ raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
373
+ else:
374
+ rendez_vous_lock.release()
375
+
376
+ def _finalize(self):
377
+ """Close all the writing processes and load/gather the data
378
+ from all the nodes if main node or all_process is True.
379
+ """
380
+ if self.writer is not None:
381
+ self.writer.finalize()
382
+ self.writer = None
383
+ # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
384
+ if self.filelock is not None and self.process_id > 0:
385
+ self.filelock.release()
386
+
387
+ if self.keep_in_memory:
388
+ # Read the predictions and references
389
+ reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.selected_feature_format))
390
+ self.data = Dataset.from_buffer(self.buf_writer.getvalue())
391
+
392
+ elif self.process_id == 0:
393
+ # Let's acquire a lock on each node files to be sure they are finished writing
394
+ file_paths, filelocks = self._get_all_cache_files()
395
+
396
+ # Read the predictions and references
397
+ try:
398
+ reader = ArrowReader(path="", info=DatasetInfo(features=self.selected_feature_format))
399
+ self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
400
+ except FileNotFoundError:
401
+ raise ValueError(
402
+ "Error in finalize: another evaluation module instance is already using the local cache file. "
403
+ "Please specify an experiment_id to avoid collision between distributed evaluation module instances."
404
+ ) from None
405
+
406
+ # Store file paths and locks and we will release/delete them after the computation.
407
+ self.file_paths = file_paths
408
+ self.filelocks = filelocks
409
+
410
+ def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
411
+ """Compute the evaluation module.
412
+
413
+ Usage of positional arguments is not allowed to prevent mistakes.
414
+
415
+ Args:
416
+ predictions (`list/array/tensor`, *optional*):
417
+ Predictions.
418
+ references (`list/array/tensor`, *optional*):
419
+ References.
420
+ **kwargs (optional):
421
+ Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
422
+ method (see details in the docstring).
423
+
424
+ Return:
425
+ `dict` or `None`
426
+
427
+ - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
428
+ - `None` if the evaluation module is not run on the main process (`process_id != 0`).
429
+
430
+ ```py
431
+ >>> import evaluate
432
+ >>> accuracy = evaluate.load("accuracy")
433
+ >>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
434
+ ```
435
+ """
436
+ all_kwargs = {"predictions": predictions, "references": references, **kwargs}
437
+ if predictions is None and references is None:
438
+ missing_kwargs = {k: None for k in self._feature_names() if k not in all_kwargs}
439
+ all_kwargs.update(missing_kwargs)
440
+ else:
441
+ missing_inputs = [k for k in self._feature_names() if k not in all_kwargs]
442
+ if missing_inputs:
443
+ raise ValueError(
444
+ f"Evaluation module inputs are missing: {missing_inputs}. All required inputs are {list(self._feature_names())}"
445
+ )
446
+ inputs = {input_name: all_kwargs[input_name] for input_name in self._feature_names()}
447
+ compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self._feature_names()}
448
+
449
+ if any(v is not None for v in inputs.values()):
450
+ self.add_batch(**inputs)
451
+ self._finalize()
452
+
453
+ self.cache_file_name = None
454
+ self.filelock = None
455
+ self.selected_feature_format = None
456
+
457
+ if self.process_id == 0:
458
+ self.data.set_format(type=self.info.format)
459
+
460
+ inputs = {input_name: self.data[input_name] for input_name in self._feature_names()}
461
+ with temp_seed(self.seed):
462
+ output = self._compute(**inputs, **compute_kwargs)
463
+
464
+ if self.buf_writer is not None:
465
+ self.buf_writer = None
466
+ del self.data
467
+ self.data = None
468
+ else:
469
+ # Release locks and delete all the cache files. Process 0 is released last.
470
+ for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
471
+ logger.info(f"Removing {file_path}")
472
+ del self.data
473
+ self.data = None
474
+ del self.writer
475
+ self.writer = None
476
+ os.remove(file_path)
477
+ filelock.release()
478
+
479
+ return output
480
+ else:
481
+ return None
482
+
483
+ def add_batch(self, *, predictions=None, references=None, **kwargs):
484
+ """Add a batch of predictions and references for the evaluation module's stack.
485
+
486
+ Args:
487
+ predictions (`list/array/tensor`, *optional*):
488
+ Predictions.
489
+ references (`list/array/tensor`, *optional*):
490
+ References.
491
+
492
+ Example:
493
+
494
+ ```py
495
+ >>> import evaluate
496
+ >>> accuracy = evaluate.load("accuracy")
497
+ >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
498
+ ... accuracy.add_batch(references=refs, predictions=preds)
499
+ ```
500
+ """
501
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
502
+ if bad_inputs:
503
+ raise ValueError(
504
+ f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
505
+ )
506
+ batch = {"predictions": predictions, "references": references, **kwargs}
507
+ batch = {input_name: batch[input_name] for input_name in self._feature_names()}
508
+ if self.writer is None:
509
+ self.selected_feature_format = self._infer_feature_from_batch(batch)
510
+ self._init_writer()
511
+ try:
512
+ for key, column in batch.items():
513
+ if len(column) > 0:
514
+ self._enforce_nested_string_type(self.selected_feature_format[key], column[0])
515
+ batch = self.selected_feature_format.encode_batch(batch)
516
+ self.writer.write_batch(batch)
517
+ except (pa.ArrowInvalid, TypeError):
518
+ if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
519
+ col0 = next(iter(batch))
520
+ bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
521
+ error_msg = (
522
+ f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
523
+ )
524
+ elif set(self.selected_feature_format) != {"references", "predictions"}:
525
+ error_msg = (
526
+ f"Module inputs don't match the expected format.\n"
527
+ f"Expected format: {self.selected_feature_format },\n"
528
+ )
529
+ error_msg_inputs = ",\n".join(
530
+ f"Input {input_name}: {summarize_if_long_list(batch[input_name])}"
531
+ for input_name in self.selected_feature_format
532
+ )
533
+ error_msg += error_msg_inputs
534
+ else:
535
+ error_msg = (
536
+ f"Predictions and/or references don't match the expected format.\n"
537
+ f"Expected format: {self.selected_feature_format },\n"
538
+ f"Input predictions: {summarize_if_long_list(predictions)},\n"
539
+ f"Input references: {summarize_if_long_list(references)}"
540
+ )
541
+ raise ValueError(error_msg) from None
542
+
543
+ def add(self, *, prediction=None, reference=None, **kwargs):
544
+ """Add one prediction and reference for the evaluation module's stack.
545
+
546
+ Args:
547
+ prediction (`list/array/tensor`, *optional*):
548
+ Predictions.
549
+ reference (`list/array/tensor`, *optional*):
550
+ References.
551
+
552
+ Example:
553
+
554
+ ```py
555
+ >>> import evaluate
556
+ >>> accuracy = evaluate.load("accuracy")
557
+ >>> accuracy.add(reference=0, prediction=1)
558
+ ```
559
+ """
560
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
561
+ if bad_inputs:
562
+ raise ValueError(
563
+ f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
564
+ )
565
+ example = {"predictions": prediction, "references": reference, **kwargs}
566
+ example = {input_name: example[input_name] for input_name in self._feature_names()}
567
+ if self.writer is None:
568
+ self.selected_feature_format = self._infer_feature_from_example(example)
569
+ self._init_writer()
570
+ try:
571
+ self._enforce_nested_string_type(self.selected_feature_format, example)
572
+ example = self.selected_feature_format.encode_example(example)
573
+ self.writer.write(example)
574
+ except (pa.ArrowInvalid, TypeError):
575
+ error_msg = (
576
+ f"Evaluation module inputs don't match the expected format.\n"
577
+ f"Expected format: {self.selected_feature_format},\n"
578
+ )
579
+ error_msg_inputs = ",\n".join(
580
+ f"Input {input_name}: {summarize_if_long_list(example[input_name])}"
581
+ for input_name in self.selected_feature_format
582
+ )
583
+ error_msg += error_msg_inputs
584
+ raise ValueError(error_msg) from None
585
+
586
+ def _infer_feature_from_batch(self, batch):
587
+ if isinstance(self.features, Features):
588
+ return self.features
589
+ else:
590
+ example = dict([(k, v[0]) for k, v in batch.items()])
591
+ return self._infer_feature_from_example(example)
592
+
593
+ def _infer_feature_from_example(self, example):
594
+ if isinstance(self.features, Features):
595
+ return self.features
596
+ else:
597
+ for features in self.features:
598
+ try:
599
+ self._enforce_nested_string_type(features, example)
600
+ features.encode_example(example)
601
+ return features
602
+ except (ValueError, TypeError):
603
+ continue
604
+ feature_strings = "\n".join([f"Feature option {i}: {feature}" for i, feature in enumerate(self.features)])
605
+ error_msg = (
606
+ f"Predictions and/or references don't match the expected format.\n"
607
+ f"Expected format:\n{feature_strings},\n"
608
+ f"Input predictions: {summarize_if_long_list(example['predictions'])},\n"
609
+ f"Input references: {summarize_if_long_list(example['references'])}"
610
+ )
611
+ raise ValueError(error_msg) from None
612
+
613
+ def _feature_names(self):
614
+ if isinstance(self.features, list):
615
+ feature_names = list(self.features[0].keys())
616
+ else:
617
+ feature_names = list(self.features.keys())
618
+ return feature_names
619
+
620
+ def _init_writer(self, timeout=1):
621
+ if self.num_process > 1:
622
+ if self.process_id == 0:
623
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
624
+ self.rendez_vous_lock = FileLock(file_path)
625
+ try:
626
+ self.rendez_vous_lock.acquire(timeout=timeout)
627
+ except TimeoutError:
628
+ raise ValueError(
629
+ f"Error in _init_writer: another evalution module instance is already using the local cache file at {file_path}. "
630
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
631
+ f"between distributed evaluation module instances."
632
+ ) from None
633
+
634
+ if self.keep_in_memory:
635
+ self.buf_writer = pa.BufferOutputStream()
636
+ self.writer = ArrowWriter(
637
+ features=self.selected_feature_format, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
638
+ )
639
+ else:
640
+ self.buf_writer = None
641
+
642
+ # Get cache file name and lock it
643
+ if self.cache_file_name is None or self.filelock is None:
644
+ cache_file_name, filelock = self._create_cache_file() # get ready
645
+ self.cache_file_name = cache_file_name
646
+ self.filelock = filelock
647
+
648
+ self.writer = ArrowWriter(
649
+ features=self.selected_feature_format,
650
+ path=self.cache_file_name,
651
+ writer_batch_size=self.writer_batch_size,
652
+ )
653
+ # Setup rendez-vous here if we are in a distributed setting
654
+ if self.num_process > 1:
655
+ if self.process_id == 0:
656
+ self._check_all_processes_locks() # wait for everyone to be ready
657
+ self.rendez_vous_lock.release() # let everyone go
658
+ else:
659
+ self._check_rendez_vous() # wait for master to be ready and to let everyone go
660
+
661
+ def _info(self) -> EvaluationModuleInfo:
662
+ """Construct the EvaluationModuleInfo object. See `EvaluationModuleInfo` for details.
663
+
664
+ Warning: This function is only called once and the result is cached for all
665
+ following .info() calls.
666
+
667
+ Returns:
668
+ info: (EvaluationModuleInfo) The EvaluationModule information
669
+ """
670
+ raise NotImplementedError
671
+
672
+ def download_and_prepare(
673
+ self,
674
+ download_config: Optional[DownloadConfig] = None,
675
+ dl_manager: Optional[DownloadManager] = None,
676
+ ):
677
+ """Downloads and prepares evaluation module for reading.
678
+
679
+ Args:
680
+ download_config ([`DownloadConfig`], *optional*):
681
+ Specific download configuration parameters.
682
+ dl_manager ([`DownloadManager`], *optional*):
683
+ Specific download manager to use.
684
+
685
+ Example:
686
+
687
+ ```py
688
+ >>> import evaluate
+ >>> accuracy = evaluate.load("accuracy")
+ >>> accuracy.download_and_prepare()
689
+ ```
690
+ """
691
+ if dl_manager is None:
692
+ if download_config is None:
693
+ download_config = DownloadConfig()
694
+ download_config.cache_dir = os.path.join(self.data_dir, "downloads")
695
+ download_config.force_download = False
696
+
697
+ dl_manager = DownloadManager(
698
+ dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
699
+ )
700
+
701
+ self._download_and_prepare(dl_manager)
702
+
703
+ def _download_and_prepare(self, dl_manager):
704
+ """Downloads and prepares resources for the evaluation module.
705
+
706
+ This is the internal implementation to override, called when the user calls
707
+ `download_and_prepare`. It should download all required resources for the evaluation module.
708
+
709
+ Args:
710
+ dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
711
+ """
712
+ return None
713
+
714
+ def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
715
+ """This method defines the common API for all the evaluation modules in the library"""
716
+ raise NotImplementedError
717
+
718
+ def __del__(self):
719
+ if hasattr(self, "filelock") and self.filelock is not None:
720
+ self.filelock.release()
721
+ if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
722
+ self.rendez_vous_lock.release()
723
+ if hasattr(self, "writer"): # in case it was already deleted
724
+ del self.writer
725
+ if hasattr(self, "data"): # in case it was already deleted
726
+ del self.data
727
+
728
+ def _enforce_nested_string_type(self, schema, obj):
729
+ """
730
+ Recursively checks if there is any Value feature of type string and throws TypeError if corresponding object is not a string.
731
+ Since any Python object can be cast to string this avoids implicitly casting wrong input types (e.g. lists) to string without error.
732
+ """
733
+ # Nested structures: we allow dict, list, tuples, sequences
734
+ if isinstance(schema, dict):
735
+ return [self._enforce_nested_string_type(sub_schema, o) for k, (sub_schema, o) in zip_dict(schema, obj)]
736
+
737
+ elif isinstance(schema, (list, tuple)):
738
+ sub_schema = schema[0]
739
+ return [self._enforce_nested_string_type(sub_schema, o) for o in obj]
740
+ elif isinstance(schema, Sequence):
741
+ # We allow reversing a list of dicts => dict of lists for compatibility with tfds
742
+ if isinstance(schema.feature, dict):
743
+ if isinstance(obj, (list, tuple)):
744
+ # obj is a list of dict
745
+ for k, dict_tuples in zip_dict(schema.feature, *obj):
746
+ for sub_obj in dict_tuples[1:]:
747
+ if _check_non_null_non_empty_recursive(sub_obj, dict_tuples[0]):
748
+ self._enforce_nested_string_type(dict_tuples[0], sub_obj)
749
+ break
750
+ return None
751
+ else:
752
+ # obj is a single dict
753
+ for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
754
+ for sub_obj in sub_objs:
755
+ if _check_non_null_non_empty_recursive(sub_obj, sub_schema):
756
+ self._enforce_nested_string_type(sub_schema, sub_obj)
757
+ break
758
+ return None
759
+ # schema.feature is not a dict
760
+ if isinstance(obj, str): # don't interpret a string as a list
761
+ raise ValueError(f"Got a string but expected a list instead: '{obj}'")
762
+ if obj is None:
763
+ return None
764
+ else:
765
+ if len(obj) > 0:
766
+ for first_elmt in obj:
767
+ if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
768
+ break
769
+ if not isinstance(first_elmt, list):
770
+ return self._enforce_nested_string_type(schema.feature, first_elmt)
771
+
772
+ elif isinstance(schema, Value):
773
+ if pa.types.is_string(schema.pa_type) and not isinstance(obj, str):
774
+ raise TypeError(f"Expected type str but got {type(obj)}.")
775
+
776
+
777
+ class Metric(EvaluationModule):
778
+ """A Metric is the base class and common API for all metrics.
779
+
780
+ Args:
781
+ config_name (`str`):
782
+ This is used to define a hash specific to a metric computation script and prevents the metric's data
783
+ from being overridden when the metric loading script is modified.
784
+ keep_in_memory (`bool`):
785
+ Keep all predictions and references in memory. Not possible in distributed settings.
786
+ cache_dir (`str`):
787
+ Path to a directory in which temporary prediction/references data will be stored.
788
+ The data directory should be located on a shared file-system in distributed setups.
789
+ num_process (`int`):
790
+ Specify the total number of nodes in a distributed setting.
791
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
792
+ process_id (`int`):
793
+ Specify the id of the current process in a distributed setup (between 0 and num_process-1)
794
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
795
+ seed (`int`, *optional*):
796
+ If specified, this will temporarily set numpy's random seed when [`~evaluate.Metric.compute`] is run.
797
+ experiment_id (`str`):
798
+ A specific experiment id. This is used if several distributed evaluations share the same file system.
799
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
800
+ max_concurrent_cache_files (`int`):
801
+ Max number of concurrent metric cache files (default `10000`).
802
+ timeout (`Union[int, float]`):
803
+ Timeout in seconds for distributed setting synchronization.
804
+ """
805
+
806
+
807
+ class Comparison(EvaluationModule):
808
+ """A Comparison is the base class and common API for all comparisons.
809
+
810
+ Args:
811
+ config_name (`str`):
812
+ This is used to define a hash specific to a comparison computation script and prevents the comparison's data
813
+ from being overridden when the comparison loading script is modified.
814
+ keep_in_memory (`bool`):
815
+ Keep all predictions and references in memory. Not possible in distributed settings.
816
+ cache_dir (`str`):
817
+ Path to a directory in which temporary prediction/references data will be stored.
818
+ The data directory should be located on a shared file-system in distributed setups.
819
+ num_process (`int`):
820
+ Specify the total number of nodes in a distributed setting.
821
+ This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
822
+ process_id (`int`):
823
+ Specify the id of the current process in a distributed setup (between 0 and num_process-1)
824
+ This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
825
+ seed (`int`, *optional*):
826
+ If specified, this will temporarily set numpy's random seed when [`~evaluate.Comparison.compute`] is run.
827
+ experiment_id (`str`):
828
+ A specific experiment id. This is used if several distributed evaluations share the same file system.
829
+ This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
830
+ max_concurrent_cache_files (`int`):
831
+ Max number of concurrent comparison cache files (default `10000`).
832
+ timeout (`Union[int, float]`):
833
+ Timeout in seconds for distributed setting synchronization.
834
+ """
835
+
836
+
837
+ class Measurement(EvaluationModule):
838
+ """A Measurement is the base class and common API for all measurements.
839
+
840
+ Args:
841
+ config_name (`str`):
842
+ This is used to define a hash specific to a measurement computation script and prevents the measurement's data
843
+ from being overridden when the measurement loading script is modified.
844
+ keep_in_memory (`bool`):
845
+ Keep all predictions and references in memory. Not possible in distributed settings.
846
+ cache_dir (`str`):
847
+ Path to a directory in which temporary prediction/references data will be stored.
848
+ The data directory should be located on a shared file-system in distributed setups.
849
+ num_process (`int`):
850
+ Specify the total number of nodes in a distributed setting.
851
+ This is useful to compute measurements in distributed setups (in particular non-additive measurements).
852
+ process_id (`int`):
853
+ Specify the id of the current process in a distributed setup (between 0 and num_process-1)
854
+ This is useful to compute measurements in distributed setups (in particular non-additive measurements).
855
+ seed (`int`, *optional*):
856
+ If specified, this will temporarily set numpy's random seed when [`~evaluate.Measurement.compute`] is run.
857
+ experiment_id (`str`):
858
+ A specific experiment id. This is used if several distributed evaluations share the same file system.
859
+ This is useful to compute measurements in distributed setups (in particular non-additive measurements).
860
+ max_concurrent_cache_files (`int`):
861
+ Max number of concurrent measurement cache files (default `10000`).
862
+ timeout (`Union[int, float]`):
863
+ Timeout in seconds for distributed setting synchronization.
864
+ """
865
+
866
+
867
+ class CombinedEvaluations:
868
+ def __init__(self, evaluation_modules, force_prefix=False):
869
+ from .loading import load # avoid circular imports
870
+
871
+ self.evaluation_module_names = None
872
+ if isinstance(evaluation_modules, list):
873
+ self.evaluation_modules = evaluation_modules
874
+ elif isinstance(evaluation_modules, dict):
875
+ self.evaluation_modules = list(evaluation_modules.values())
876
+ self.evaluation_module_names = list(evaluation_modules.keys())
877
+ loaded_modules = []
878
+
879
+ for module in self.evaluation_modules:
880
+ if isinstance(module, str):
881
+ module = load(module)
882
+ loaded_modules.append(module)
883
+ self.evaluation_modules = loaded_modules
884
+
885
+ if self.evaluation_module_names is None:
886
+ self.evaluation_module_names = [module.name for module in self.evaluation_modules]
887
+
888
+ self.force_prefix = force_prefix
889
+
890
+ def add(self, prediction=None, reference=None, **kwargs):
891
+ """Add one prediction and reference for each evaluation module's stack.
892
+
893
+ Args:
894
+ prediction (`list/array/tensor`, *optional*):
895
+ Predictions.
896
+ reference (`list/array/tensor`, *optional*):
897
+ References.
898
+
899
+ Example:
900
+
901
+ ```py
902
+ >>> import evaluate
903
+ >>> accuracy = evaluate.load("accuracy")
904
+ >>> f1 = evaluate.load("f1")
905
+ >>> clf_metrics = evaluate.combine(["accuracy", "f1"])
906
+ >>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
907
+ ... clf_metrics.add(references=ref, predictions=pred)
908
+ ```
909
+ """
910
+ for evaluation_module in self.evaluation_modules:
911
+ batch = {"predictions": prediction, "references": reference, **kwargs}
912
+ batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
913
+ evaluation_module.add(**batch)
914
+
915
+ def add_batch(self, predictions=None, references=None, **kwargs):
916
+ """Add a batch of predictions and references for each evaluation module's stack.
917
+
918
+ Args:
919
+ predictions (`list/array/tensor`, *optional*):
920
+ Predictions.
921
+ references (`list/array/tensor`, *optional*):
922
+ References.
923
+
924
+ Example:
925
+ ```py
926
+ >>> import evaluate
927
+ >>> accuracy = evaluate.load("accuracy")
928
+ >>> f1 = evaluate.load("f1")
929
+ >>> clf_metrics = evaluate.combine(["accuracy", "f1"])
930
+ >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
931
+ ... clf_metrics.add_batch(references=refs, predictions=preds)
932
+ ```
933
+ """
934
+ for evaluation_module in self.evaluation_modules:
935
+ batch = {"predictions": predictions, "references": references, **kwargs}
936
+ batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
937
+ evaluation_module.add_batch(**batch)
938
+
939
+ def compute(self, predictions=None, references=None, **kwargs):
940
+ """Compute each evaluation module.
941
+
942
+ Usage of positional arguments is not allowed to prevent mistakes.
943
+
944
+ Args:
945
+ predictions (`list/array/tensor`, *optional*):
946
+ Predictions.
947
+ references (`list/array/tensor`, *optional*):
948
+ References.
949
+ **kwargs (*optional*):
950
+ Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
951
+ method (see details in the docstring).
952
+
953
+ Return:
954
+ `dict` or `None`
955
+
956
+ - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
957
+ - `None` if the evaluation module is not run on the main process (`process_id != 0`).
958
+
959
+ Example:
960
+
961
+ ```py
962
+ >>> import evaluate
963
+ >>> accuracy = evaluate.load("accuracy")
964
+ >>> f1 = evaluate.load("f1")
965
+ >>> clf_metrics = evaluate.combine(["accuracy", "f1"])
966
+ >>> clf_metrics.compute(predictions=[0,1], references=[1,1])
967
+ {'accuracy': 0.5, 'f1': 0.6666666666666666}
968
+ ```
969
+ """
970
+ results = []
971
+
972
+ for evaluation_module in self.evaluation_modules:
973
+ batch = {"predictions": predictions, "references": references, **kwargs}
974
+ results.append(evaluation_module.compute(**batch))
975
+
976
+ return self._merge_results(results)
977
+
978
+ def _merge_results(self, results):
979
+ merged_results = {}
980
+ results_keys = list(itertools.chain.from_iterable([r.keys() for r in results]))
981
+ duplicate_keys = {item for item, count in collections.Counter(results_keys).items() if count > 1}
982
+
983
+ duplicate_names = [
984
+ item for item, count in collections.Counter(self.evaluation_module_names).items() if count > 1
985
+ ]
986
+ duplicate_counter = {name: 0 for name in duplicate_names}
987
+
988
+ for module_name, result in zip(self.evaluation_module_names, results):
989
+ for k, v in result.items():
990
+ if k not in duplicate_keys and not self.force_prefix:
991
+ merged_results[f"{k}"] = v
992
+ elif module_name in duplicate_counter:
993
+ merged_results[f"{module_name}_{duplicate_counter[module_name]}_{k}"] = v
994
+ else:
995
+ merged_results[f"{module_name}_{k}"] = v
996
+
997
+ if module_name in duplicate_counter:
998
+ duplicate_counter[module_name] += 1
999
+
1000
+ return merged_results
1001
+
1002
+
1003
+ def combine(evaluations, force_prefix=False):
1004
+ """Combines several metrics, comparisons, or measurements into a single `CombinedEvaluations` object that
1005
+ can be used like a single evaluation module.
1006
+
1007
+ If two scores have the same name, then they are prefixed with their module names.
1008
+ If two modules have the same name, use a dictionary to give them different names; otherwise an integer id is appended to the prefix.
1009
+
1010
+ Args:
1011
+ evaluations (`Union[list, dict]`):
1012
+ A list or dictionary of evaluation modules. The modules can either be passed
1013
+ as strings or as loaded `EvaluationModule`s. If a dictionary is passed, its keys are the names used and its values the modules.
1014
+ The names are used as prefixes in case there are name overlaps in the returned results of each module or if `force_prefix=True`.
1015
+ force_prefix (`bool`, *optional*, defaults to `False`):
1016
+ If `True`, all scores from the modules are prefixed with their name. If
1017
+ a dictionary is passed, the keys are used as names; otherwise the module's name is used.
1018
+
1019
+ Examples:
1020
+
1021
+ ```py
1022
+ >>> import evaluate
1023
+ >>> accuracy = evaluate.load("accuracy")
1024
+ >>> f1 = evaluate.load("f1")
1025
+ >>> clf_metrics = evaluate.combine(["accuracy", "f1"])
1026
+ ```
1027
+ """
1028
+
1029
+ return CombinedEvaluations(evaluations, force_prefix=force_prefix)
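
As a rough illustration of the `CombinedEvaluations` API defined in this file, here is a hedged sketch of combining two modules under explicit names, feeding them incrementally, and computing the merged result. The module names and inputs are illustrative; exact score values are omitted.

```py
import evaluate

# A dict assigns explicit names; with force_prefix=True every score key in
# the merged result is prefixed with that name.
clf_metrics = evaluate.combine({"acc": "accuracy", "f1": "f1"}, force_prefix=True)

for refs, preds in zip([[0, 1], [1, 1]], [[0, 1], [0, 1]]):
    clf_metrics.add_batch(references=refs, predictions=preds)

results = clf_metrics.compute()
# Expected keys: 'acc_accuracy' and 'f1_f1' (values depend on the inputs above).
print(results)
```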
env-llmeval/lib/python3.10/site-packages/evaluate/naming.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Utilities for file names."""
17
+
18
+ import itertools
19
+ import os
20
+ import re
21
+
22
+
23
+ _uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
24
+ _lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
25
+
26
+ _single_underscore_re = re.compile(r"(?<!_)_(?!_)")
27
+ _multiple_underscores_re = re.compile(r"(_{2,})")
28
+
29
+ _split_re = r"^\w+(\.\w+)*$"
30
+
31
+
32
+ def camelcase_to_snakecase(name):
33
+ """Convert camel-case string to snake-case."""
34
+ name = _uppercase_uppercase_re.sub(r"\1_\2", name)
35
+ name = _lowercase_uppercase_re.sub(r"\1_\2", name)
36
+ return name.lower()
37
+
38
+
39
+ def snakecase_to_camelcase(name):
40
+ """Convert snake-case string to camel-case string."""
41
+ name = _single_underscore_re.split(name)
42
+ name = [_multiple_underscores_re.split(n) for n in name]
43
+ return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
44
+
45
+
46
+ def filename_prefix_for_name(name):
47
+ if os.path.basename(name) != name:
48
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
49
+ return camelcase_to_snakecase(name)
50
+
51
+
52
+ def filename_prefix_for_split(name, split):
53
+ if os.path.basename(name) != name:
54
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
55
+ if not re.match(_split_re, split):
56
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
57
+ return f"{filename_prefix_for_name(name)}-{split}"
58
+
59
+
60
+ def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
61
+ prefix = filename_prefix_for_split(dataset_name, split)
62
+ if filetype_suffix:
63
+ prefix += f".{filetype_suffix}"
64
+ filepath = os.path.join(data_dir, prefix)
65
+ return f"{filepath}*"
66
+
67
+
68
+ def filename_for_dataset_split(dataset_name, split, filetype_suffix=None):
69
+ prefix = filename_prefix_for_split(dataset_name, split)
70
+ if filetype_suffix:
71
+ prefix += f".{filetype_suffix}"
72
+ return prefix
73
+
74
+
75
+ def filepath_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
76
+ filename = filename_for_dataset_split(
77
+ dataset_name=dataset_name,
78
+ split=split,
79
+ filetype_suffix=filetype_suffix,
80
+ )
81
+ filepath = os.path.join(data_dir, filename)
82
+ return filepath
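
The naming helpers above are small pure functions; the sketch below shows what they return under the regexes defined in this file (example inputs are illustrative, expected outputs shown as comments):

```py
from evaluate.naming import (
    camelcase_to_snakecase,
    snakecase_to_camelcase,
    filename_for_dataset_split,
)

print(camelcase_to_snakecase("ExactMatch"))    # exact_match
print(snakecase_to_camelcase("exact_match"))   # ExactMatch
print(filename_for_dataset_split("exact_match", "test", filetype_suffix="arrow"))
# exact_match-test.arrow
```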
env-llmeval/lib/python3.10/site-packages/evaluate/utils/__init__.py ADDED
@@ -0,0 +1,39 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # flake8: noqa
16
+ # Lint as: python3
17
+ """Util import."""
18
+
19
+ __all__ = [
20
+ "disable_progress_bar",
21
+ "enable_progress_bar",
22
+ "is_progress_bar_enabled",
23
+ "infer_gradio_input_types",
24
+ "json_to_string_type",
25
+ "parse_readme",
26
+ "parse_gradio_data",
27
+ "parse_test_cases",
28
+ "launch_gradio_widget",
29
+ ]
30
+
31
+ from .gradio import (
32
+ infer_gradio_input_types,
33
+ json_to_string_type,
34
+ launch_gradio_widget,
35
+ parse_gradio_data,
36
+ parse_readme,
37
+ parse_test_cases,
38
+ )
39
+ from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
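
For reference, the progress-bar helpers re-exported here can be toggled as in this minimal sketch (assuming the package is installed as `evaluate`):

```py
from evaluate.utils import (
    disable_progress_bar,
    enable_progress_bar,
    is_progress_bar_enabled,
)

disable_progress_bar()                 # silence tqdm bars, e.g. in CI logs
assert not is_progress_bar_enabled()   # confirm the flag was flipped
enable_progress_bar()                  # restore the default behaviour
```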
env-llmeval/lib/python3.10/site-packages/evaluate/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (584 Bytes).
 
env-llmeval/lib/python3.10/site-packages/evaluate/utils/__pycache__/file_utils.cpython-310.pyc ADDED
Binary file (17.8 kB).
 
env-llmeval/lib/python3.10/site-packages/evaluate/utils/__pycache__/gradio.cpython-310.pyc ADDED
Binary file (4.51 kB).