applied-ai-018 committed
Commit 668ca81 · verified · 1 Parent(s): 21d2b52

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/datasets/arrow_dataset.py +0 -0
  14. llmeval-env/lib/python3.10/site-packages/datasets/builder.py +0 -0
  15. llmeval-env/lib/python3.10/site-packages/datasets/config.py +272 -0
  16. llmeval-env/lib/python3.10/site-packages/datasets/distributed.py +39 -0
  17. llmeval-env/lib/python3.10/site-packages/datasets/inspect.py +582 -0
  18. llmeval-env/lib/python3.10/site-packages/datasets/iterable_dataset.py +0 -0
  19. llmeval-env/lib/python3.10/site-packages/datasets/keyhash.py +104 -0
  20. llmeval-env/lib/python3.10/site-packages/datasets/load.py +0 -0
  21. llmeval-env/lib/python3.10/site-packages/datasets/metric.py +652 -0
  22. llmeval-env/lib/python3.10/site-packages/datasets/naming.py +84 -0
  23. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py +0 -0
  26. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py +207 -0
  29. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py +0 -0
  30. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py +406 -0
  33. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py +0 -0
  34. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py +104 -0
  37. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py +0 -0
  38. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py +62 -0
  41. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py +0 -0
  42. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py +100 -0
  43. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py +0 -0
  44. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py +118 -0
  47. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py +0 -0
  48. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py +129 -0
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.92 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc ADDED
Binary file (23.4 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc ADDED
Binary file (73.6 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc ADDED
Binary file (9.13 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc ADDED
Binary file (28.1 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc ADDED
Binary file (98.9 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (3.55 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc ADDED
Binary file (22.9 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc ADDED
Binary file (91.4 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc ADDED
Binary file (23.4 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc ADDED
Binary file (2.86 kB)

llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc ADDED
Binary file (23.1 kB)

llmeval-env/lib/python3.10/site-packages/datasets/arrow_dataset.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/datasets/builder.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/datasets/config.py ADDED
@@ -0,0 +1,272 @@
+ import importlib
+ import importlib.metadata
+ import logging
+ import os
+ import platform
+ from pathlib import Path
+ from typing import Optional
+
+ from packaging import version
+
+
+ logger = logging.getLogger(__name__.split(".", 1)[0])  # to avoid circular import from .utils.logging
+
+ # Datasets
+ S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
+ CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets"
+ REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}"
+
+ # Metrics
+ S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
+ CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
+ REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}"
+
+ # Hub
+ HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
+ HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
+ HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
+ HUB_DEFAULT_VERSION = "main"
+
+ PY_VERSION = version.parse(platform.python_version())
+
+ # General environment variables accepted values for booleans
+ ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+ ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"}
+ ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
+ ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"})
+
+
+ # Imports
+ DILL_VERSION = version.parse(importlib.metadata.version("dill"))
+ FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
+ PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
+ PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
+ HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub"))
+
+ USE_TF = os.environ.get("USE_TF", "AUTO").upper()
+ USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
+ USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
+
+ TORCH_VERSION = "N/A"
+ TORCH_AVAILABLE = False
+
+ if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
+     TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
+     if TORCH_AVAILABLE:
+         try:
+             TORCH_VERSION = version.parse(importlib.metadata.version("torch"))
+             logger.info(f"PyTorch version {TORCH_VERSION} available.")
+         except importlib.metadata.PackageNotFoundError:
+             pass
+ else:
+     logger.info("Disabling PyTorch because USE_TF is set")
+
+ POLARS_VERSION = "N/A"
+ POLARS_AVAILABLE = importlib.util.find_spec("polars") is not None
+
+ if POLARS_AVAILABLE:
+     try:
+         POLARS_VERSION = version.parse(importlib.metadata.version("polars"))
+         logger.info(f"Polars version {POLARS_VERSION} available.")
+     except importlib.metadata.PackageNotFoundError:
+         pass
+
+ TF_VERSION = "N/A"
+ TF_AVAILABLE = False
+
+ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+     TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
+     if TF_AVAILABLE:
+         # For the metadata, we have to look for both tensorflow and tensorflow-cpu
+         for package in [
+             "tensorflow",
+             "tensorflow-cpu",
+             "tensorflow-gpu",
+             "tf-nightly",
+             "tf-nightly-cpu",
+             "tf-nightly-gpu",
+             "intel-tensorflow",
+             "tensorflow-rocm",
+             "tensorflow-macos",
+         ]:
+             try:
+                 TF_VERSION = version.parse(importlib.metadata.version(package))
+             except importlib.metadata.PackageNotFoundError:
+                 continue
+             else:
+                 break
+         else:
+             TF_AVAILABLE = False
+     if TF_AVAILABLE:
+         if TF_VERSION.major < 2:
+             logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
+             TF_AVAILABLE = False
+         else:
+             logger.info(f"TensorFlow version {TF_VERSION} available.")
+ else:
+     logger.info("Disabling Tensorflow because USE_TORCH is set")
+
+
+ JAX_VERSION = "N/A"
+ JAX_AVAILABLE = False
+
+ if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
+     JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None
+     if JAX_AVAILABLE:
+         try:
+             JAX_VERSION = version.parse(importlib.metadata.version("jax"))
+             logger.info(f"JAX version {JAX_VERSION} available.")
+         except importlib.metadata.PackageNotFoundError:
+             pass
+ else:
+     logger.info("Disabling JAX because USE_JAX is set to False")
+
+
+ USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper()
+ BEAM_VERSION = "N/A"
+ BEAM_AVAILABLE = False
+ if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES:
+     try:
+         BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam"))
+         BEAM_AVAILABLE = True
+         logger.info(f"Apache Beam version {BEAM_VERSION} available.")
+     except importlib.metadata.PackageNotFoundError:
+         pass
+ else:
+     logger.info("Disabling Apache Beam because USE_BEAM is set to False")
+
+
+ # Optional tools for data loading
+ SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None
+
+ # Optional tools for feature decoding
+ PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None
+ IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+     importlib.import_module("soundfile").__libsndfile_version__
+ ) >= version.parse("1.0.31")
+ IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+     importlib.import_module("soundfile").__libsndfile_version__
+ ) >= version.parse("1.1.0")
+
+ # Optional compression tools
+ RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None
+ ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None
+ LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None
+ PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None
+
+ # Cache location
+ DEFAULT_XDG_CACHE_HOME = "~/.cache"
+ XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
+ DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
+ HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
+
+ DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
+ HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
+
+ DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
+ HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
+
+ DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
+ HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
+
+ DOWNLOADED_DATASETS_DIR = "downloads"
+ DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR)
+ DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH))
+
+ EXTRACTED_DATASETS_DIR = "extracted"
+ DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR)
+ EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH))
+
+ # Download count for the website
+ HF_UPDATE_DOWNLOAD_COUNTS = (
+     os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
+ )
+
+ # For downloads and to check remote files metadata
+ HF_DATASETS_MULTITHREADING_MAX_WORKERS = 16
+
+ # Remote dataset scripts support
+ __HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1")
+ HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = (
+     True
+     if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES
+     else False
+     if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES
+     else None
+ )
+ TIME_OUT_REMOTE_CODE = 15
+
+ # Dataset viewer API
+ USE_PARQUET_EXPORT = True
+
+ # Batch size constants. For more info, see:
+ # https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations
+ DEFAULT_MAX_BATCH_SIZE = 1000
+
+ # Size of the preloaded record batch in `Dataset.__iter__`
+ ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10
+
+ # Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare)
+ MAX_SHARD_SIZE = "500MB"
+
+ # Parquet configuration
+ PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
+ PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
+ PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
+
+ # Offline mode
+ HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
+
+ # Here, `True` will disable progress bars globally without possibility of enabling it
+ # programmatically. `False` will enable them without possibility of disabling them.
+ # If environment variable is not set (None), then the user is free to enable/disable
+ # them programmatically.
+ # TL;DR: env variable has priority over code
+ __HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS")
+ HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = (
+     __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES
+     if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None
+     else None
+ )
+
+ # In-memory
+ DEFAULT_IN_MEMORY_MAX_SIZE = 0  # Disabled
+ IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
+
+ # File names
+ DATASET_ARROW_FILENAME = "dataset.arrow"
+ DATASET_INDICES_FILENAME = "indices.arrow"
+ DATASET_STATE_JSON_FILENAME = "state.json"
+ DATASET_INFO_FILENAME = "dataset_info.json"
+ DATASETDICT_INFOS_FILENAME = "dataset_infos.json"
+ LICENSE_FILENAME = "LICENSE"
+ METRIC_INFO_FILENAME = "metric_info.json"
+ DATASETDICT_JSON_FILENAME = "dataset_dict.json"
+ METADATA_CONFIGS_FIELD = "configs"
+ REPOCARD_FILENAME = "README.md"
+ REPOYAML_FILENAME = ".huggingface.yaml"
+
+ MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules"
+
+ MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255
+
+ # Temporary cache directory prefix
+ TEMP_CACHE_DIR_PREFIX = "hf_datasets-"
+
+ # Streaming
+ STREAMING_READ_MAX_RETRIES = 20
+ STREAMING_READ_RETRY_INTERVAL = 5
+
+ # Datasets without script
+ DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+ GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10
+ ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+
+ # Progress bars
+ PBAR_REFRESH_TIME_INTERVAL = 0.05  # 20 progress updates per sec
+
+ # Maximum number of uploaded files per commit
+ UPLOADS_MAX_NUMBER_PER_COMMIT = 50
+
+ # Backward compatibility
+ MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
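
The constants in config.py are resolved once at import time from environment variables. As a reference point, here is a minimal, self-contained sketch of the tri-state parsing applied to `HF_DATASETS_TRUST_REMOTE_CODE`; the helper name `parse_tristate_env` is illustrative and not part of the library:

```py
import os

ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"}


def parse_tristate_env(var_name: str, default: str = "1"):
    # Mirrors the logic in config.py: truthy strings map to True, falsy strings
    # map to False, and anything else resolves to None ("not set").
    raw = os.environ.get(var_name, default).upper()
    if raw in ENV_VARS_TRUE_VALUES:
        return True
    if raw in ENV_VARS_FALSE_VALUES:
        return False
    return None


# With the default of "1", trust_remote_code resolves to True unless the
# environment variable explicitly disables it.
print(parse_tristate_env("HF_DATASETS_TRUST_REMOTE_CODE"))
```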
llmeval-env/lib/python3.10/site-packages/datasets/distributed.py ADDED
@@ -0,0 +1,39 @@
+ from typing import TypeVar
+
+ from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
+ from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
+
+
+ DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+ def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
+     """
+     Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+     For map-style datasets:
+
+     Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+     To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+     For iterable datasets:
+
+     If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+     then the shards are evenly assigned across the nodes, which is the most optimized.
+     Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+     Args:
+         dataset ([`Dataset`] or [`IterableDataset`]):
+             The dataset to split by node.
+         rank (`int`):
+             Rank of the current node.
+         world_size (`int`):
+             Total number of nodes.
+
+     Returns:
+         [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
+     """
+     if isinstance(dataset, Dataset):
+         return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
+     else:
+         return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
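
For context, a short usage sketch of `split_dataset_by_node`; the dataset name and `world_size` below are illustrative:

```py
>>> from datasets import load_dataset
>>> from datasets.distributed import split_dataset_by_node

>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> # Rank 0 of 8 only sees its own share: whole shards when n_shards is a
>>> # multiple of world_size, otherwise every 8th example.
>>> ds_rank0 = split_dataset_by_node(ds, rank=0, world_size=8)
```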
llmeval-env/lib/python3.10/site-packages/datasets/inspect.py ADDED
@@ -0,0 +1,582 @@
+ # Copyright 2020 The HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """List and inspect datasets."""
+
+ import inspect
+ import os
+ import shutil
+ import warnings
+ from pathlib import Path, PurePath
+ from typing import Dict, List, Mapping, Optional, Sequence, Union
+
+ import huggingface_hub
+
+ from . import config
+ from .download.download_config import DownloadConfig
+ from .download.download_manager import DownloadMode
+ from .download.streaming_download_manager import StreamingDownloadManager
+ from .info import DatasetInfo
+ from .load import (
+     dataset_module_factory,
+     get_dataset_builder_class,
+     import_main_class,
+     load_dataset_builder,
+     metric_module_factory,
+ )
+ from .utils.deprecation_utils import deprecated
+ from .utils.file_utils import relative_to_absolute_path
+ from .utils.logging import get_logger
+ from .utils.version import Version
+
+
+ logger = get_logger(__name__)
+
+
+ class SplitsNotFoundError(ValueError):
+     pass
+
+
+ @deprecated("Use 'huggingface_hub.list_datasets' instead.")
+ def list_datasets(with_community_datasets=True, with_details=False):
+     """List all the datasets scripts available on the Hugging Face Hub.
+
+     Args:
+         with_community_datasets (`bool`, *optional*, defaults to `True`):
+             Include the community provided datasets.
+         with_details (`bool`, *optional*, defaults to `False`):
+             Return the full details on the datasets instead of only the short name.
+
+     Example:
+
+     ```py
+     >>> from datasets import list_datasets
+     >>> list_datasets()
+     ['acronym_identification',
+      'ade_corpus_v2',
+      'adversarial_qa',
+      'aeslc',
+      'afrikaans_ner_corpus',
+      'ag_news',
+     ...
+     ]
+     ```
+     """
+     datasets = huggingface_hub.list_datasets(full=with_details)
+     if not with_community_datasets:
+         datasets = [dataset for dataset in datasets if "/" not in dataset.id]
+     if not with_details:
+         datasets = [dataset.id for dataset in datasets]
+     return list(datasets)
+
+
+ @deprecated(
+     "Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+ )
+ def list_metrics(with_community_metrics=True, with_details=False):
+     """List all the metrics script available on the Hugging Face Hub.
+
+     <Deprecated version="2.5.0">
+
+     Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+     </Deprecated>
+
+     Args:
+         with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
+         with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
+
+     Example:
+
+     ```py
+     >>> from datasets import list_metrics
+     >>> list_metrics()
+     ['accuracy',
+      'bertscore',
+      'bleu',
+      'bleurt',
+      'cer',
+      'chrf',
+     ...
+     ]
+     ```
+     """
+     metrics = huggingface_hub.list_metrics()
+     if not with_community_metrics:
+         metrics = [metric for metric in metrics if "/" not in metric.id]
+     if not with_details:
+         metrics = [metric.id for metric in metrics]
+     return metrics
+
+
+ @deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
+ def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+     """
+     Allow inspection/modification of a dataset script by copying on local drive at local_path.
+
+     Args:
+         path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
+
+             - a local path to processing script or the directory containing the script (if the script has the same name
+                 as the directory),
+                 e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+             - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
+                 e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
+         local_path (`str`):
+             Path to the local folder to copy the dataset script to.
+         download_config ([`DownloadConfig`], *optional*):
+             Specific download configuration parameters.
+         **download_kwargs (additional keyword arguments):
+             Optional arguments for [`DownloadConfig`] which will override
+             the attributes of `download_config` if supplied.
+     """
+     if download_config is None:
+         download_config = DownloadConfig(**download_kwargs)
+     if os.path.isfile(path):
+         path = str(Path(path).parent)
+     if os.path.isdir(path):
+         shutil.copytree(path, local_path, dirs_exist_ok=True)
+     else:
+         huggingface_hub.HfApi(endpoint=config.HF_ENDPOINT, token=download_config.token).snapshot_download(
+             repo_id=path, repo_type="dataset", local_dir=local_path, force_download=download_config.force_download
+         )
+     print(
+         f"The dataset {path} can be inspected at {local_path}. "
+         f'You can modify this loading script if it has one and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.'
+     )
+
+
+ @deprecated(
+     "Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+ )
+ def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+     r"""
+     Allow inspection/modification of a metric script by copying it on local drive at local_path.
+
+     <Deprecated version="2.5.0">
+
+     Use `evaluate.inspect_evaluation_module` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+     </Deprecated>
+
+     Args:
+         path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+             - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+                 e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+             - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+                 e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+         local_path (``str``): path to the local folder to copy the dataset script to.
+         download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
+         **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
+     """
+     metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs)
+     metric_cls = import_main_class(metric_module.module_path, dataset=False)
+     module_source_path = inspect.getsourcefile(metric_cls)
+     module_source_dirpath = os.path.dirname(module_source_path)
+     for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
+         dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
+         os.makedirs(dst_dirpath, exist_ok=True)
+         # skipping hidden directories; prune the search
+         dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
+         for filename in filenames:
+             shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
+         shutil.copystat(dirpath, dst_dirpath)
+     local_path = relative_to_absolute_path(local_path)
+     print(
+         f"The processing scripts for metric {path} can be inspected at {local_path}. "
+         f"The main class is in {module_source_dirpath}. "
+         f'You can modify this processing scripts and use it with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.'
+     )
+
+
+ def get_dataset_infos(
+     path: str,
+     data_files: Optional[Union[Dict, List, str]] = None,
+     download_config: Optional[DownloadConfig] = None,
+     download_mode: Optional[Union[DownloadMode, str]] = None,
+     revision: Optional[Union[str, Version]] = None,
+     token: Optional[Union[bool, str]] = None,
+     use_auth_token="deprecated",
+     **config_kwargs,
+ ):
+     """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
+
+     Args:
+         path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+             - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+                 e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+             - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+                 e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+         revision (`Union[str, datasets.Version]`, *optional*):
+             If specified, the dataset module will be loaded from the datasets repository at this version.
+             By default:
+             - it is set to the local version of the lib.
+             - it will also try to load it from the main branch if it's not available at the local version of the lib.
+             Specifying a version that is different from your local version of the lib might cause compatibility issues.
+         download_config ([`DownloadConfig`], *optional*):
+             Specific download configuration parameters.
+         download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+             Download/generate mode.
+         data_files (`Union[Dict, List, str]`, *optional*):
+             Defining the data_files of the dataset configuration.
+         token (`str` or `bool`, *optional*):
+             Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+             If `True`, or not specified, will get token from `"~/.huggingface"`.
+         use_auth_token (`str` or `bool`, *optional*):
+             Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+             If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+             <Deprecated version="2.14.0">
+
+             `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+             </Deprecated>
+
+         **config_kwargs (additional keyword arguments):
+             Optional attributes for builder class which will override the attributes if supplied.
+
+     Example:
+
+     ```py
+     >>> from datasets import get_dataset_infos
+     >>> get_dataset_infos('rotten_tomatoes')
+     {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
+     ```
+     """
+     if use_auth_token != "deprecated":
+         warnings.warn(
+             "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+             "You can remove this warning by passing 'token=<use_auth_token>' instead.",
+             FutureWarning,
+         )
+         token = use_auth_token
+
+     config_names = get_dataset_config_names(
+         path=path,
+         revision=revision,
+         download_config=download_config,
+         download_mode=download_mode,
+         data_files=data_files,
+         token=token,
+     )
+     return {
+         config_name: get_dataset_config_info(
+             path=path,
+             config_name=config_name,
+             data_files=data_files,
+             download_config=download_config,
+             download_mode=download_mode,
+             revision=revision,
+             token=token,
+             **config_kwargs,
+         )
+         for config_name in config_names
+     }
+
+
+ def get_dataset_config_names(
+     path: str,
+     revision: Optional[Union[str, Version]] = None,
+     download_config: Optional[DownloadConfig] = None,
+     download_mode: Optional[Union[DownloadMode, str]] = None,
+     dynamic_modules_path: Optional[str] = None,
+     data_files: Optional[Union[Dict, List, str]] = None,
+     **download_kwargs,
+ ):
+     """Get the list of available config names for a particular dataset.
+
+     Args:
+         path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+             - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+                 e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+             - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+                 e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+         revision (`Union[str, datasets.Version]`, *optional*):
+             If specified, the dataset module will be loaded from the datasets repository at this version.
+             By default:
+             - it is set to the local version of the lib.
+             - it will also try to load it from the main branch if it's not available at the local version of the lib.
+             Specifying a version that is different from your local version of the lib might cause compatibility issues.
+         download_config ([`DownloadConfig`], *optional*):
+             Specific download configuration parameters.
+         download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+             Download/generate mode.
+         dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+             Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+             By default the datasets and metrics are stored inside the `datasets_modules` module.
+         data_files (`Union[Dict, List, str]`, *optional*):
+             Defining the data_files of the dataset configuration.
+         **download_kwargs (additional keyword arguments):
+             Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+             for example `token`.
+
+     Example:
+
+     ```py
+     >>> from datasets import get_dataset_config_names
+     >>> get_dataset_config_names("glue")
+     ['cola',
+      'sst2',
+      'mrpc',
+      'qqp',
+      'stsb',
+      'mnli',
+      'mnli_mismatched',
+      'mnli_matched',
+      'qnli',
+      'rte',
+      'wnli',
+      'ax']
+     ```
+     """
+     dataset_module = dataset_module_factory(
+         path,
+         revision=revision,
+         download_config=download_config,
+         download_mode=download_mode,
+         dynamic_modules_path=dynamic_modules_path,
+         data_files=data_files,
+         **download_kwargs,
+     )
+     builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+     return list(builder_cls.builder_configs.keys()) or [
+         dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default")
+     ]
+
+
+ def get_dataset_default_config_name(
+     path: str,
+     revision: Optional[Union[str, Version]] = None,
+     download_config: Optional[DownloadConfig] = None,
+     download_mode: Optional[Union[DownloadMode, str]] = None,
+     dynamic_modules_path: Optional[str] = None,
+     data_files: Optional[Union[Dict, List, str]] = None,
+     **download_kwargs,
+ ) -> Optional[str]:
+     """Get the default config name for a particular dataset.
+     Can return None only if the dataset has multiple configurations and no default configuration.
+
+     Args:
+         path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+             - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+                 e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+             - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+                 e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+         revision (`Union[str, datasets.Version]`, *optional*):
+             If specified, the dataset module will be loaded from the datasets repository at this version.
+             By default:
+             - it is set to the local version of the lib.
+             - it will also try to load it from the main branch if it's not available at the local version of the lib.
+             Specifying a version that is different from your local version of the lib might cause compatibility issues.
+         download_config ([`DownloadConfig`], *optional*):
+             Specific download configuration parameters.
+         download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+             Download/generate mode.
+         dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+             Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+             By default the datasets and metrics are stored inside the `datasets_modules` module.
+         data_files (`Union[Dict, List, str]`, *optional*):
+             Defining the data_files of the dataset configuration.
+         **download_kwargs (additional keyword arguments):
+             Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+             for example `token`.
+
+     Returns:
+         Optional[str]: the default config name if there is one
+
+     Example:
+
+     ```py
+     >>> from datasets import get_dataset_default_config_name
+     >>> get_dataset_default_config_name("openbookqa")
+     'main'
+     ```
+     """
+     dataset_module = dataset_module_factory(
+         path,
+         revision=revision,
+         download_config=download_config,
+         download_mode=download_mode,
+         dynamic_modules_path=dynamic_modules_path,
+         data_files=data_files,
+         **download_kwargs,
+     )
+     builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+     builder_configs = list(builder_cls.builder_configs.keys())
+     if builder_configs:
+         default_config_name = builder_configs[0] if len(builder_configs) == 1 else None
+     else:
+         default_config_name = "default"
+     return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
+
+
+ def get_dataset_config_info(
+     path: str,
+     config_name: Optional[str] = None,
+     data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+     download_config: Optional[DownloadConfig] = None,
+     download_mode: Optional[Union[DownloadMode, str]] = None,
+     revision: Optional[Union[str, Version]] = None,
+     token: Optional[Union[bool, str]] = None,
+     use_auth_token="deprecated",
+     **config_kwargs,
+ ) -> DatasetInfo:
+     """Get the meta information (DatasetInfo) about a dataset for a particular config
+
+     Args:
+         path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+             - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+                 e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+             - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+                 e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+         config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
+         data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
+         download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
+         download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+         revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+             As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+             You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+         token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+             If True, or not specified, will get token from `"~/.huggingface"`.
+         use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+             If True, or not specified, will get token from `"~/.huggingface"`.
+
+             <Deprecated version="2.14.0">
+
+             `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+             </Deprecated>
+
+         **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
+
+     """
+     if use_auth_token != "deprecated":
+         warnings.warn(
+             "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+             "You can remove this warning by passing 'token=<use_auth_token>' instead.",
+             FutureWarning,
+         )
+         token = use_auth_token
+
+     builder = load_dataset_builder(
+         path,
+         name=config_name,
+         data_files=data_files,
+         download_config=download_config,
+         download_mode=download_mode,
+         revision=revision,
+         token=token,
+         **config_kwargs,
+     )
+     info = builder.info
+     if info.splits is None:
+         download_config = download_config.copy() if download_config else DownloadConfig()
+         if token is not None:
+             download_config.token = token
+         builder._check_manual_download(
+             StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+         )
+         try:
+             info.splits = {
+                 split_generator.name: {"name": split_generator.name, "dataset_name": path}
+                 for split_generator in builder._split_generators(
+                     StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+                 )
+             }
+         except Exception as err:
+             raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
+     return info
+
+
+ def get_dataset_split_names(
+     path: str,
+     config_name: Optional[str] = None,
+     data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+     download_config: Optional[DownloadConfig] = None,
+     download_mode: Optional[Union[DownloadMode, str]] = None,
+     revision: Optional[Union[str, Version]] = None,
+     token: Optional[Union[bool, str]] = None,
+     use_auth_token="deprecated",
+     **config_kwargs,
+ ):
+     """Get the list of available splits for a particular config and dataset.
+
+     Args:
+         path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+             - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+                 e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+             - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+                 e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+         config_name (`str`, *optional*):
+             Defining the name of the dataset configuration.
+         data_files (`str` or `Sequence` or `Mapping`, *optional*):
+             Path(s) to source data file(s).
+         download_config ([`DownloadConfig`], *optional*):
+             Specific download configuration parameters.
+         download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+             Download/generate mode.
+         revision ([`Version`] or `str`, *optional*):
+             Version of the dataset script to load.
+             As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+             You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+         token (`str` or `bool`, *optional*):
+             Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+             If `True`, or not specified, will get token from `"~/.huggingface"`.
+         use_auth_token (`str` or `bool`, *optional*):
+             Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+             If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+             <Deprecated version="2.14.0">
+
+             `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+             </Deprecated>
+
+         **config_kwargs (additional keyword arguments):
+             Optional attributes for builder class which will override the attributes if supplied.
+
+     Example:
+
+     ```py
+     >>> from datasets import get_dataset_split_names
+     >>> get_dataset_split_names('rotten_tomatoes')
+     ['train', 'validation', 'test']
+     ```
+     """
+     if use_auth_token != "deprecated":
+         warnings.warn(
+             "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+             "You can remove this warning by passing 'token=<use_auth_token>' instead.",
+             FutureWarning,
+         )
+         token = use_auth_token
+
+     info = get_dataset_config_info(
+         path,
+         config_name=config_name,
+         data_files=data_files,
+         download_config=download_config,
+         download_mode=download_mode,
+         revision=revision,
+         token=token,
+         **config_kwargs,
+     )
+     return list(info.splits.keys())
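
To tie the inspection helpers together, a brief usage sketch combining `get_dataset_config_names` and `get_dataset_split_names`; the dataset name is illustrative and the output depends on the Hub repository:

```py
>>> from datasets import get_dataset_config_names, get_dataset_split_names
>>> configs = get_dataset_config_names("glue")
>>> # Look up the splits of the first config without downloading the data itself
>>> get_dataset_split_names("glue", config_name=configs[0])
['train', 'validation', 'test']
```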
llmeval-env/lib/python3.10/site-packages/datasets/iterable_dataset.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/datasets/keyhash.py ADDED
@@ -0,0 +1,104 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+
+ """
+ Hashing function for dataset keys using `hashlib.md5`
+
+ Requirements for the hash function:
+
+ - Provides a uniformly distributed hash from random space
+ - Adequately fast speed
+ - Working with multiple input types (in this case, `str`, `int` or `bytes`)
+ - Should be platform independent (generates same hash on different OS and systems)
+
+ The hashing function provides a unique 128-bit integer hash of the key provided.
+
+ The split name is being used here as the hash salt to avoid having same hashes
+ in different splits due to same keys
+ """
+
+ from typing import Union
+
+ from huggingface_hub.utils import insecure_hashlib
+
+
+ def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
+     """
+     Returns the input hash_data in its bytes form
+
+     Args:
+         hash_data: the hash salt/key to be converted to bytes
+     """
+     if isinstance(hash_data, bytes):
+         # Data already in bytes, return it as is
+         return hash_data
+     elif isinstance(hash_data, str):
+         # We keep the data as is for it to be later encoded to UTF-8
+         # However replace `\\` with `/` for Windows compatibility
+         hash_data = hash_data.replace("\\", "/")
+     elif isinstance(hash_data, int):
+         hash_data = str(hash_data)
+     else:
+         # If data is not of the required type, raise error
+         raise InvalidKeyError(hash_data)
+
+     return hash_data.encode("utf-8")
+
+
+ class InvalidKeyError(Exception):
+     """Raises an error when given key is of invalid datatype."""
+
+     def __init__(self, hash_data):
+         self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
+         self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
+         self.suffix = "\nKeys should be either str, int or bytes type"
+         super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+ class DuplicatedKeysError(Exception):
+     """Raise an error when duplicate key found."""
+
+     def __init__(self, key, duplicate_key_indices, fix_msg=""):
+         self.key = key
+         self.duplicate_key_indices = duplicate_key_indices
+         self.fix_msg = fix_msg
+         self.prefix = "Found multiple examples generated with the same key"
+         if len(duplicate_key_indices) <= 20:
+             self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
+         else:
+             self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
+         self.suffix = "\n" + fix_msg if fix_msg else ""
+         super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+ class KeyHasher:
+     """KeyHasher class for providing hash using md5"""
+
+     def __init__(self, hash_salt: str):
+         self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
+
+     def hash(self, key: Union[str, int, bytes]) -> int:
+         """Returns 128-bits unique hash of input key
+
+         Args:
+             key: the input key to be hashed (should be str, int or bytes)
+
+         Returns: 128-bit int hash key"""
+         md5 = self._split_md5.copy()
+         byte_key = _as_bytes(key)
+         md5.update(byte_key)
+         # Convert to integer with hexadecimal conversion
+         return int(md5.hexdigest(), 16)
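
A small usage sketch of `KeyHasher` (the salt and key values are illustrative): the split name acts as the salt, so the same key hashed under different splits yields different 128-bit digests.

```py
>>> from datasets.keyhash import KeyHasher

>>> hasher = KeyHasher(hash_salt="train")
>>> h = hasher.hash("example-0")  # deterministic 128-bit integer, stable across platforms
>>> h == KeyHasher(hash_salt="test").hash("example-0")
False
```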
llmeval-env/lib/python3.10/site-packages/datasets/load.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/datasets/metric.py ADDED
@@ -0,0 +1,652 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Metrics base class."""
17
+
18
+ import os
19
+ import types
20
+ import uuid
21
+ from typing import Any, Dict, List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import pyarrow as pa
25
+ from filelock import BaseFileLock, Timeout
26
+
27
+ from . import config
28
+ from .arrow_dataset import Dataset
29
+ from .arrow_reader import ArrowReader
30
+ from .arrow_writer import ArrowWriter
31
+ from .download.download_config import DownloadConfig
32
+ from .download.download_manager import DownloadManager
33
+ from .features import Features
34
+ from .info import DatasetInfo, MetricInfo
35
+ from .naming import camelcase_to_snakecase
36
+ from .utils._filelock import FileLock
37
+ from .utils.deprecation_utils import deprecated
38
+ from .utils.logging import get_logger
39
+ from .utils.py_utils import copyfunc, temp_seed
40
+
41
+
42
+ logger = get_logger(__name__)
43
+
44
+
45
+ class FileFreeLock(BaseFileLock):
46
+ """Thread lock until a file **cannot** be locked"""
47
+
48
+ def __init__(self, lock_file, *args, **kwargs):
49
+ self.filelock = FileLock(lock_file)
50
+ super().__init__(self.filelock.lock_file, *args, **kwargs)
51
+
52
+ def _acquire(self):
53
+ try:
54
+ self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
55
+ except Timeout:
56
+ # We couldn't acquire the lock, the file is locked!
57
+ self._context.lock_file_fd = self.filelock.lock_file
58
+ else:
59
+ # We were able to acquire the lock, the file is not yet locked!
60
+ self.filelock.release()
61
+ self._context.lock_file_fd = None
62
+
63
+ def _release(self):
64
+ self._context.lock_file_fd = None
65
+
66
+
67
+ # lists - summarize long lists similarly to NumPy
68
+ # arrays/tensors - let the frameworks control formatting
69
+ def summarize_if_long_list(obj):
70
+ if not type(obj) == list or len(obj) <= 6: # noqa: E721
71
+ return f"{obj}"
72
+
73
+ def format_chunk(chunk):
74
+ return ", ".join(repr(x) for x in chunk)
75
+
76
+ return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
77
+
78
+
79
+ class MetricInfoMixin:
80
+ """This base class exposes some attributes of MetricInfo
81
+ at the base level of the Metric for easy access.
82
+
83
+ <Deprecated version="2.5.0">
84
+
85
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
86
+
87
+ </Deprecated>
88
+
89
+ """
90
+
91
+ def __init__(self, info: MetricInfo):
92
+ self._metric_info = info
93
+
94
+ @property
95
+ def info(self):
96
+ """:class:`datasets.MetricInfo` object containing all the metadata in the metric."""
97
+ return self._metric_info
98
+
99
+ @property
100
+ def name(self) -> str:
101
+ return self._metric_info.metric_name
102
+
103
+ @property
104
+ def experiment_id(self) -> Optional[str]:
105
+ return self._metric_info.experiment_id
106
+
107
+ @property
108
+ def description(self) -> str:
109
+ return self._metric_info.description
110
+
111
+ @property
112
+ def citation(self) -> str:
113
+ return self._metric_info.citation
114
+
115
+ @property
116
+ def features(self) -> Features:
117
+ return self._metric_info.features
118
+
119
+ @property
120
+ def inputs_description(self) -> str:
121
+ return self._metric_info.inputs_description
122
+
123
+ @property
124
+ def homepage(self) -> Optional[str]:
125
+ return self._metric_info.homepage
126
+
127
+ @property
128
+ def license(self) -> str:
129
+ return self._metric_info.license
130
+
131
+ @property
132
+ def codebase_urls(self) -> Optional[List[str]]:
133
+ return self._metric_info.codebase_urls
134
+
135
+ @property
136
+ def reference_urls(self) -> Optional[List[str]]:
137
+ return self._metric_info.reference_urls
138
+
139
+ @property
140
+ def streamable(self) -> bool:
141
+ return self._metric_info.streamable
142
+
143
+ @property
144
+ def format(self) -> Optional[str]:
145
+ return self._metric_info.format
146
+
147
+
148
+ class Metric(MetricInfoMixin):
149
+ """A Metric is the base class and common API for all metrics.
150
+
151
+ <Deprecated version="2.5.0">
152
+
153
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
154
+
155
+ </Deprecated>
156
+
157
+ Args:
158
+ config_name (``str``): This is used to define a hash specific to a metrics computation script and prevents the metric's data
159
+ from being overridden when the metric loading script is modified.
160
+ keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings.
161
+ cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
162
+ The data directory should be located on a shared file-system in distributed setups.
163
+ num_process (``int``): specify the total number of nodes in a distributed setting.
164
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
165
+ process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
166
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
167
+ seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
168
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
169
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
170
+ max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
171
+ timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
172
+ """
173
+
174
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
175
+ def __init__(
176
+ self,
177
+ config_name: Optional[str] = None,
178
+ keep_in_memory: bool = False,
179
+ cache_dir: Optional[str] = None,
180
+ num_process: int = 1,
181
+ process_id: int = 0,
182
+ seed: Optional[int] = None,
183
+ experiment_id: Optional[str] = None,
184
+ max_concurrent_cache_files: int = 10000,
185
+ timeout: Union[int, float] = 100,
186
+ **kwargs,
187
+ ):
188
+ # prepare info
189
+ self.config_name = config_name or "default"
190
+ info = self._info()
191
+ info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
192
+ info.config_name = self.config_name
193
+ info.experiment_id = experiment_id or "default_experiment"
194
+ MetricInfoMixin.__init__(self, info) # For easy access on low level
195
+
196
+ # Safety checks on num_process and process_id
197
+ if not isinstance(process_id, int) or process_id < 0:
198
+ raise ValueError("'process_id' should be a non-negative integer")
199
+ if not isinstance(num_process, int) or num_process <= process_id:
200
+ raise ValueError("'num_process' should be a number greater than process_id")
201
+ if keep_in_memory and num_process != 1:
202
+ raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
203
+
204
+ self.num_process = num_process
205
+ self.process_id = process_id
206
+ self.max_concurrent_cache_files = max_concurrent_cache_files
207
+
208
+ self.keep_in_memory = keep_in_memory
209
+ self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
210
+ self.data_dir = self._build_data_dir()
211
+ if seed is None:
212
+ _, seed, pos, *_ = np.random.get_state()
213
+ self.seed: int = seed[pos] if pos < 624 else seed[0]
214
+ else:
215
+ self.seed: int = seed
216
+ self.timeout: Union[int, float] = timeout
217
+
218
+ # Update 'compute' and 'add' docstring
219
+ # methods need to be copied otherwise it changes the docstrings of every instance
220
+ self.compute = types.MethodType(copyfunc(self.compute), self)
221
+ self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
222
+ self.add = types.MethodType(copyfunc(self.add), self)
223
+ self.compute.__func__.__doc__ += self.info.inputs_description
224
+ self.add_batch.__func__.__doc__ += self.info.inputs_description
225
+ self.add.__func__.__doc__ += self.info.inputs_description
226
+
227
+ # self.arrow_schema = pa.schema(field for field in self.info.features.type)
228
+ self.buf_writer = None
229
+ self.writer = None
230
+ self.writer_batch_size = None
231
+ self.data = None
232
+
233
+ # This is the cache file we store our predictions/references in
234
+ # Keep it None for now so we can (cloud)pickle the object
235
+ self.cache_file_name = None
236
+ self.filelock = None
237
+ self.rendez_vous_lock = None
238
+
239
+ # This is all the cache files on which we have a lock when we are in a distributed setting
240
+ self.file_paths = None
241
+ self.filelocks = None
242
+
243
+ def __len__(self):
244
+ """Return the number of examples (predictions or predictions/references pair)
245
+ currently stored in the metric's cache.
246
+ """
247
+ return 0 if self.writer is None else len(self.writer)
248
+
249
+ def __repr__(self):
250
+ return (
251
+ f'Metric(name: "{self.name}", features: {self.features}, '
252
+ f'usage: """{self.inputs_description}""", '
253
+ f"stored examples: {len(self)})"
254
+ )
255
+
256
+ def _build_data_dir(self):
257
+ """Path of this metric in cache_dir:
258
+ Will be:
259
+ self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
260
+ If any of these elements is missing, the corresponding subfolders are dropped.
261
+ """
262
+ builder_data_dir = self._data_dir_root
263
+ builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
264
+ os.makedirs(builder_data_dir, exist_ok=True)
265
+ return builder_data_dir
266
+
267
+ def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
268
+ """Create a new cache file. If the default cache file is used, we generate a new hash."""
269
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
270
+ filelock = None
271
+ for i in range(self.max_concurrent_cache_files):
272
+ filelock = FileLock(file_path + ".lock")
273
+ try:
274
+ filelock.acquire(timeout=timeout)
275
+ except Timeout:
276
+ # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup),
277
+ # We raise an error
278
+ if self.num_process != 1:
279
+ raise ValueError(
280
+ f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
281
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
282
+ f"between distributed metric instances."
283
+ ) from None
284
+ if i == self.max_concurrent_cache_files - 1:
285
+ raise ValueError(
286
+ f"Cannot acquire lock: too many metric instances are operating concurrently on this file system. "
287
+ f"You should set a larger value of max_concurrent_cache_files when creating the metric "
288
+ f"(current value is {self.max_concurrent_cache_files})."
289
+ ) from None
290
+ # In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name.
291
+ file_uuid = str(uuid.uuid4())
292
+ file_path = os.path.join(
293
+ self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
294
+ )
295
+ else:
296
+ break
297
+
298
+ return file_path, filelock
299
+
300
+ def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
301
+ """Get a lock on all the cache files in a distributed setup.
302
+ We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
303
+ """
304
+ if self.num_process == 1:
305
+ if self.cache_file_name is None:
306
+ raise ValueError(
307
+ "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
308
+ "at least once before calling `compute`."
309
+ )
310
+ file_paths = [self.cache_file_name]
311
+ else:
312
+ file_paths = [
313
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
314
+ for process_id in range(self.num_process)
315
+ ]
316
+
317
+ # Let's acquire a lock on each process files to be sure they are finished writing
318
+ filelocks = []
319
+ for process_id, file_path in enumerate(file_paths):
320
+ if process_id == 0: # process 0 already has its lock file
321
+ filelocks.append(self.filelock)
322
+ else:
323
+ filelock = FileLock(file_path + ".lock")
324
+ try:
325
+ filelock.acquire(timeout=self.timeout)
326
+ except Timeout:
327
+ raise ValueError(
328
+ f"Cannot acquire lock on cached file {file_path} for process {process_id}."
329
+ ) from None
330
+ else:
331
+ filelocks.append(filelock)
332
+
333
+ return file_paths, filelocks
334
+
335
+ def _check_all_processes_locks(self):
336
+ expected_lock_file_names = [
337
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
338
+ for process_id in range(self.num_process)
339
+ ]
340
+ for expected_lock_file_name in expected_lock_file_names:
341
+ nofilelock = FileFreeLock(expected_lock_file_name)
342
+ try:
343
+ nofilelock.acquire(timeout=self.timeout)
344
+ except Timeout:
345
+ raise ValueError(
346
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
347
+ ) from None
348
+ else:
349
+ nofilelock.release()
350
+
351
+ def _check_rendez_vous(self):
352
+ expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
353
+ nofilelock = FileFreeLock(expected_lock_file_name)
354
+ try:
355
+ nofilelock.acquire(timeout=self.timeout)
356
+ except Timeout:
357
+ raise ValueError(
358
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
359
+ ) from None
360
+ else:
361
+ nofilelock.release()
362
+ lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
363
+ rendez_vous_lock = FileLock(lock_file_name)
364
+ try:
365
+ rendez_vous_lock.acquire(timeout=self.timeout)
366
+ except Timeout:
367
+ raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
368
+ else:
369
+ rendez_vous_lock.release()
370
+
371
+ def _finalize(self):
372
+ """Close all the writing processes and load/gather the data
373
+ from all the nodes if main node or all_process is True.
374
+ """
375
+ if self.writer is not None:
376
+ self.writer.finalize()
377
+ self.writer = None
378
+ # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
379
+ if self.filelock is not None and self.process_id > 0:
380
+ self.filelock.release()
381
+
382
+ if self.keep_in_memory:
383
+ # Read the predictions and references
384
+ reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
385
+ self.data = Dataset.from_buffer(self.buf_writer.getvalue())
386
+
387
+ elif self.process_id == 0:
388
+ # Let's acquire a lock on each node files to be sure they are finished writing
389
+ file_paths, filelocks = self._get_all_cache_files()
390
+
391
+ # Read the predictions and references
392
+ try:
393
+ reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
394
+ self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
395
+ except FileNotFoundError:
396
+ raise ValueError(
397
+ "Error in finalize: another metric instance is already using the local cache file. "
398
+ "Please specify an experiment_id to avoid collision between distributed metric instances."
399
+ ) from None
400
+
401
+ # Store file paths and locks and we will release/delete them after the computation.
402
+ self.file_paths = file_paths
403
+ self.filelocks = filelocks
404
+
405
+ def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
406
+ """Compute the metrics.
407
+
408
+ Usage of positional arguments is not allowed to prevent mistakes.
409
+
410
+ Args:
411
+ predictions (list/array/tensor, optional): Predictions.
412
+ references (list/array/tensor, optional): References.
413
+ **kwargs (optional): Keyword arguments that will be forwarded to the metric's :meth:`_compute`
414
+ method (see details in the docstring).
415
+
416
+ Return:
417
+ dict or None
418
+
419
+ - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``).
420
+ - None if the metric is not run on the main process (``process_id != 0``).
421
+
422
+ Example:
423
+
424
+ ```py
425
+ >>> from datasets import load_metric
426
+ >>> metric = load_metric("accuracy")
427
+ >>> accuracy = metric.compute(predictions=model_prediction, references=labels)
428
+ ```
429
+ """
430
+ all_kwargs = {"predictions": predictions, "references": references, **kwargs}
431
+ if predictions is None and references is None:
432
+ missing_kwargs = {k: None for k in self.features if k not in all_kwargs}
433
+ all_kwargs.update(missing_kwargs)
434
+ else:
435
+ missing_inputs = [k for k in self.features if k not in all_kwargs]
436
+ if missing_inputs:
437
+ raise ValueError(
438
+ f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}"
439
+ )
440
+ inputs = {input_name: all_kwargs[input_name] for input_name in self.features}
441
+ compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features}
442
+
443
+ if any(v is not None for v in inputs.values()):
444
+ self.add_batch(**inputs)
445
+ self._finalize()
446
+
447
+ self.cache_file_name = None
448
+ self.filelock = None
449
+
450
+ if self.process_id == 0:
451
+ self.data.set_format(type=self.info.format)
452
+
453
+ inputs = {input_name: self.data[input_name] for input_name in self.features}
454
+ with temp_seed(self.seed):
455
+ output = self._compute(**inputs, **compute_kwargs)
456
+
457
+ if self.buf_writer is not None:
458
+ self.buf_writer = None
459
+ del self.data
460
+ self.data = None
461
+ else:
462
+ # Release locks and delete all the cache files. Process 0 is released last.
463
+ for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
464
+ logger.info(f"Removing {file_path}")
465
+ del self.data
466
+ self.data = None
467
+ del self.writer
468
+ self.writer = None
469
+ os.remove(file_path)
470
+ filelock.release()
471
+
472
+ return output
473
+ else:
474
+ return None
475
+
476
+ def add_batch(self, *, predictions=None, references=None, **kwargs):
477
+ """Add a batch of predictions and references for the metric's stack.
478
+
479
+ Args:
480
+ predictions (list/array/tensor, optional): Predictions.
481
+ references (list/array/tensor, optional): References.
482
+
483
+ Example:
484
+
485
+ ```py
486
+ >>> from datasets import load_metric
487
+ >>> metric = load_metric("accuracy")
488
+ >>> metric.add_batch(predictions=model_prediction, references=labels)
489
+ ```
490
+ """
491
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
492
+ if bad_inputs:
493
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
494
+ batch = {"predictions": predictions, "references": references, **kwargs}
495
+ batch = {input_name: batch[input_name] for input_name in self.features}
496
+ batch = self.info.features.encode_batch(batch)
497
+ if self.writer is None:
498
+ self._init_writer()
499
+ try:
500
+ self.writer.write_batch(batch)
501
+ except pa.ArrowInvalid:
502
+ if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
503
+ col0 = next(iter(batch))
504
+ bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
505
+ error_msg = (
506
+ f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
507
+ )
508
+ elif sorted(self.features) != ["predictions", "references"]:
509
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
510
+ error_msg_inputs = ",\n".join(
511
+ f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
512
+ )
513
+ error_msg += error_msg_inputs
514
+ else:
515
+ error_msg = (
516
+ f"Predictions and/or references don't match the expected format.\n"
517
+ f"Expected format: {self.features},\n"
518
+ f"Input predictions: {summarize_if_long_list(predictions)},\n"
519
+ f"Input references: {summarize_if_long_list(references)}"
520
+ )
521
+ raise ValueError(error_msg) from None
522
+
523
+ def add(self, *, prediction=None, reference=None, **kwargs):
524
+ """Add one prediction and reference for the metric's stack.
525
+
526
+ Args:
527
+ prediction (list/array/tensor, optional): Predictions.
528
+ reference (list/array/tensor, optional): References.
529
+
530
+ Example:
531
+
532
+ ```py
533
+ >>> from datasets import load_metric
534
+ >>> metric = load_metric("accuracy")
535
+ >>> metric.add(prediction=model_prediction, reference=label)
536
+ ```
537
+ """
538
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
539
+ if bad_inputs:
540
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
541
+ example = {"predictions": prediction, "references": reference, **kwargs}
542
+ example = {input_name: example[input_name] for input_name in self.features}
543
+ example = self.info.features.encode_example(example)
544
+ if self.writer is None:
545
+ self._init_writer()
546
+ try:
547
+ self.writer.write(example)
548
+ except pa.ArrowInvalid:
549
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
550
+ error_msg_inputs = ",\n".join(
551
+ f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
552
+ )
553
+ error_msg += error_msg_inputs
554
+ raise ValueError(error_msg) from None
555
+
556
+ def _init_writer(self, timeout=1):
557
+ if self.num_process > 1:
558
+ if self.process_id == 0:
559
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
560
+ self.rendez_vous_lock = FileLock(file_path)
561
+ try:
562
+ self.rendez_vous_lock.acquire(timeout=timeout)
563
+ except TimeoutError:
564
+ raise ValueError(
565
+ f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
566
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
567
+ f"between distributed metric instances."
568
+ ) from None
569
+
570
+ if self.keep_in_memory:
571
+ self.buf_writer = pa.BufferOutputStream()
572
+ self.writer = ArrowWriter(
573
+ features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
574
+ )
575
+ else:
576
+ self.buf_writer = None
577
+
578
+ # Get cache file name and lock it
579
+ if self.cache_file_name is None or self.filelock is None:
580
+ cache_file_name, filelock = self._create_cache_file() # get ready
581
+ self.cache_file_name = cache_file_name
582
+ self.filelock = filelock
583
+
584
+ self.writer = ArrowWriter(
585
+ features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
586
+ )
587
+ # Set up the rendez-vous here if we are in a distributed setting
588
+ if self.num_process > 1:
589
+ if self.process_id == 0:
590
+ self._check_all_processes_locks() # wait for everyone to be ready
591
+ self.rendez_vous_lock.release() # let everyone go
592
+ else:
593
+ self._check_rendez_vous() # wait for master to be ready and to let everyone go
594
+
595
+ def _info(self) -> MetricInfo:
596
+ """Construct the MetricInfo object. See `MetricInfo` for details.
597
+
598
+ Warning: This function is only called once and the result is cached for all
599
+ following .info() calls.
600
+
601
+ Returns:
602
+ info: (MetricInfo) The metrics information
603
+ """
604
+ raise NotImplementedError
605
+
606
+ def download_and_prepare(
607
+ self,
608
+ download_config: Optional[DownloadConfig] = None,
609
+ dl_manager: Optional[DownloadManager] = None,
610
+ ):
611
+ """Downloads and prepares dataset for reading.
612
+
613
+ Args:
614
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
615
+ dl_manager (:class:`DownloadManager`, optional): Specific download manager to use.
616
+ """
617
+ if dl_manager is None:
618
+ if download_config is None:
619
+ download_config = DownloadConfig()
620
+ download_config.cache_dir = os.path.join(self.data_dir, "downloads")
621
+ download_config.force_download = False
622
+
623
+ dl_manager = DownloadManager(
624
+ dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
625
+ )
626
+
627
+ self._download_and_prepare(dl_manager)
628
+
629
+ def _download_and_prepare(self, dl_manager):
630
+ """Downloads and prepares resources for the metric.
631
+
632
+ This is the internal implementation to overwrite called when user calls
633
+ `download_and_prepare`. It should download all required resources for the metric.
634
+
635
+ Args:
636
+ dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
637
+ """
638
+ return None
639
+
640
+ def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
641
+ """This method defines the common API for all the metrics in the library"""
642
+ raise NotImplementedError
643
+
644
+ def __del__(self):
645
+ if hasattr(self, "filelock") and self.filelock is not None:
646
+ self.filelock.release()
647
+ if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
648
+ self.rendez_vous_lock.release()
649
+ if hasattr(self, "writer"): # in case it was already deleted
650
+ del self.writer
651
+ if hasattr(self, "data"): # in case it was already deleted
652
+ del self.data
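For reference, a minimal sketch of how this (deprecated) `Metric` base class is subclassed and driven. The `ExactMatch` class, the exact `MetricInfo` fields, and the sample values below are illustrative assumptions, not part of this upload:

```py
import datasets
from datasets import Features, MetricInfo, Value


class ExactMatch(datasets.Metric):
    def _info(self) -> MetricInfo:
        # Called once; the result is cached and exposed through MetricInfoMixin (self.name, self.features, ...).
        return MetricInfo(
            description="Toy exact-match metric (illustrative).",
            citation="",
            inputs_description="Args: predictions (list of str), references (list of str).",
            features=Features({"predictions": Value("string"), "references": Value("string")}),
        )

    def _compute(self, predictions=None, references=None):
        return {"exact_match": sum(p == r for p, r in zip(predictions, references)) / len(references)}


metric = ExactMatch(experiment_id="demo")  # deprecated API: prefer 🤗 Evaluate for new code
metric.add_batch(predictions=["a", "b"], references=["a", "c"])  # buffered into an Arrow cache file
print(metric.compute())  # expected: {'exact_match': 0.5}
```

In a distributed run, each node would also pass `num_process`/`process_id`, every node would call `compute()`, and only `process_id == 0` would receive the gathered result (the other nodes get `None`).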
llmeval-env/lib/python3.10/site-packages/datasets/naming.py ADDED
@@ -0,0 +1,84 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Utilities for file names."""
17
+
18
+ import itertools
19
+ import os
20
+ import re
21
+
22
+
23
+ _uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
24
+ _lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
25
+
26
+ _single_underscore_re = re.compile(r"(?<!_)_(?!_)")
27
+ _multiple_underscores_re = re.compile(r"(_{2,})")
28
+
29
+ _split_re = r"^\w+(\.\w+)*$"
30
+
31
+ INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
32
+
33
+
34
+ def camelcase_to_snakecase(name):
35
+ """Convert camel-case string to snake-case."""
36
+ name = _uppercase_uppercase_re.sub(r"\1_\2", name)
37
+ name = _lowercase_uppercase_re.sub(r"\1_\2", name)
38
+ return name.lower()
39
+
40
+
41
+ def snakecase_to_camelcase(name):
42
+ """Convert snake-case string to camel-case string."""
43
+ name = _single_underscore_re.split(name)
44
+ name = [_multiple_underscores_re.split(n) for n in name]
45
+ return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
46
+
47
+
48
+ def filename_prefix_for_name(name):
49
+ if os.path.basename(name) != name:
50
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
51
+ return camelcase_to_snakecase(name)
52
+
53
+
54
+ def filename_prefix_for_split(name, split):
55
+ if os.path.basename(name) != name:
56
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
57
+ if not re.match(_split_re, split):
58
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
59
+ return f"{filename_prefix_for_name(name)}-{split}"
60
+
61
+
62
+ def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
63
+ prefix = filename_prefix_for_split(dataset_name, split)
64
+ if filetype_suffix:
65
+ prefix += f".{filetype_suffix}"
66
+ filepath = os.path.join(data_dir, prefix)
67
+ return f"{filepath}*"
68
+
69
+
70
+ def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
71
+ prefix = filename_prefix_for_split(dataset_name, split)
72
+ prefix = os.path.join(path, prefix)
73
+
74
+ if shard_lengths:
75
+ num_shards = len(shard_lengths)
76
+ filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
77
+ if filetype_suffix:
78
+ filenames = [filename + f".{filetype_suffix}" for filename in filenames]
79
+ return filenames
80
+ else:
81
+ filename = prefix
82
+ if filetype_suffix:
83
+ filename += f".{filetype_suffix}"
84
+ return [filename]
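A quick illustration of what these helpers produce; the cache path and shard counts are made up, and the return values follow directly from the code above:

```py
from datasets.naming import (
    camelcase_to_snakecase,
    filenames_for_dataset_split,
    snakecase_to_camelcase,
)

camelcase_to_snakecase("MyDatasetName")    # 'my_dataset_name'
snakecase_to_camelcase("my_dataset_name")  # 'MyDatasetName'

# Arrow shard names for a 'train' split written in two shards under /cache:
filenames_for_dataset_split(
    "/cache",
    dataset_name="MyDatasetName",
    split="train",
    filetype_suffix="arrow",
    shard_lengths=[100, 100],
)
# ['/cache/my_dataset_name-train-00000-of-00002.arrow',
#  '/cache/my_dataset_name-train-00001-of-00002.arrow']
```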
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (210 Bytes).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc ADDED
Binary file (1.36 kB).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc ADDED
Binary file (6.82 kB).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py ADDED
@@ -0,0 +1,207 @@
1
+ import glob
2
+ import json
3
+ import os
4
+ import shutil
5
+ import time
6
+ import warnings
7
+ from pathlib import Path
8
+ from typing import List, Optional, Tuple, Union
9
+
10
+ import pyarrow as pa
11
+
12
+ import datasets
13
+ import datasets.config
14
+ import datasets.data_files
15
+ from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split
16
+
17
+
18
+ logger = datasets.utils.logging.get_logger(__name__)
19
+
20
+
21
+ def _get_modification_time(cached_directory_path):
22
+ return (Path(cached_directory_path)).stat().st_mtime
23
+
24
+
25
+ def _find_hash_in_cache(
26
+ dataset_name: str,
27
+ config_name: Optional[str],
28
+ cache_dir: Optional[str],
29
+ config_kwargs: dict,
30
+ custom_features: Optional[datasets.Features],
31
+ ) -> Tuple[str, str, str]:
32
+ if config_name or config_kwargs or custom_features:
33
+ config_id = datasets.BuilderConfig(config_name or "default").create_config_id(
34
+ config_kwargs=config_kwargs, custom_features=custom_features
35
+ )
36
+ else:
37
+ config_id = None
38
+ cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE))
39
+ namespace_and_dataset_name = dataset_name.split("/")
40
+ namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
41
+ cached_relative_path = "___".join(namespace_and_dataset_name)
42
+ cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
43
+ cached_directory_paths = [
44
+ cached_directory_path
45
+ for cached_directory_path in glob.glob(
46
+ os.path.join(cached_datasets_directory_path_root, config_id or "*", "*", "*")
47
+ )
48
+ if os.path.isdir(cached_directory_path)
49
+ and (
50
+ config_kwargs
51
+ or custom_features
52
+ or json.loads(Path(cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
53
+ == Path(cached_directory_path).parts[-3] # no extra params => config_id == config_name
54
+ )
55
+ ]
56
+ if not cached_directory_paths:
57
+ cached_directory_paths = [
58
+ cached_directory_path
59
+ for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
60
+ if os.path.isdir(cached_directory_path)
61
+ ]
62
+ available_configs = sorted(
63
+ {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths}
64
+ )
65
+ raise ValueError(
66
+ f"Couldn't find cache for {dataset_name}"
67
+ + (f" for config '{config_id}'" if config_id else "")
68
+ + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "")
69
+ )
70
+ # get most recent
71
+ cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1])
72
+ version, hash = cached_directory_path.parts[-2:]
73
+ other_configs = [
74
+ Path(_cached_directory_path).parts[-3]
75
+ for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash))
76
+ if os.path.isdir(_cached_directory_path)
77
+ and (
78
+ config_kwargs
79
+ or custom_features
80
+ or json.loads(Path(_cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
81
+ == Path(_cached_directory_path).parts[-3] # no extra params => config_id == config_name
82
+ )
83
+ ]
84
+ if not config_id and len(other_configs) > 1:
85
+ raise ValueError(
86
+ f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}"
87
+ f"\nPlease specify which configuration to reload from the cache, e.g."
88
+ f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')"
89
+ )
90
+ config_name = cached_directory_path.parts[-3]
91
+ warning_msg = (
92
+ f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} "
93
+ f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})."
94
+ )
95
+ logger.warning(warning_msg)
96
+ return config_name, version, hash
97
+
98
+
99
+ class Cache(datasets.ArrowBasedBuilder):
100
+ def __init__(
101
+ self,
102
+ cache_dir: Optional[str] = None,
103
+ dataset_name: Optional[str] = None,
104
+ config_name: Optional[str] = None,
105
+ version: Optional[str] = "0.0.0",
106
+ hash: Optional[str] = None,
107
+ base_path: Optional[str] = None,
108
+ info: Optional[datasets.DatasetInfo] = None,
109
+ features: Optional[datasets.Features] = None,
110
+ token: Optional[Union[bool, str]] = None,
111
+ use_auth_token="deprecated",
112
+ repo_id: Optional[str] = None,
113
+ data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None,
114
+ data_dir: Optional[str] = None,
115
+ storage_options: Optional[dict] = None,
116
+ writer_batch_size: Optional[int] = None,
117
+ name="deprecated",
118
+ **config_kwargs,
119
+ ):
120
+ if use_auth_token != "deprecated":
121
+ warnings.warn(
122
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
123
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
124
+ FutureWarning,
125
+ )
126
+ token = use_auth_token
127
+ if name != "deprecated":
128
+ warnings.warn(
129
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
130
+ category=FutureWarning,
131
+ )
132
+ config_name = name
133
+ if repo_id is None and dataset_name is None:
134
+ raise ValueError("repo_id or dataset_name is required for the Cache dataset builder")
135
+ if data_files is not None:
136
+ config_kwargs["data_files"] = data_files
137
+ if data_dir is not None:
138
+ config_kwargs["data_dir"] = data_dir
139
+ if hash == "auto" and version == "auto":
140
+ config_name, version, hash = _find_hash_in_cache(
141
+ dataset_name=repo_id or dataset_name,
142
+ config_name=config_name,
143
+ cache_dir=cache_dir,
144
+ config_kwargs=config_kwargs,
145
+ custom_features=features,
146
+ )
147
+ elif hash == "auto" or version == "auto":
148
+ raise NotImplementedError("Pass both hash='auto' and version='auto' instead")
149
+ super().__init__(
150
+ cache_dir=cache_dir,
151
+ dataset_name=dataset_name,
152
+ config_name=config_name,
153
+ version=version,
154
+ hash=hash,
155
+ base_path=base_path,
156
+ info=info,
157
+ token=token,
158
+ repo_id=repo_id,
159
+ storage_options=storage_options,
160
+ writer_batch_size=writer_batch_size,
161
+ )
162
+
163
+ def _info(self) -> datasets.DatasetInfo:
164
+ return datasets.DatasetInfo()
165
+
166
+ def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs):
167
+ if not os.path.exists(self.cache_dir):
168
+ raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}")
169
+ if output_dir is not None and output_dir != self.cache_dir:
170
+ shutil.copytree(self.cache_dir, output_dir)
171
+
172
+ def _split_generators(self, dl_manager):
173
+ # used to stream from cache
174
+ if isinstance(self.info.splits, datasets.SplitDict):
175
+ split_infos: List[datasets.SplitInfo] = list(self.info.splits.values())
176
+ else:
177
+ raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}")
178
+ return [
179
+ datasets.SplitGenerator(
180
+ name=split_info.name,
181
+ gen_kwargs={
182
+ "files": filenames_for_dataset_split(
183
+ self.cache_dir,
184
+ dataset_name=self.dataset_name,
185
+ split=split_info.name,
186
+ filetype_suffix="arrow",
187
+ shard_lengths=split_info.shard_lengths,
188
+ )
189
+ },
190
+ )
191
+ for split_info in split_infos
192
+ ]
193
+
194
+ def _generate_tables(self, files):
195
+ # used to stream from cache
196
+ for file_idx, file in enumerate(files):
197
+ with open(file, "rb") as f:
198
+ try:
199
+ for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
200
+ pa_table = pa.Table.from_batches([record_batch])
201
+ # Uncomment for debugging (will print the Arrow table size and elements)
202
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
203
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
204
+ yield f"{file_idx}_{batch_idx}", pa_table
205
+ except ValueError as e:
206
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
207
+ raise
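A hedged sketch of how this `Cache` builder is typically used to reload an already prepared dataset; the dataset name is a placeholder and `as_dataset` comes from the parent builder classes rather than this file:

```py
from datasets.packaged_modules.cache.cache import Cache

# hash="auto" together with version="auto" makes _find_hash_in_cache pick the most
# recently modified cache directory for this dataset (it logs which one it chose and
# raises ValueError if nothing is cached yet or several configs are ambiguous).
builder = Cache(dataset_name="my_dataset", version="auto", hash="auto")
builder.download_and_prepare()  # here this only verifies that the cache directory exists
ds = builder.as_dataset()       # loads the cached Arrow data via the inherited builder machinery
```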
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (219 Bytes).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py ADDED
@@ -0,0 +1,406 @@
1
+ import collections
2
+ import itertools
3
+ import os
4
+ from dataclasses import dataclass
5
+ from typing import List, Optional, Tuple, Type
6
+
7
+ import pandas as pd
8
+ import pyarrow as pa
9
+ import pyarrow.json as paj
10
+
11
+ import datasets
12
+ from datasets.features.features import FeatureType
13
+ from datasets.tasks.base import TaskTemplate
14
+
15
+
16
+ logger = datasets.utils.logging.get_logger(__name__)
17
+
18
+
19
+ def count_path_segments(path):
20
+ return path.replace("\\", "/").count("/")
21
+
22
+
23
+ @dataclass
24
+ class FolderBasedBuilderConfig(datasets.BuilderConfig):
25
+ """BuilderConfig for AutoFolder."""
26
+
27
+ features: Optional[datasets.Features] = None
28
+ drop_labels: bool = None
29
+ drop_metadata: bool = None
30
+
31
+
32
+ class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
33
+ """
34
+ Base class for generic data loaders for image and audio data.
35
+
36
+
37
+ Abstract class attributes to be overridden by a child class:
38
+ BASE_FEATURE: feature object to decode data (e.g. datasets.Image, datasets.Audio, ...)
39
+ BASE_COLUMN_NAME: string key name of a base feature (e.g. "image", "audio", ...)
40
+ BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
41
+ EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
42
+ will be included in a dataset)
43
+ CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure
44
+ """
45
+
46
+ BASE_FEATURE: Type[FeatureType]
47
+ BASE_COLUMN_NAME: str
48
+ BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
49
+ EXTENSIONS: List[str]
50
+ CLASSIFICATION_TASK: TaskTemplate
51
+
52
+ METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
53
+
54
+ def _info(self):
55
+ return datasets.DatasetInfo(features=self.config.features)
56
+
57
+ def _split_generators(self, dl_manager):
58
+ if not self.config.data_files:
59
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
60
+ dl_manager.download_config.extract_on_the_fly = True
61
+ # Do an early pass if:
62
+ # * `drop_labels` is None (default) or False, to infer the class labels
63
+ # * `drop_metadata` is None (default) or False, to find the metadata files
64
+ do_analyze = not self.config.drop_labels or not self.config.drop_metadata
65
+ labels, path_depths = set(), set()
66
+ metadata_files = collections.defaultdict(set)
67
+
68
+ def analyze(files_or_archives, downloaded_files_or_dirs, split):
69
+ if len(downloaded_files_or_dirs) == 0:
70
+ return
71
+ # The files are separated from the archives at this point, so check the first sample
72
+ # to see if it's a file or a directory and iterate accordingly
73
+ if os.path.isfile(downloaded_files_or_dirs[0]):
74
+ original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
75
+ for original_file, downloaded_file in zip(original_files, downloaded_files):
76
+ original_file, downloaded_file = str(original_file), str(downloaded_file)
77
+ _, original_file_ext = os.path.splitext(original_file)
78
+ if original_file_ext.lower() in self.EXTENSIONS:
79
+ if not self.config.drop_labels:
80
+ labels.add(os.path.basename(os.path.dirname(original_file)))
81
+ path_depths.add(count_path_segments(original_file))
82
+ elif os.path.basename(original_file) in self.METADATA_FILENAMES:
83
+ metadata_files[split].add((original_file, downloaded_file))
84
+ else:
85
+ original_file_name = os.path.basename(original_file)
86
+ logger.debug(
87
+ f"The file '{original_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
88
+ )
89
+ else:
90
+ archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
91
+ for archive, downloaded_dir in zip(archives, downloaded_dirs):
92
+ archive, downloaded_dir = str(archive), str(downloaded_dir)
93
+ for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
94
+ _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
95
+ if downloaded_dir_file_ext in self.EXTENSIONS:
96
+ if not self.config.drop_labels:
97
+ labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
98
+ path_depths.add(count_path_segments(downloaded_dir_file))
99
+ elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
100
+ metadata_files[split].add((None, downloaded_dir_file))
101
+ else:
102
+ archive_file_name = os.path.basename(archive)
103
+ original_file_name = os.path.basename(downloaded_dir_file)
104
+ logger.debug(
105
+ f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
106
+ )
107
+
108
+ data_files = self.config.data_files
109
+ splits = []
110
+ for split_name, files in data_files.items():
111
+ if isinstance(files, str):
112
+ files = [files]
113
+ files, archives = self._split_files_and_archives(files)
114
+ downloaded_files = dl_manager.download(files)
115
+ downloaded_dirs = dl_manager.download_and_extract(archives)
116
+ if do_analyze: # drop_metadata is None or False, drop_labels is None or False
117
+ logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
118
+ analyze(files, downloaded_files, split_name)
119
+ analyze(archives, downloaded_dirs, split_name)
120
+
121
+ if metadata_files:
122
+ # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
123
+ add_metadata = not self.config.drop_metadata
124
+ # if `metadata_files` are found, add labels only if
125
+ # `drop_labels` is set up to False explicitly (not-default behavior)
126
+ add_labels = self.config.drop_labels is False
127
+ else:
128
+ # if `metadata_files` are not found, don't add metadata
129
+ add_metadata = False
130
+ # if `metadata_files` are not found and `drop_labels` is None (default) -
131
+ # add labels if files are on the same level in directory hierarchy and there is more than one label
132
+ add_labels = (
133
+ (len(labels) > 1 and len(path_depths) == 1)
134
+ if self.config.drop_labels is None
135
+ else not self.config.drop_labels
136
+ )
137
+
138
+ if add_labels:
139
+ logger.info("Adding the labels inferred from data directories to the dataset's features...")
140
+ if add_metadata:
141
+ logger.info("Adding metadata to the dataset...")
142
+ else:
143
+ add_labels, add_metadata, metadata_files = False, False, {}
144
+
145
+ splits.append(
146
+ datasets.SplitGenerator(
147
+ name=split_name,
148
+ gen_kwargs={
149
+ "files": list(zip(files, downloaded_files))
150
+ + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
151
+ "metadata_files": metadata_files,
152
+ "split_name": split_name,
153
+ "add_labels": add_labels,
154
+ "add_metadata": add_metadata,
155
+ },
156
+ )
157
+ )
158
+
159
+ if add_metadata:
160
+ # Verify that:
161
+ # * all metadata files have the same set of features
162
+ # * the `file_name` key is one of the metadata keys and is of type string
163
+ features_per_metadata_file: List[Tuple[str, datasets.Features]] = []
164
+
165
+ # Check that all metadata files share the same format
166
+ metadata_ext = {
167
+ os.path.splitext(original_metadata_file)[-1]
168
+ for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
169
+ }
170
+ if len(metadata_ext) > 1:
171
+ raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
172
+ metadata_ext = metadata_ext.pop()
173
+
174
+ for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
175
+ pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
176
+ features_per_metadata_file.append(
177
+ (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
178
+ )
179
+ for downloaded_metadata_file, metadata_features in features_per_metadata_file:
180
+ if metadata_features != features_per_metadata_file[0][1]:
181
+ raise ValueError(
182
+ f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"
183
+ )
184
+ metadata_features = features_per_metadata_file[0][1]
185
+ if "file_name" not in metadata_features:
186
+ raise ValueError("`file_name` must be present as dictionary key in metadata files")
187
+ if metadata_features["file_name"] != datasets.Value("string"):
188
+ raise ValueError("`file_name` key must be a string")
189
+ del metadata_features["file_name"]
190
+ else:
191
+ metadata_features = None
192
+
193
+ # Normally, we would do this in _info, but we need to know the labels and/or metadata
194
+ # before building the features
195
+ if self.config.features is None:
196
+ if add_labels:
197
+ self.info.features = datasets.Features(
198
+ {
199
+ self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
200
+ "label": datasets.ClassLabel(names=sorted(labels)),
201
+ }
202
+ )
203
+ self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)]
204
+ else:
205
+ self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
206
+
207
+ if add_metadata:
208
+ # Warn if there are duplicated keys in metadata compared to the existing features
209
+ # (`BASE_COLUMN_NAME`, optionally "label")
210
+ duplicated_keys = set(self.info.features) & set(metadata_features)
211
+ if duplicated_keys:
212
+ logger.warning(
213
+ f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
214
+ f"the features dictionary."
215
+ )
216
+ # skip metadata duplicated keys
217
+ self.info.features.update(
218
+ {
219
+ feature: metadata_features[feature]
220
+ for feature in metadata_features
221
+ if feature not in duplicated_keys
222
+ }
223
+ )
224
+
225
+ return splits
226
+
227
+ def _split_files_and_archives(self, data_files):
228
+ files, archives = [], []
229
+ for data_file in data_files:
230
+ _, data_file_ext = os.path.splitext(data_file)
231
+ if data_file_ext.lower() in self.EXTENSIONS:
232
+ files.append(data_file)
233
+ elif os.path.basename(data_file) in self.METADATA_FILENAMES:
234
+ files.append(data_file)
235
+ else:
236
+ archives.append(data_file)
237
+ return files, archives
238
+
239
+ def _read_metadata(self, metadata_file, metadata_ext: str = ""):
240
+ if metadata_ext == ".csv":
241
+ # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
242
+ return pa.Table.from_pandas(pd.read_csv(metadata_file))
243
+ else:
244
+ with open(metadata_file, "rb") as f:
245
+ return paj.read_json(f)
246
+
247
+ def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
248
+ split_metadata_files = metadata_files.get(split_name, [])
249
+ sample_empty_metadata = (
250
+ {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
251
+ )
252
+ last_checked_dir = None
253
+ metadata_dir = None
254
+ metadata_dict = None
255
+ downloaded_metadata_file = None
256
+
257
+ metadata_ext = ""
258
+ if split_metadata_files:
259
+ metadata_ext = {
260
+ os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
261
+ }
262
+ metadata_ext = metadata_ext.pop()
263
+
264
+ file_idx = 0
265
+ for original_file, downloaded_file_or_dir in files:
266
+ if original_file is not None:
267
+ _, original_file_ext = os.path.splitext(original_file)
268
+ if original_file_ext.lower() in self.EXTENSIONS:
269
+ if add_metadata:
270
+ # If the file is a file of a needed type, and we've just entered a new directory,
271
+ # find the nearest metadata file (by counting path segments) for the directory
272
+ current_dir = os.path.dirname(original_file)
273
+ if last_checked_dir is None or last_checked_dir != current_dir:
274
+ last_checked_dir = current_dir
275
+ metadata_file_candidates = [
276
+ (
277
+ os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
278
+ metadata_file_candidate,
279
+ downloaded_metadata_file,
280
+ )
281
+ for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
282
+ if metadata_file_candidate
283
+ is not None # ignore metadata_files that are inside archives
284
+ and not os.path.relpath(
285
+ original_file, os.path.dirname(metadata_file_candidate)
286
+ ).startswith("..")
287
+ ]
288
+ if metadata_file_candidates:
289
+ _, metadata_file, downloaded_metadata_file = min(
290
+ metadata_file_candidates, key=lambda x: count_path_segments(x[0])
291
+ )
292
+ pa_metadata_table = self._read_metadata(
293
+ downloaded_metadata_file, metadata_ext=metadata_ext
294
+ )
295
+ pa_file_name_array = pa_metadata_table["file_name"]
296
+ pa_metadata_table = pa_metadata_table.drop(["file_name"])
297
+ metadata_dir = os.path.dirname(metadata_file)
298
+ metadata_dict = {
299
+ os.path.normpath(file_name).replace("\\", "/"): sample_metadata
300
+ for file_name, sample_metadata in zip(
301
+ pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
302
+ )
303
+ }
304
+ else:
305
+ raise ValueError(
306
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
307
+ )
308
+ if metadata_dir is not None and downloaded_metadata_file is not None:
309
+ file_relpath = os.path.relpath(original_file, metadata_dir)
310
+ file_relpath = file_relpath.replace("\\", "/")
311
+ if file_relpath not in metadata_dict:
312
+ raise ValueError(
313
+ f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
314
+ )
315
+ sample_metadata = metadata_dict[file_relpath]
316
+ else:
317
+ raise ValueError(
318
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
319
+ )
320
+ else:
321
+ sample_metadata = {}
322
+ if add_labels:
323
+ sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
324
+ else:
325
+ sample_label = {}
326
+ yield (
327
+ file_idx,
328
+ {
329
+ **sample_empty_metadata,
330
+ self.BASE_COLUMN_NAME: downloaded_file_or_dir,
331
+ **sample_metadata,
332
+ **sample_label,
333
+ },
334
+ )
335
+ file_idx += 1
336
+ else:
337
+ for downloaded_dir_file in downloaded_file_or_dir:
338
+ _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
339
+ if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
340
+ if add_metadata:
341
+ current_dir = os.path.dirname(downloaded_dir_file)
342
+ if last_checked_dir is None or last_checked_dir != current_dir:
343
+ last_checked_dir = current_dir
344
+ metadata_file_candidates = [
345
+ (
346
+ os.path.relpath(
347
+ downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
348
+ ),
349
+ metadata_file_candidate,
350
+ downloaded_metadata_file,
351
+ )
352
+ for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
353
+ if metadata_file_candidate
354
+ is None # ignore metadata_files that are not inside archives
355
+ and not os.path.relpath(
356
+ downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
357
+ ).startswith("..")
358
+ ]
359
+ if metadata_file_candidates:
360
+ _, metadata_file, downloaded_metadata_file = min(
361
+ metadata_file_candidates, key=lambda x: count_path_segments(x[0])
362
+ )
363
+ pa_metadata_table = self._read_metadata(
364
+ downloaded_metadata_file, metadata_ext=metadata_ext
365
+ )
366
+ pa_file_name_array = pa_metadata_table["file_name"]
367
+ pa_metadata_table = pa_metadata_table.drop(["file_name"])
368
+ metadata_dir = os.path.dirname(downloaded_metadata_file)
369
+ metadata_dict = {
370
+ os.path.normpath(file_name).replace("\\", "/"): sample_metadata
371
+ for file_name, sample_metadata in zip(
372
+ pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
373
+ )
374
+ }
375
+ else:
376
+ raise ValueError(
377
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
378
+ )
379
+ if metadata_dir is not None and downloaded_metadata_file is not None:
380
+ downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
381
+ downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
382
+ if downloaded_dir_file_relpath not in metadata_dict:
383
+ raise ValueError(
384
+ f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
385
+ )
386
+ sample_metadata = metadata_dict[downloaded_dir_file_relpath]
387
+ else:
388
+ raise ValueError(
389
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
390
+ )
391
+ else:
392
+ sample_metadata = {}
393
+ if add_labels:
394
+ sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
395
+ else:
396
+ sample_label = {}
397
+ yield (
398
+ file_idx,
399
+ {
400
+ **sample_empty_metadata,
401
+ self.BASE_COLUMN_NAME: downloaded_dir_file,
402
+ **sample_metadata,
403
+ **sample_label,
404
+ },
405
+ )
406
+ file_idx += 1
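To make the label and metadata rules above concrete, here is an assumed on-disk layout and the loader call that exercises them through the `imagefolder` packaged module added below; the paths and the `caption` column are illustrative:

```py
# data/
# ├── metadata.csv        # must contain a string 'file_name' column; other columns become features
# ├── cat/
# │   ├── 0001.jpg
# │   └── 0002.jpg
# └── dog/
#     └── 0003.jpg
#
# metadata.csv:
#   file_name,caption
#   cat/0001.jpg,a sleepy cat
#   cat/0002.jpg,another cat
#   dog/0003.jpg,a dog outside

from datasets import load_dataset

# With a metadata file present, the class label inferred from the parent folder is only
# added when drop_labels=False is passed explicitly (see the add_labels logic above).
ds = load_dataset("imagefolder", data_dir="data", split="train", drop_labels=False)
# Resulting features: 'image' (Image), 'label' (ClassLabel with names ['cat', 'dog']), 'caption' (string)
```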
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (210 Bytes).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc ADDED
Binary file (1.57 kB).
 
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py ADDED
@@ -0,0 +1,104 @@
1
+ from typing import List
2
+
3
+ import datasets
4
+ from datasets.tasks import ImageClassification
5
+
6
+ from ..folder_based_builder import folder_based_builder
7
+
8
+
9
+ logger = datasets.utils.logging.get_logger(__name__)
10
+
11
+
12
+ class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
13
+ """BuilderConfig for ImageFolder."""
14
+
15
+ drop_labels: bool = None
16
+ drop_metadata: bool = None
17
+
18
+
19
+ class ImageFolder(folder_based_builder.FolderBasedBuilder):
20
+ BASE_FEATURE = datasets.Image
21
+ BASE_COLUMN_NAME = "image"
22
+ BUILDER_CONFIG_CLASS = ImageFolderConfig
23
+ EXTENSIONS: List[str] # definition at the bottom of the script
24
+ CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")
25
+
26
+
27
+ # Obtained with:
28
+ # ```
29
+ # import PIL.Image
30
+ # IMAGE_EXTENSIONS = []
31
+ # PIL.Image.init()
32
+ # for ext, format in PIL.Image.EXTENSION.items():
33
+ # if format in PIL.Image.OPEN:
34
+ # IMAGE_EXTENSIONS.append(ext[1:])
35
+ # ```
36
+ # We intentionally do not run this code on launch because:
37
+ # (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
38
+ # (2) To ensure the list of supported extensions is deterministic
39
+ IMAGE_EXTENSIONS = [
40
+ ".blp",
41
+ ".bmp",
42
+ ".dib",
43
+ ".bufr",
44
+ ".cur",
45
+ ".pcx",
46
+ ".dcx",
47
+ ".dds",
48
+ ".ps",
49
+ ".eps",
50
+ ".fit",
51
+ ".fits",
52
+ ".fli",
53
+ ".flc",
54
+ ".ftc",
55
+ ".ftu",
56
+ ".gbr",
57
+ ".gif",
58
+ ".grib",
59
+ ".h5",
60
+ ".hdf",
61
+ ".png",
62
+ ".apng",
63
+ ".jp2",
64
+ ".j2k",
65
+ ".jpc",
66
+ ".jpf",
67
+ ".jpx",
68
+ ".j2c",
69
+ ".icns",
70
+ ".ico",
71
+ ".im",
72
+ ".iim",
73
+ ".tif",
74
+ ".tiff",
75
+ ".jfif",
76
+ ".jpe",
77
+ ".jpg",
78
+ ".jpeg",
79
+ ".mpg",
80
+ ".mpeg",
81
+ ".msp",
82
+ ".pcd",
83
+ ".pxr",
84
+ ".pbm",
85
+ ".pgm",
86
+ ".ppm",
87
+ ".pnm",
88
+ ".psd",
89
+ ".bw",
90
+ ".rgb",
91
+ ".rgba",
92
+ ".sgi",
93
+ ".ras",
94
+ ".tga",
95
+ ".icb",
96
+ ".vda",
97
+ ".vst",
98
+ ".webp",
99
+ ".wmf",
100
+ ".emf",
101
+ ".xbm",
102
+ ".xpm",
103
+ ]
104
+ ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
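ImageFolder is thin glue: it points the generic FolderBasedBuilder at the `datasets.Image` feature, the "image" column, and the Pillow-derived extension list above. A hedged usage sketch, assuming a local directory with one sub-folder per class (the path and layout are made up):

```python
# Assumed layout (not part of this diff):
# my_images/
# ├── cats/0001.jpg
# └── dogs/0002.jpg
from datasets import load_dataset

# "imagefolder" resolves to the ImageFolder builder above; class labels are inferred
# from the parent directory names unless drop_labels is set in the config.
dataset = load_dataset("imagefolder", data_dir="my_images", split="train")
print(dataset.features)  # expect an 'image' Image feature and a 'label' ClassLabel
```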
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes).
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc ADDED
Binary file (2.73 kB).
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py ADDED
@@ -0,0 +1,62 @@
+ import itertools
+ import warnings
+ from dataclasses import dataclass
+ from typing import Optional
+
+ import pandas as pd
+ import pyarrow as pa
+
+ import datasets
+ from datasets.table import table_cast
+
+
+ @dataclass
+ class PandasConfig(datasets.BuilderConfig):
+     """BuilderConfig for Pandas."""
+
+     features: Optional[datasets.Features] = None
+
+
+ class Pandas(datasets.ArrowBasedBuilder):
+     BUILDER_CONFIG_CLASS = PandasConfig
+
+     def _info(self):
+         warnings.warn(
+             "The Pandas builder is deprecated and will be removed in the next major version of datasets.",
+             FutureWarning,
+         )
+         return datasets.DatasetInfo(features=self.config.features)
+
+     def _split_generators(self, dl_manager):
+         """We handle string, list and dicts in datafiles"""
+         if not self.config.data_files:
+             raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+         data_files = dl_manager.download_and_extract(self.config.data_files)
+         if isinstance(data_files, (str, list, tuple)):
+             files = data_files
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+         splits = []
+         for split_name, files in data_files.items():
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+         return splits
+
+     def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+         if self.config.features is not None:
+             # more expensive cast to support nested features with keys in a different order
+             # allows str <-> int/float or str to Audio for example
+             pa_table = table_cast(pa_table, self.config.features.arrow_schema)
+         return pa_table
+
+     def _generate_tables(self, files):
+         for i, file in enumerate(itertools.chain.from_iterable(files)):
+             with open(file, "rb") as f:
+                 pa_table = pa.Table.from_pandas(pd.read_pickle(f))
+             yield i, self._cast_table(pa_table)
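The deprecated Pandas builder simply unpickles each data file with `pd.read_pickle` and converts the resulting DataFrame into one Arrow table per file. A small sketch, assuming a pickled DataFrame named `train.pkl` (the file name is made up):

```python
# Assumed file name; loading triggers the FutureWarning emitted in _info().
import pandas as pd
from datasets import load_dataset

pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")

dataset = load_dataset("pandas", data_files={"train": "train.pkl"}, split="train")
print(dataset[0])  # {'text': 'a', 'label': 0}
```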
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py ADDED
@@ -0,0 +1,100 @@
+ import itertools
+ from dataclasses import dataclass
+ from typing import List, Optional
+
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+
+ import datasets
+ from datasets.table import table_cast
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ @dataclass
+ class ParquetConfig(datasets.BuilderConfig):
+     """BuilderConfig for Parquet."""
+
+     batch_size: Optional[int] = None
+     columns: Optional[List[str]] = None
+     features: Optional[datasets.Features] = None
+
+
+ class Parquet(datasets.ArrowBasedBuilder):
+     BUILDER_CONFIG_CLASS = ParquetConfig
+
+     def _info(self):
+         if (
+             self.config.columns is not None
+             and self.config.features is not None
+             and set(self.config.columns) != set(self.config.features)
+         ):
+             raise ValueError(
+                 "The columns and features argument must contain the same columns, but got ",
+                 f"{self.config.columns} and {self.config.features}",
+             )
+         return datasets.DatasetInfo(features=self.config.features)
+
+     def _split_generators(self, dl_manager):
+         """We handle string, list and dicts in datafiles"""
+         if not self.config.data_files:
+             raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+         dl_manager.download_config.extract_on_the_fly = True
+         data_files = dl_manager.download_and_extract(self.config.data_files)
+         if isinstance(data_files, (str, list, tuple)):
+             files = data_files
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+         splits = []
+         for split_name, files in data_files.items():
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             # Infer features if they are stored in the arrow schema
+             if self.info.features is None:
+                 for file in itertools.chain.from_iterable(files):
+                     with open(file, "rb") as f:
+                         self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
+                     break
+             splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+         if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
+             self.info.features = datasets.Features(
+                 {col: feat for col, feat in self.info.features.items() if col in self.config.columns}
+             )
+         return splits
+
+     def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+         if self.info.features is not None:
+             # more expensive cast to support nested features with keys in a different order
+             # allows str <-> int/float or str to Audio for example
+             pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+         return pa_table
+
+     def _generate_tables(self, files):
+         if self.config.features is not None and self.config.columns is not None:
+             if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
+                 raise ValueError(
+                     f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
+                 )
+         for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+             with open(file, "rb") as f:
+                 parquet_file = pq.ParquetFile(f)
+                 if parquet_file.metadata.num_row_groups > 0:
+                     batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
+                     try:
+                         for batch_idx, record_batch in enumerate(
+                             parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
+                         ):
+                             pa_table = pa.Table.from_batches([record_batch])
+                             # Uncomment for debugging (will print the Arrow table size and elements)
+                             # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+                             # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+                             yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+                     except ValueError as e:
+                         logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+                         raise
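The Parquet builder reads each file batch by batch with `pq.ParquetFile.iter_batches`, so the `batch_size` and `columns` fields of ParquetConfig control how much data is materialized per yielded table. A sketch of passing those config fields through `load_dataset`, with a made-up file name:

```python
# Assumed file name; `columns` (and `batch_size`) are forwarded to ParquetConfig.
import pandas as pd
from datasets import load_dataset

pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]}).to_parquet("train.parquet")

# Only column "a" is read from each record batch; features are inferred from the
# Arrow schema in _split_generators and then filtered down to the requested columns.
dataset = load_dataset("parquet", data_files="train.parquet", columns=["a"], split="train")
print(dataset.column_names)  # ['a']
```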
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (202 Bytes).
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc ADDED
Binary file (4.48 kB).
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py ADDED
@@ -0,0 +1,118 @@
+ import sys
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+ import pandas as pd
+ import pyarrow as pa
+
+ import datasets
+ import datasets.config
+ from datasets.features.features import require_storage_cast
+ from datasets.table import table_cast
+
+
+ if TYPE_CHECKING:
+     import sqlite3
+
+     import sqlalchemy
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ @dataclass
+ class SqlConfig(datasets.BuilderConfig):
+     """BuilderConfig for SQL."""
+
+     sql: Union[str, "sqlalchemy.sql.Selectable"] = None
+     con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
+     index_col: Optional[Union[str, List[str]]] = None
+     coerce_float: bool = True
+     params: Optional[Union[List, Tuple, Dict]] = None
+     parse_dates: Optional[Union[List, Dict]] = None
+     columns: Optional[List[str]] = None
+     chunksize: Optional[int] = 10_000
+     features: Optional[datasets.Features] = None
+
+     def __post_init__(self):
+         if self.sql is None:
+             raise ValueError("sql must be specified")
+         if self.con is None:
+             raise ValueError("con must be specified")
+
+     def create_config_id(
+         self,
+         config_kwargs: dict,
+         custom_features: Optional[datasets.Features] = None,
+     ) -> str:
+         config_kwargs = config_kwargs.copy()
+         # We need to stringify the Selectable object to make its hash deterministic
+
+         # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
+         sql = config_kwargs["sql"]
+         if not isinstance(sql, str):
+             if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
+                 import sqlalchemy
+
+                 if isinstance(sql, sqlalchemy.sql.Selectable):
+                     engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
+                     sql_str = str(sql.compile(dialect=engine.dialect))
+                     config_kwargs["sql"] = sql_str
+                 else:
+                     raise TypeError(
+                         f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
+                     )
+             else:
+                 raise TypeError(
+                     f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
+                 )
+         con = config_kwargs["con"]
+         if not isinstance(con, str):
+             config_kwargs["con"] = id(con)
+             logger.info(
+                 f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
+             )
+
+         return super().create_config_id(config_kwargs, custom_features=custom_features)
+
+     @property
+     def pd_read_sql_kwargs(self):
+         pd_read_sql_kwargs = {
+             "index_col": self.index_col,
+             "columns": self.columns,
+             "params": self.params,
+             "coerce_float": self.coerce_float,
+             "parse_dates": self.parse_dates,
+         }
+         return pd_read_sql_kwargs
+
+
+ class Sql(datasets.ArrowBasedBuilder):
+     BUILDER_CONFIG_CLASS = SqlConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(features=self.config.features)
+
+     def _split_generators(self, dl_manager):
+         return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
+
+     def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+         if self.config.features is not None:
+             schema = self.config.features.arrow_schema
+             if all(not require_storage_cast(feature) for feature in self.config.features.values()):
+                 # cheaper cast
+                 pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
+             else:
+                 # more expensive cast; allows str <-> int/float or str to Audio for example
+                 pa_table = table_cast(pa_table, schema)
+         return pa_table
+
+     def _generate_tables(self):
+         chunksize = self.config.chunksize
+         sql_reader = pd.read_sql(
+             self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
+         )
+         sql_reader = [sql_reader] if chunksize is None else sql_reader
+         for chunk_idx, df in enumerate(sql_reader):
+             pa_table = pa.Table.from_pandas(df)
+             yield chunk_idx, self._cast_table(pa_table)
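The Sql builder has no files to download: `_generate_tables` pages through `pd.read_sql` in chunks of `chunksize` rows and converts each chunk to Arrow. In practice it is usually reached through `Dataset.from_sql`; a sketch using a throwaway SQLite database (the file and table names are made up, and a URI connection string needs SQLAlchemy installed):

```python
# Assumed database/table names; passing `con` as a URI string keeps the config
# hash deterministic (see create_config_id above) but requires SQLAlchemy.
import sqlite3

import pandas as pd
from datasets import Dataset

con = sqlite3.connect("example.db")
pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_sql("train", con, index=False)
con.close()

dataset = Dataset.from_sql("SELECT text, label FROM train", con="sqlite:///example.db")
print(dataset[0])  # {'text': 'a', 'label': 0}
```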
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes).
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc ADDED
Binary file (4.89 kB).
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py ADDED
@@ -0,0 +1,129 @@
+ import itertools
+ import warnings
+ from dataclasses import InitVar, dataclass
+ from io import StringIO
+ from typing import Optional
+
+ import pyarrow as pa
+
+ import datasets
+ from datasets.features.features import require_storage_cast
+ from datasets.table import table_cast
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ @dataclass
+ class TextConfig(datasets.BuilderConfig):
+     """BuilderConfig for text files."""
+
+     features: Optional[datasets.Features] = None
+     encoding: str = "utf-8"
+     errors: InitVar[Optional[str]] = "deprecated"
+     encoding_errors: Optional[str] = None
+     chunksize: int = 10 << 20  # 10MB
+     keep_linebreaks: bool = False
+     sample_by: str = "line"
+
+     def __post_init__(self, errors):
+         if errors != "deprecated":
+             warnings.warn(
+                 "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n"
+                 f"You can remove this warning by passing 'encoding_errors={errors}' instead.",
+                 FutureWarning,
+             )
+             self.encoding_errors = errors
+
+
+ class Text(datasets.ArrowBasedBuilder):
+     BUILDER_CONFIG_CLASS = TextConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(features=self.config.features)
+
+     def _split_generators(self, dl_manager):
+         """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
+
+         If str or List[str], then the dataset returns only the 'train' split.
+         If dict, then keys should be from the `datasets.Split` enum.
+         """
+         if not self.config.data_files:
+             raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+         dl_manager.download_config.extract_on_the_fly = True
+         data_files = dl_manager.download_and_extract(self.config.data_files)
+         if isinstance(data_files, (str, list, tuple)):
+             files = data_files
+             if isinstance(files, str):
+                 files = [files]
+             files = [dl_manager.iter_files(file) for file in files]
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+         splits = []
+         for split_name, files in data_files.items():
+             if isinstance(files, str):
+                 files = [files]
+             files = [dl_manager.iter_files(file) for file in files]
+             splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+         return splits
+
+     def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+         if self.config.features is not None:
+             schema = self.config.features.arrow_schema
+             if all(not require_storage_cast(feature) for feature in self.config.features.values()):
+                 # cheaper cast
+                 pa_table = pa_table.cast(schema)
+             else:
+                 # more expensive cast; allows str <-> int/float or str to Audio for example
+                 pa_table = table_cast(pa_table, schema)
+             return pa_table
+         else:
+             return pa_table.cast(pa.schema({"text": pa.string()}))
+
+     def _generate_tables(self, files):
+         pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
+         for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+             # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
+             with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
+                 if self.config.sample_by == "line":
+                     batch_idx = 0
+                     while True:
+                         batch = f.read(self.config.chunksize)
+                         if not batch:
+                             break
+                         batch += f.readline()  # finish current line
+                         # StringIO.readlines, by default splits only on "\n" (and keeps line breaks)
+                         batch = StringIO(batch).readlines()
+                         if not self.config.keep_linebreaks:
+                             batch = [line.rstrip("\n") for line in batch]
+                         pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
+                         # Uncomment for debugging (will print the Arrow table size and elements)
+                         # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+                         # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+                         yield (file_idx, batch_idx), self._cast_table(pa_table)
+                         batch_idx += 1
+                 elif self.config.sample_by == "paragraph":
+                     batch_idx = 0
+                     batch = ""
+                     while True:
+                         new_batch = f.read(self.config.chunksize)
+                         if not new_batch:
+                             break
+                         batch += new_batch
+                         batch += f.readline()  # finish current line
+                         batch = batch.split("\n\n")
+                         pa_table = pa.Table.from_arrays(
+                             [pa.array([example for example in batch[:-1] if example])], names=pa_table_names
+                         )
+                         # Uncomment for debugging (will print the Arrow table size and elements)
+                         # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+                         # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+                         yield (file_idx, batch_idx), self._cast_table(pa_table)
+                         batch_idx += 1
+                         batch = batch[-1]
+                     if batch:
+                         pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
+                         yield (file_idx, batch_idx), self._cast_table(pa_table)
+                 elif self.config.sample_by == "document":
+                     text = f.read()
+                     pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
+                     yield file_idx, self._cast_table(pa_table)
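`sample_by` in TextConfig decides what one row means: a line (the default), a paragraph separated by blank lines, or the whole document. A sketch with a made-up file showing the first two modes:

```python
# Assumed file name; the rows differ only in how the same text is chunked.
from datasets import load_dataset

with open("train.txt", "w", encoding="utf-8") as f:
    f.write("first paragraph, line 1\nfirst paragraph, line 2\n\nsecond paragraph\n")

lines = load_dataset("text", data_files="train.txt", split="train")
paragraphs = load_dataset("text", data_files="train.txt", split="train", sample_by="paragraph")
print(len(lines), len(paragraphs))  # 4 line rows (the blank line included), 2 paragraph rows
```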