applied-ai-018 committed (verified)
Commit e1bbc88 · Parent(s): fe3c857

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step20/zero/18.attention.dense.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step20/zero/18.attention.dense.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step20/zero/9.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert_to_parquet.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/datasets/download/__init__.py +10 -0
  10. venv/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/datasets/download/download_config.py +108 -0
  16. venv/lib/python3.10/site-packages/datasets/download/download_manager.py +448 -0
  17. venv/lib/python3.10/site-packages/datasets/download/mock_download_manager.py +244 -0
  18. venv/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py +210 -0
  19. venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py +0 -0
  20. venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py +207 -0
  23. venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py +0 -0
  24. venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py +203 -0
  26. venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py +0 -0
  27. venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py +180 -0
  30. venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py +0 -0
  31. venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py +349 -0
  34. venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py +0 -0
  35. venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py +118 -0
  38. venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py +0 -0
  39. venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py +285 -0
  43. venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py +299 -0
  44. venv/lib/python3.10/site-packages/datasets/parallel/__init__.py +1 -0
  45. venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/datasets/parallel/parallel.py +120 -0
  48. venv/lib/python3.10/site-packages/datasets/tasks/__init__.py +46 -0
  49. venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc +0 -0
ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:929555ecf3a84a09dbb0adf6abce9c36ae0b9b84114921beac5066f33a45c7f9
+ size 33555612
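For context, the checkpoint tensors in this commit are stored as Git LFS pointer files: the diff above adds only the three-line pointer (spec version, object SHA-256, byte size), not the ~33 MB tensor itself. Below is a minimal sketch of parsing such a pointer, assuming a local checkout where the file still contains the pointer text; the helper name and path usage are illustrative, not part of the commit.

```py
from pathlib import Path


def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version / oid / size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])  # size is recorded in bytes
    return fields


# Hypothetical local checkout of this repo (only valid while the file is still a
# pointer, i.e. `git lfs pull` has not materialized the real tensor yet):
# info = read_lfs_pointer("ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt")
# info["oid"]  -> "sha256:9295...c7f9", info["size"] -> 33555612
```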
ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0509bc0ff365fd6e8cb292534fb79e5ac1abfa14165d165555b1dd7f194d0aaa
+ size 50332749
ckpts/universal/global_step20/zero/18.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeaa31a4a99e6eba1e9955db4c27f1412e0ea156029115bc4691f1684455a2b6
+ size 16778396
ckpts/universal/global_step20/zero/18.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a546964f06449aeeebda85ccb697d5601209f9c88ecbedcb1a012bcee820eca
+ size 16778317
ckpts/universal/global_step20/zero/9.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0f1da3f760ef72009420595c4abe76a9a6633487215813eacadbdd377feb6c6
+ size 33555533
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc ADDED
Binary file (6.07 kB).
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert_to_parquet.cpython-310.pyc ADDED
Binary file (4.42 kB).
venv/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc ADDED
Binary file (5.63 kB).
venv/lib/python3.10/site-packages/datasets/download/__init__.py ADDED
@@ -0,0 +1,10 @@
+ __all__ = [
+     "DownloadConfig",
+     "DownloadManager",
+     "DownloadMode",
+     "StreamingDownloadManager",
+ ]
+
+ from .download_config import DownloadConfig
+ from .download_manager import DownloadManager, DownloadMode
+ from .streaming_download_manager import StreamingDownloadManager
venv/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (434 Bytes).
venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc ADDED
Binary file (5.65 kB).
venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc ADDED
Binary file (15.3 kB).
venv/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc ADDED
Binary file (8.02 kB).
venv/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc ADDED
Binary file (7.45 kB).
venv/lib/python3.10/site-packages/datasets/download/download_config.py ADDED
@@ -0,0 +1,108 @@
+ import copy
+ import warnings
+ from dataclasses import InitVar, dataclass, field
+ from pathlib import Path
+ from typing import Any, Dict, Optional, Union
+
+ from .. import config
+
+
+ @dataclass
+ class DownloadConfig:
+     """Configuration for our cached path manager.
+
+     Attributes:
+         cache_dir (`str` or `Path`, *optional*):
+             Specify a cache directory to save the file to (overwrite the
+             default cache dir).
+         force_download (`bool`, defaults to `False`):
+             If `True`, re-download the file even if it's already cached in
+             the cache dir.
+         resume_download (`bool`, defaults to `False`):
+             If `True`, resume the download if an incompletely received file is
+             found.
+         proxies (`dict`, *optional*):
+         user_agent (`str`, *optional*):
+             Optional string or dict that will be appended to the user-agent on remote
+             requests.
+         extract_compressed_file (`bool`, defaults to `False`):
+             If `True` and the path point to a zip or tar file,
+             extract the compressed file in a folder along the archive.
+         force_extract (`bool`, defaults to `False`):
+             If `True` when `extract_compressed_file` is `True` and the archive
+             was already extracted, re-extract the archive and override the folder where it was extracted.
+         delete_extracted (`bool`, defaults to `False`):
+             Whether to delete (or keep) the extracted files.
+         extract_on_the_fly (`bool`, defaults to `False`):
+             If `True`, extract compressed files while they are being read.
+         use_etag (`bool`, defaults to `True`):
+             Whether to use the ETag HTTP response header to validate the cached files.
+         num_proc (`int`, *optional*):
+             The number of processes to launch to download the files in parallel.
+         max_retries (`int`, default to `1`):
+             The number of times to retry an HTTP request if it fails.
+         token (`str` or `bool`, *optional*):
+             Optional string or boolean to use as Bearer token
+             for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
+         use_auth_token (`str` or `bool`, *optional*):
+             Optional string or boolean to use as Bearer token
+             for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
+
+             <Deprecated version="2.14.0">
+
+             `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+             </Deprecated>
+
+         ignore_url_params (`bool`, defaults to `False`):
+             Whether to strip all query parameters and fragments from
+             the download URL before using it for caching the file.
+         storage_options (`dict`, *optional*):
+             Key/value pairs to be passed on to the dataset file-system backend, if any.
+         download_desc (`str`, *optional*):
+             A description to be displayed alongside with the progress bar while downloading the files.
+         disable_tqdm (`bool`, defaults to `False`):
+             Whether to disable the individual files download progress bar
+     """
+
+     cache_dir: Optional[Union[str, Path]] = None
+     force_download: bool = False
+     resume_download: bool = False
+     local_files_only: bool = False
+     proxies: Optional[Dict] = None
+     user_agent: Optional[str] = None
+     extract_compressed_file: bool = False
+     force_extract: bool = False
+     delete_extracted: bool = False
+     extract_on_the_fly: bool = False
+     use_etag: bool = True
+     num_proc: Optional[int] = None
+     max_retries: int = 1
+     token: Optional[Union[str, bool]] = None
+     use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
+     ignore_url_params: bool = False
+     storage_options: Dict[str, Any] = field(default_factory=dict)
+     download_desc: Optional[str] = None
+     disable_tqdm: bool = False
+
+     def __post_init__(self, use_auth_token):
+         if use_auth_token != "deprecated":
+             warnings.warn(
+                 "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+                 f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+                 FutureWarning,
+             )
+             self.token = use_auth_token
+         if "hf" not in self.storage_options:
+             self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
+
+     def copy(self) -> "DownloadConfig":
+         return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
+
+     def __setattr__(self, name, value):
+         if name == "token" and getattr(self, "storage_options", None) is not None:
+             if "hf" not in self.storage_options:
+                 self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
+             elif getattr(self.storage_options["hf"], "token", None) is None:
+                 self.storage_options["hf"]["token"] = value
+         super().__setattr__(name, value)
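To make the configuration above concrete, here is a small usage sketch based only on the fields and methods shown in this diff; the cache path and parameter values are illustrative, not part of the commit.

```py
from datasets.download import DownloadConfig

# Custom cache location, parallel downloads and retries; every other field keeps
# the default declared in the dataclass above.
download_config = DownloadConfig(cache_dir="/tmp/hf_cache", num_proc=4, max_retries=3)

# __post_init__ mirrors the token into the "hf" storage options consumed by fsspec.
print(download_config.storage_options["hf"])

# copy() deep-copies every field, so per-call tweaks don't leak back.
per_call_config = download_config.copy()
per_call_config.force_download = True
assert download_config.force_download is False
```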
venv/lib/python3.10/site-packages/datasets/download/download_manager.py ADDED
@@ -0,0 +1,448 @@
1
+ # Copyright 2020 The TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Download manager interface."""
17
+
18
+ import enum
19
+ import io
20
+ import multiprocessing
21
+ import os
22
+ import posixpath
23
+ import warnings
24
+ from datetime import datetime
25
+ from functools import partial
26
+ from typing import Dict, List, Optional, Union
27
+
28
+ import fsspec
29
+ from fsspec.core import url_to_fs
30
+ from tqdm.contrib.concurrent import thread_map
31
+
32
+ from .. import config
33
+ from ..utils import tqdm as hf_tqdm
34
+ from ..utils.deprecation_utils import DeprecatedEnum, deprecated
35
+ from ..utils.file_utils import (
36
+ ArchiveIterable,
37
+ FilesIterable,
38
+ cached_path,
39
+ get_from_cache,
40
+ hash_url_to_filename,
41
+ is_relative_path,
42
+ stack_multiprocessing_download_progress_bars,
43
+ url_or_path_join,
44
+ )
45
+ from ..utils.info_utils import get_size_checksum_dict
46
+ from ..utils.logging import get_logger, tqdm
47
+ from ..utils.py_utils import NestedDataStructure, map_nested, size_str
48
+ from ..utils.track import tracked_str
49
+ from .download_config import DownloadConfig
50
+
51
+
52
+ logger = get_logger(__name__)
53
+
54
+
55
+ class DownloadMode(enum.Enum):
56
+ """`Enum` for how to treat pre-existing downloads and data.
57
+
58
+ The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
59
+ raw downloads and the prepared dataset if they exist.
60
+
61
+ The generations modes:
62
+
63
+ | | Downloads | Dataset |
64
+ |-------------------------------------|-----------|---------|
65
+ | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
66
+ | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
67
+ | `FORCE_REDOWNLOAD` | Fresh | Fresh |
68
+
69
+ """
70
+
71
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
72
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
73
+ FORCE_REDOWNLOAD = "force_redownload"
74
+
75
+
76
+ class GenerateMode(DeprecatedEnum):
77
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
78
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
79
+ FORCE_REDOWNLOAD = "force_redownload"
80
+
81
+ @property
82
+ def help_message(self):
83
+ return "Use 'DownloadMode' instead."
84
+
85
+
86
+ class DownloadManager:
87
+ is_streaming = False
88
+
89
+ def __init__(
90
+ self,
91
+ dataset_name: Optional[str] = None,
92
+ data_dir: Optional[str] = None,
93
+ download_config: Optional[DownloadConfig] = None,
94
+ base_path: Optional[str] = None,
95
+ record_checksums=True,
96
+ ):
97
+ """Download manager constructor.
98
+
99
+ Args:
100
+ data_dir:
101
+ can be used to specify a manual directory to get the files from.
102
+ dataset_name (`str`):
103
+ name of dataset this instance will be used for. If
104
+ provided, downloads will contain which datasets they were used for.
105
+ download_config (`DownloadConfig`):
106
+ to specify the cache directory and other
107
+ download options
108
+ base_path (`str`):
109
+ base path that is used when relative paths are used to
110
+ download files. This can be a remote url.
111
+ record_checksums (`bool`, defaults to `True`):
112
+ Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
113
+ """
114
+ self._dataset_name = dataset_name
115
+ self._data_dir = data_dir
116
+ self._base_path = base_path or os.path.abspath(".")
117
+ # To record what is being used: {url: {num_bytes: int, checksum: str}}
118
+ self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
119
+ self.record_checksums = record_checksums
120
+ self.download_config = download_config or DownloadConfig()
121
+ self.downloaded_paths = {}
122
+ self.extracted_paths = {}
123
+
124
+ @property
125
+ def manual_dir(self):
126
+ return self._data_dir
127
+
128
+ @property
129
+ def downloaded_size(self):
130
+ """Returns the total size of downloaded files."""
131
+ return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
132
+
133
+ @staticmethod
134
+ def ship_files_with_pipeline(downloaded_path_or_paths, pipeline):
135
+ """Ship the files using Beam FileSystems to the pipeline temp dir.
136
+
137
+ Args:
138
+ downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`):
139
+ Nested structure containing the
140
+ downloaded path(s).
141
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
142
+ Apache Beam Pipeline.
143
+
144
+ Returns:
145
+ `str` or `list[str]` or `dict[str, str]`
146
+ """
147
+ from ..utils.beam_utils import upload_local_to_remote
148
+
149
+ remote_dir = pipeline._options.get_all_options().get("temp_location")
150
+ if remote_dir is None:
151
+ raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files")
152
+
153
+ def upload(local_file_path):
154
+ remote_file_path = posixpath.join(
155
+ remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path)
156
+ )
157
+ logger.info(
158
+ f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}."
159
+ )
160
+ upload_local_to_remote(local_file_path, remote_file_path)
161
+ return remote_file_path
162
+
163
+ uploaded_path_or_paths = map_nested(
164
+ lambda local_file_path: upload(local_file_path),
165
+ downloaded_path_or_paths,
166
+ )
167
+ return uploaded_path_or_paths
168
+
169
+ def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
170
+ """Record size/checksum of downloaded files."""
171
+ delay = 5
172
+ for url, path in hf_tqdm(
173
+ list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
174
+ delay=delay,
175
+ desc="Computing checksums",
176
+ ):
177
+ # call str to support PathLike objects
178
+ self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
179
+ path, record_checksum=self.record_checksums
180
+ )
181
+
182
+ @deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.")
183
+ def download_custom(self, url_or_urls, custom_download):
184
+ """
185
+ Download given urls(s) by calling `custom_download`.
186
+
187
+ Args:
188
+ url_or_urls (`str` or `list` or `dict`):
189
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
190
+ custom_download (`Callable[src_url, dst_path]`):
191
+ The source URL and destination path. For example
192
+ `tf.io.gfile.copy`, that lets you download from Google storage.
193
+
194
+ Returns:
195
+ downloaded_path(s): `str`, The downloaded paths matching the given input
196
+ `url_or_urls`.
197
+
198
+ Example:
199
+
200
+ ```py
201
+ >>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket)
202
+ ```
203
+ """
204
+ cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
205
+ max_retries = self.download_config.max_retries
206
+
207
+ def url_to_downloaded_path(url):
208
+ return os.path.join(cache_dir, hash_url_to_filename(url))
209
+
210
+ downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls)
211
+ url_or_urls = NestedDataStructure(url_or_urls)
212
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
213
+ for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()):
214
+ try:
215
+ get_from_cache(
216
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
217
+ )
218
+ cached = True
219
+ except FileNotFoundError:
220
+ cached = False
221
+ if not cached or self.download_config.force_download:
222
+ custom_download(url, path)
223
+ get_from_cache(
224
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
225
+ )
226
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
227
+ return downloaded_path_or_paths.data
228
+
229
+ def download(self, url_or_urls):
230
+ """Download given URL(s).
231
+
232
+ By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
233
+
234
+ Args:
235
+ url_or_urls (`str` or `list` or `dict`):
236
+ URL or `list` or `dict` of URLs to download. Each URL is a `str`.
237
+
238
+ Returns:
239
+ `str` or `list` or `dict`:
240
+ The downloaded paths matching the given input `url_or_urls`.
241
+
242
+ Example:
243
+
244
+ ```py
245
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
246
+ ```
247
+ """
248
+ download_config = self.download_config.copy()
249
+ download_config.extract_compressed_file = False
250
+ if download_config.download_desc is None:
251
+ download_config.download_desc = "Downloading data"
252
+
253
+ download_func = partial(self._download_batched, download_config=download_config)
254
+
255
+ start_time = datetime.now()
256
+ with stack_multiprocessing_download_progress_bars():
257
+ downloaded_path_or_paths = map_nested(
258
+ download_func,
259
+ url_or_urls,
260
+ map_tuple=True,
261
+ num_proc=download_config.num_proc,
262
+ desc="Downloading data files",
263
+ batched=True,
264
+ batch_size=-1,
265
+ )
266
+ duration = datetime.now() - start_time
267
+ logger.info(f"Downloading took {duration.total_seconds() // 60} min")
268
+ url_or_urls = NestedDataStructure(url_or_urls)
269
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
270
+ self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))
271
+
272
+ start_time = datetime.now()
273
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
274
+ duration = datetime.now() - start_time
275
+ logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
276
+
277
+ return downloaded_path_or_paths.data
278
+
279
+ def _download_batched(
280
+ self,
281
+ url_or_filenames: List[str],
282
+ download_config: DownloadConfig,
283
+ ) -> List[str]:
284
+ if len(url_or_filenames) >= 16:
285
+ download_config = download_config.copy()
286
+ download_config.disable_tqdm = True
287
+ download_func = partial(self._download_single, download_config=download_config)
288
+
289
+ fs: fsspec.AbstractFileSystem
290
+ fs, path = url_to_fs(url_or_filenames[0], **download_config.storage_options)
291
+ size = 0
292
+ try:
293
+ size = fs.info(path).get("size", 0)
294
+ except Exception:
295
+ pass
296
+ max_workers = (
297
+ config.HF_DATASETS_MULTITHREADING_MAX_WORKERS if size < (20 << 20) else 1
298
+ ) # enable multithreading if files are small
299
+
300
+ return thread_map(
301
+ download_func,
302
+ url_or_filenames,
303
+ desc=download_config.download_desc or "Downloading",
304
+ unit="files",
305
+ position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
306
+ if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
307
+ and multiprocessing.current_process()._identity
308
+ else None,
309
+ max_workers=max_workers,
310
+ tqdm_class=tqdm,
311
+ )
312
+ else:
313
+ return [
314
+ self._download_single(url_or_filename, download_config=download_config)
315
+ for url_or_filename in url_or_filenames
316
+ ]
317
+
318
+ def _download_single(self, url_or_filename: str, download_config: DownloadConfig) -> str:
319
+ url_or_filename = str(url_or_filename)
320
+ if is_relative_path(url_or_filename):
321
+ # append the relative path to the base_path
322
+ url_or_filename = url_or_path_join(self._base_path, url_or_filename)
323
+ out = cached_path(url_or_filename, download_config=download_config)
324
+ out = tracked_str(out)
325
+ out.set_origin(url_or_filename)
326
+ return out
327
+
328
+ def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
329
+ """Iterate over files within an archive.
330
+
331
+ Args:
332
+ path_or_buf (`str` or `io.BufferedReader`):
333
+ Archive path or archive binary file object.
334
+
335
+ Yields:
336
+ `tuple[str, io.BufferedReader]`:
337
+ 2-tuple (path_within_archive, file_object).
338
+ File object is opened in binary mode.
339
+
340
+ Example:
341
+
342
+ ```py
343
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
344
+ >>> files = dl_manager.iter_archive(archive)
345
+ ```
346
+ """
347
+
348
+ if hasattr(path_or_buf, "read"):
349
+ return ArchiveIterable.from_buf(path_or_buf)
350
+ else:
351
+ return ArchiveIterable.from_urlpath(path_or_buf)
352
+
353
+ def iter_files(self, paths: Union[str, List[str]]):
354
+ """Iterate over file paths.
355
+
356
+ Args:
357
+ paths (`str` or `list` of `str`):
358
+ Root paths.
359
+
360
+ Yields:
361
+ `str`: File path.
362
+
363
+ Example:
364
+
365
+ ```py
366
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
367
+ >>> files = dl_manager.iter_files(files)
368
+ ```
369
+ """
370
+ return FilesIterable.from_urlpaths(paths)
371
+
372
+ def extract(self, path_or_paths, num_proc="deprecated"):
373
+ """Extract given path(s).
374
+
375
+ Args:
376
+ path_or_paths (path or `list` or `dict`):
377
+ Path of file to extract. Each path is a `str`.
378
+ num_proc (`int`):
379
+ Use multi-processing if `num_proc` > 1 and the length of
380
+ `path_or_paths` is larger than `num_proc`.
381
+
382
+ <Deprecated version="2.6.2">
383
+
384
+ Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.
385
+
386
+ </Deprecated>
387
+
388
+ Returns:
389
+ extracted_path(s): `str`, The extracted paths matching the given input
390
+ path_or_paths.
391
+
392
+ Example:
393
+
394
+ ```py
395
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
396
+ >>> extracted_files = dl_manager.extract(downloaded_files)
397
+ ```
398
+ """
399
+ if num_proc != "deprecated":
400
+ warnings.warn(
401
+ "'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.",
402
+ FutureWarning,
403
+ )
404
+ download_config = self.download_config.copy()
405
+ download_config.extract_compressed_file = True
406
+ extract_func = partial(self._download_single, download_config=download_config)
407
+ extracted_paths = map_nested(
408
+ extract_func,
409
+ path_or_paths,
410
+ num_proc=download_config.num_proc,
411
+ desc="Extracting data files",
412
+ )
413
+ path_or_paths = NestedDataStructure(path_or_paths)
414
+ extracted_paths = NestedDataStructure(extracted_paths)
415
+ self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
416
+ return extracted_paths.data
417
+
418
+ def download_and_extract(self, url_or_urls):
419
+ """Download and extract given `url_or_urls`.
420
+
421
+ Is roughly equivalent to:
422
+
423
+ ```
424
+ extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
425
+ ```
426
+
427
+ Args:
428
+ url_or_urls (`str` or `list` or `dict`):
429
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
430
+
431
+ Returns:
432
+ extracted_path(s): `str`, extracted paths of given URL(s).
433
+ """
434
+ return self.extract(self.download(url_or_urls))
435
+
436
+ def get_recorded_sizes_checksums(self):
437
+ return self._recorded_sizes_checksums.copy()
438
+
439
+ def delete_extracted_files(self):
440
+ paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
441
+ for key, path in list(self.extracted_paths.items()):
442
+ if path in paths_to_delete and os.path.isfile(path):
443
+ os.remove(path)
444
+ del self.extracted_paths[key]
445
+
446
+ def manage_extracted_files(self):
447
+ if self.download_config.delete_extracted:
448
+ self.delete_extracted_files()
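As a usage sketch of the manager defined above: normally `load_dataset` constructs it inside a dataset builder, so building one by hand is an assumption for illustration, and the example URL is the one used in the docstrings of this file.

```py
from datasets.download import DownloadConfig, DownloadManager

dl_manager = DownloadManager(
    dataset_name="rotten_tomatoes",  # illustrative; usually filled in by the builder
    download_config=DownloadConfig(cache_dir="/tmp/hf_cache"),
)

# download() returns cached local paths mirroring the input structure (str/list/dict)
# and records sizes/checksums along the way.
archive = dl_manager.download(
    "https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz"
)

# TAR archives are consumed lazily as (member_name, binary_file_object) pairs.
for name, file_obj in dl_manager.iter_archive(archive):
    print(name, len(file_obj.read()))
    break
```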
venv/lib/python3.10/site-packages/datasets/download/mock_download_manager.py ADDED
@@ -0,0 +1,244 @@
1
+ # Copyright 2020 The TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Mock download manager interface."""
17
+
18
+ import os
19
+ import re
20
+ import urllib.parse
21
+ from pathlib import Path
22
+ from typing import Callable, List, Optional, Union
23
+ from zipfile import ZipFile
24
+
25
+ from ..utils.file_utils import cached_path, hf_github_url
26
+ from ..utils.logging import get_logger
27
+ from ..utils.version import Version
28
+
29
+
30
+ logger = get_logger(__name__)
31
+
32
+
33
+ class MockDownloadManager:
34
+ dummy_file_name = "dummy_data"
35
+ datasets_scripts_dir = "datasets"
36
+ is_streaming = False
37
+
38
+ def __init__(
39
+ self,
40
+ dataset_name: str,
41
+ config: str,
42
+ version: Union[Version, str],
43
+ cache_dir: Optional[str] = None,
44
+ use_local_dummy_data: bool = False,
45
+ load_existing_dummy_data: bool = True,
46
+ download_callbacks: Optional[List[Callable]] = None,
47
+ ):
48
+ self.downloaded_size = 0
49
+ self.dataset_name = dataset_name
50
+ self.cache_dir = cache_dir
51
+ self.use_local_dummy_data = use_local_dummy_data
52
+ self.config = config
53
+ # download_callbacks take a single url as input
54
+ self.download_callbacks: List[Callable] = download_callbacks or []
55
+ # if False, it doesn't load existing files and it returns the paths of the dummy files relative
56
+ # to the dummy_data zip file root
57
+ self.load_existing_dummy_data = load_existing_dummy_data
58
+
59
+ # TODO(PVP, QL) might need to make this more general
60
+ self.version_name = str(version)
61
+ # to be downloaded
62
+ self._dummy_file = None
63
+ self._bucket_url = None
64
+
65
+ @property
66
+ def dummy_file(self):
67
+ if self._dummy_file is None:
68
+ self._dummy_file = self.download_dummy_data()
69
+ return self._dummy_file
70
+
71
+ @property
72
+ def dummy_data_folder(self):
73
+ if self.config is not None:
74
+ # structure is dummy / config_name / version_name
75
+ return os.path.join("dummy", self.config.name, self.version_name)
76
+ # structure is dummy / version_name
77
+ return os.path.join("dummy", self.version_name)
78
+
79
+ @property
80
+ def dummy_zip_file(self):
81
+ return os.path.join(self.dummy_data_folder, "dummy_data.zip")
82
+
83
+ def download_dummy_data(self):
84
+ path_to_dummy_data_dir = (
85
+ self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
86
+ )
87
+
88
+ local_path = cached_path(
89
+ path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
90
+ )
91
+
92
+ return os.path.join(local_path, self.dummy_file_name)
93
+
94
+ @property
95
+ def local_path_to_dummy_data(self):
96
+ return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
97
+
98
+ @property
99
+ def github_path_to_dummy_data(self):
100
+ if self._bucket_url is None:
101
+ self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
102
+ return self._bucket_url
103
+
104
+ @property
105
+ def manual_dir(self):
106
+ # return full path if its a dir
107
+ if os.path.isdir(self.dummy_file):
108
+ return self.dummy_file
109
+ # else cut off path to file -> example `xsum`.
110
+ return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
111
+
112
+ # this function has to be in the manager under this name so that testing works
113
+ def download_and_extract(self, data_url, *args):
114
+ if self.load_existing_dummy_data:
115
+ # dummy data is downloaded and tested
116
+ dummy_file = self.dummy_file
117
+ else:
118
+ # dummy data cannot be downloaded and only the path to dummy file is returned
119
+ dummy_file = self.dummy_file_name
120
+
121
+ # special case when data_url is a dict
122
+ if isinstance(data_url, dict):
123
+ return self.create_dummy_data_dict(dummy_file, data_url)
124
+ elif isinstance(data_url, (list, tuple)):
125
+ return self.create_dummy_data_list(dummy_file, data_url)
126
+ else:
127
+ return self.create_dummy_data_single(dummy_file, data_url)
128
+
129
+ # this function has to be in the manager under this name so that testing works
130
+ def download(self, data_url, *args):
131
+ return self.download_and_extract(data_url)
132
+
133
+ # this function has to be in the manager under this name so that testing works
134
+ def download_custom(self, data_url, custom_download):
135
+ return self.download_and_extract(data_url)
136
+
137
+ # this function has to be in the manager under this name so that testing works
138
+ def extract(self, path, *args, **kwargs):
139
+ return path
140
+
141
+ # this function has to be in the manager under this name so that testing works
142
+ def get_recorded_sizes_checksums(self):
143
+ return {}
144
+
145
+ def create_dummy_data_dict(self, path_to_dummy_data, data_url):
146
+ dummy_data_dict = {}
147
+ for key, single_urls in data_url.items():
148
+ for download_callback in self.download_callbacks:
149
+ if isinstance(single_urls, list):
150
+ for single_url in single_urls:
151
+ download_callback(single_url)
152
+ else:
153
+ single_url = single_urls
154
+ download_callback(single_url)
155
+ # we force the name of each key to be the last file / folder name of the url path
156
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
157
+ if isinstance(single_urls, list):
158
+ value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
159
+ else:
160
+ single_url = single_urls
161
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
162
+ dummy_data_dict[key] = value
163
+
164
+ # make sure that values are unique
165
+ if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
166
+ dummy_data_dict.values()
167
+ ):
168
+ # append key to value to make its name unique
169
+ dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
170
+
171
+ return dummy_data_dict
172
+
173
+ def create_dummy_data_list(self, path_to_dummy_data, data_url):
174
+ dummy_data_list = []
175
+ # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
176
+ is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
177
+ is_pubmed_records = all(
178
+ url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
179
+ )
180
+ if data_url and (is_tf_records or is_pubmed_records):
181
+ data_url = [data_url[0]] * len(data_url)
182
+ for single_url in data_url:
183
+ for download_callback in self.download_callbacks:
184
+ download_callback(single_url)
185
+ # we force the name of each key to be the last file / folder name of the url path
186
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
187
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
188
+ dummy_data_list.append(value)
189
+ return dummy_data_list
190
+
191
+ def create_dummy_data_single(self, path_to_dummy_data, data_url):
192
+ for download_callback in self.download_callbacks:
193
+ download_callback(data_url)
194
+ # we force the name of each key to be the last file / folder name of the url path
195
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
196
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
197
+ if os.path.exists(value) or not self.load_existing_dummy_data:
198
+ return value
199
+ else:
200
+ # Backward compatibility, maybe deprecate at one point.
201
+ # For many datasets with single url calls to dl_manager.download_and_extract,
202
+ # the dummy_data.zip file is actually the zipped downloaded file
203
+ # while now we expected the dummy_data.zip file to be a directory containing
204
+ # the downloaded file.
205
+ return path_to_dummy_data
206
+
207
+ def delete_extracted_files(self):
208
+ pass
209
+
210
+ def manage_extracted_files(self):
211
+ pass
212
+
213
+ def iter_archive(self, path):
214
+ def _iter_archive_members(path):
215
+ # this preserves the order of the members inside the ZIP archive
216
+ dummy_parent_path = Path(self.dummy_file).parent
217
+ relative_path = path.relative_to(dummy_parent_path)
218
+ with ZipFile(self.local_path_to_dummy_data) as zip_file:
219
+ members = zip_file.namelist()
220
+ for member in members:
221
+ if member.startswith(relative_path.as_posix()):
222
+ yield dummy_parent_path.joinpath(member)
223
+
224
+ path = Path(path)
225
+ file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
226
+ for file_path in file_paths:
227
+ if file_path.is_file() and not file_path.name.startswith((".", "__")):
228
+ yield file_path.relative_to(path).as_posix(), file_path.open("rb")
229
+
230
+ def iter_files(self, paths):
231
+ if not isinstance(paths, list):
232
+ paths = [paths]
233
+ for path in paths:
234
+ if os.path.isfile(path):
235
+ yield path
236
+ else:
237
+ for dirpath, dirnames, filenames in os.walk(path):
238
+ if os.path.basename(dirpath).startswith((".", "__")):
239
+ continue
240
+ dirnames.sort()
241
+ for filename in sorted(filenames):
242
+ if filename.startswith((".", "__")):
243
+ continue
244
+ yield os.path.join(dirpath, filename)
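A rough sketch of how this mock resolves URLs without downloading anything, assuming `load_existing_dummy_data=False` so no dummy_data.zip is fetched; the dataset name and URLs are illustrative.

```py
from datasets.download.mock_download_manager import MockDownloadManager

# With load_existing_dummy_data=False nothing is fetched; URLs are simply mapped to
# paths inside the (hypothetical) dummy_data directory of the dataset script.
mock_dl = MockDownloadManager(
    dataset_name="my_dataset",  # illustrative
    config=None,
    version="1.0.0",
    load_existing_dummy_data=False,
)
paths = mock_dl.download_and_extract(
    {"train": "https://example.com/train.csv", "test": "https://example.com/test.csv"}
)
print(paths)  # {'train': 'dummy_data/train.csv', 'test': 'dummy_data/test.csv'}
```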
venv/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py ADDED
@@ -0,0 +1,210 @@
1
+ import io
2
+ import os
3
+ from typing import Iterable, List, Optional, Tuple, Union
4
+
5
+ from ..utils.file_utils import ( # noqa: F401 # backward compatibility
6
+ SINGLE_FILE_COMPRESSION_PROTOCOLS,
7
+ ArchiveIterable,
8
+ FilesIterable,
9
+ _get_extraction_protocol,
10
+ _get_path_extension,
11
+ _prepare_path_and_storage_options,
12
+ is_relative_path,
13
+ url_or_path_join,
14
+ xbasename,
15
+ xdirname,
16
+ xet_parse,
17
+ xexists,
18
+ xgetsize,
19
+ xglob,
20
+ xgzip_open,
21
+ xisdir,
22
+ xisfile,
23
+ xjoin,
24
+ xlistdir,
25
+ xnumpy_load,
26
+ xopen,
27
+ xpandas_read_csv,
28
+ xpandas_read_excel,
29
+ xPath,
30
+ xpyarrow_parquet_read_table,
31
+ xrelpath,
32
+ xsio_loadmat,
33
+ xsplit,
34
+ xsplitext,
35
+ xwalk,
36
+ xxml_dom_minidom_parse,
37
+ )
38
+ from ..utils.logging import get_logger
39
+ from ..utils.py_utils import map_nested
40
+ from .download_config import DownloadConfig
41
+
42
+
43
+ logger = get_logger(__name__)
44
+
45
+
46
+ class StreamingDownloadManager:
47
+ """
48
+ Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
49
+ Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract
50
+ data, but they rather return the path or url that could be opened using the `xopen` function which extends the
51
+ built-in `open` function to stream data from remote files.
52
+ """
53
+
54
+ is_streaming = True
55
+
56
+ def __init__(
57
+ self,
58
+ dataset_name: Optional[str] = None,
59
+ data_dir: Optional[str] = None,
60
+ download_config: Optional[DownloadConfig] = None,
61
+ base_path: Optional[str] = None,
62
+ ):
63
+ self._dataset_name = dataset_name
64
+ self._data_dir = data_dir
65
+ self._base_path = base_path or os.path.abspath(".")
66
+ self.download_config = download_config or DownloadConfig()
67
+
68
+ @property
69
+ def manual_dir(self):
70
+ return self._data_dir
71
+
72
+ def download(self, url_or_urls):
73
+ """Normalize URL(s) of files to stream data from.
74
+ This is the lazy version of `DownloadManager.download` for streaming.
75
+
76
+ Args:
77
+ url_or_urls (`str` or `list` or `dict`):
78
+ URL(s) of files to stream data from. Each url is a `str`.
79
+
80
+ Returns:
81
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
82
+
83
+ Example:
84
+
85
+ ```py
86
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
87
+ ```
88
+ """
89
+ url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True)
90
+ return url_or_urls
91
+
92
+ def _download_single(self, urlpath: str) -> str:
93
+ urlpath = str(urlpath)
94
+ if is_relative_path(urlpath):
95
+ # append the relative path to the base_path
96
+ urlpath = url_or_path_join(self._base_path, urlpath)
97
+ return urlpath
98
+
99
+ def extract(self, url_or_urls):
100
+ """Add extraction protocol for given url(s) for streaming.
101
+
102
+ This is the lazy version of `DownloadManager.extract` for streaming.
103
+
104
+ Args:
105
+ url_or_urls (`str` or `list` or `dict`):
106
+ URL(s) of files to stream data from. Each url is a `str`.
107
+
108
+ Returns:
109
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
110
+
111
+ Example:
112
+
113
+ ```py
114
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
115
+ >>> extracted_files = dl_manager.extract(downloaded_files)
116
+ ```
117
+ """
118
+ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
119
+ return urlpaths
120
+
121
+ def _extract(self, urlpath: str) -> str:
122
+ urlpath = str(urlpath)
123
+ protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
124
+ # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
125
+ path = urlpath.split("::")[0]
126
+ extension = _get_path_extension(path)
127
+ if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
128
+ raise NotImplementedError(
129
+ f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
130
+ f"Please use `dl_manager.iter_archive` instead.\n\n"
131
+ f"Example usage:\n\n"
132
+ f"\turl = dl_manager.download(url)\n"
133
+ f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
134
+ f"\tfor filename, file in tar_archive_iterator:\n"
135
+ f"\t\t..."
136
+ )
137
+ if protocol is None:
138
+ # no extraction
139
+ return urlpath
140
+ elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
141
+ # there is one single file which is the uncompressed file
142
+ inner_file = os.path.basename(urlpath.split("::")[0])
143
+ inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
144
+ return f"{protocol}://{inner_file}::{urlpath}"
145
+ else:
146
+ return f"{protocol}://::{urlpath}"
147
+
148
+ def download_and_extract(self, url_or_urls):
149
+ """Prepare given `url_or_urls` for streaming (add extraction protocol).
150
+
151
+ This is the lazy version of `DownloadManager.download_and_extract` for streaming.
152
+
153
+ Is equivalent to:
154
+
155
+ ```
156
+ urls = dl_manager.extract(dl_manager.download(url_or_urls))
157
+ ```
158
+
159
+ Args:
160
+ url_or_urls (`str` or `list` or `dict`):
161
+ URL(s) to stream from data from. Each url is a `str`.
162
+
163
+ Returns:
164
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
165
+ """
166
+ return self.extract(self.download(url_or_urls))
167
+
168
+ def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
169
+ """Iterate over files within an archive.
170
+
171
+ Args:
172
+ urlpath_or_buf (`str` or `io.BufferedReader`):
173
+ Archive path or archive binary file object.
174
+
175
+ Yields:
176
+ `tuple[str, io.BufferedReader]`:
177
+ 2-tuple (path_within_archive, file_object).
178
+ File object is opened in binary mode.
179
+
180
+ Example:
181
+
182
+ ```py
183
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
184
+ >>> files = dl_manager.iter_archive(archive)
185
+ ```
186
+ """
187
+
188
+ if hasattr(urlpath_or_buf, "read"):
189
+ return ArchiveIterable.from_buf(urlpath_or_buf)
190
+ else:
191
+ return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
192
+
193
+ def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
194
+ """Iterate over files.
195
+
196
+ Args:
197
+ urlpaths (`str` or `list` of `str`):
198
+ Root paths.
199
+
200
+ Yields:
201
+ str: File URL path.
202
+
203
+ Example:
204
+
205
+ ```py
206
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
207
+ >>> files = dl_manager.iter_files(files)
208
+ ```
209
+ """
210
+ return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
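To illustrate the lazy behaviour described in the class docstring, here is a small sketch, with an illustrative URL, of how `download_and_extract` only rewrites the path into an fsspec chained URL that `xopen` (re-exported above) can stream from.

```py
from datasets.download.streaming_download_manager import StreamingDownloadManager, xopen

dl_manager = StreamingDownloadManager()

# No bytes are fetched here: extract() only prepends the compression protocol.
url = dl_manager.download_and_extract("https://example.com/data/train.jsonl.gz")
print(url)  # gzip://train.jsonl::https://example.com/data/train.jsonl.gz

# xopen understands the chained URL and would stream + decompress on the fly
# (this part only works against a real, reachable URL):
# with xopen(url, "r") as f:
#     first_line = f.readline()
```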
venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes).
venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc ADDED
Binary file (6.81 kB).
venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py ADDED
@@ -0,0 +1,207 @@
1
+ import glob
2
+ import json
3
+ import os
4
+ import shutil
5
+ import time
6
+ import warnings
7
+ from pathlib import Path
8
+ from typing import List, Optional, Tuple, Union
9
+
10
+ import pyarrow as pa
11
+
12
+ import datasets
13
+ import datasets.config
14
+ import datasets.data_files
15
+ from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split
16
+
17
+
18
+ logger = datasets.utils.logging.get_logger(__name__)
19
+
20
+
21
+ def _get_modification_time(cached_directory_path):
22
+ return (Path(cached_directory_path)).stat().st_mtime
23
+
24
+
25
+ def _find_hash_in_cache(
26
+ dataset_name: str,
27
+ config_name: Optional[str],
28
+ cache_dir: Optional[str],
29
+ config_kwargs: dict,
30
+ custom_features: Optional[datasets.Features],
31
+ ) -> Tuple[str, str, str]:
32
+ if config_name or config_kwargs or custom_features:
33
+ config_id = datasets.BuilderConfig(config_name or "default").create_config_id(
34
+ config_kwargs=config_kwargs, custom_features=custom_features
35
+ )
36
+ else:
37
+ config_id = None
38
+ cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE))
39
+ namespace_and_dataset_name = dataset_name.split("/")
40
+ namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
41
+ cached_relative_path = "___".join(namespace_and_dataset_name)
42
+ cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
43
+ cached_directory_paths = [
44
+ cached_directory_path
45
+ for cached_directory_path in glob.glob(
46
+ os.path.join(cached_datasets_directory_path_root, config_id or "*", "*", "*")
47
+ )
48
+ if os.path.isdir(cached_directory_path)
49
+ and (
50
+ config_kwargs
51
+ or custom_features
52
+ or json.loads(Path(cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
53
+ == Path(cached_directory_path).parts[-3] # no extra params => config_id == config_name
54
+ )
55
+ ]
56
+ if not cached_directory_paths:
57
+ cached_directory_paths = [
58
+ cached_directory_path
59
+ for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
60
+ if os.path.isdir(cached_directory_path)
61
+ ]
62
+ available_configs = sorted(
63
+ {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths}
64
+ )
65
+ raise ValueError(
66
+ f"Couldn't find cache for {dataset_name}"
67
+ + (f" for config '{config_id}'" if config_id else "")
68
+ + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "")
69
+ )
70
+ # get most recent
71
+ cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1])
72
+ version, hash = cached_directory_path.parts[-2:]
73
+ other_configs = [
74
+ Path(_cached_directory_path).parts[-3]
75
+ for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash))
76
+ if os.path.isdir(_cached_directory_path)
77
+ and (
78
+ config_kwargs
79
+ or custom_features
80
+ or json.loads(Path(_cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
81
+ == Path(_cached_directory_path).parts[-3] # no extra params => config_id == config_name
82
+ )
83
+ ]
84
+ if not config_id and len(other_configs) > 1:
85
+ raise ValueError(
86
+ f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}"
87
+ f"\nPlease specify which configuration to reload from the cache, e.g."
88
+ f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')"
89
+ )
90
+ config_name = cached_directory_path.parts[-3]
91
+ warning_msg = (
92
+ f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} "
93
+ f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})."
94
+ )
95
+ logger.warning(warning_msg)
96
+ return config_name, version, hash
97
+
98
+
99
+ class Cache(datasets.ArrowBasedBuilder):
100
+ def __init__(
101
+ self,
102
+ cache_dir: Optional[str] = None,
103
+ dataset_name: Optional[str] = None,
104
+ config_name: Optional[str] = None,
105
+ version: Optional[str] = "0.0.0",
106
+ hash: Optional[str] = None,
107
+ base_path: Optional[str] = None,
108
+ info: Optional[datasets.DatasetInfo] = None,
109
+ features: Optional[datasets.Features] = None,
110
+ token: Optional[Union[bool, str]] = None,
111
+ use_auth_token="deprecated",
112
+ repo_id: Optional[str] = None,
113
+ data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None,
114
+ data_dir: Optional[str] = None,
115
+ storage_options: Optional[dict] = None,
116
+ writer_batch_size: Optional[int] = None,
117
+ name="deprecated",
118
+ **config_kwargs,
119
+ ):
120
+ if use_auth_token != "deprecated":
121
+ warnings.warn(
122
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
123
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
124
+ FutureWarning,
125
+ )
126
+ token = use_auth_token
127
+ if name != "deprecated":
128
+ warnings.warn(
129
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
130
+ category=FutureWarning,
131
+ )
132
+ config_name = name
133
+ if repo_id is None and dataset_name is None:
134
+ raise ValueError("repo_id or dataset_name is required for the Cache dataset builder")
135
+ if data_files is not None:
136
+ config_kwargs["data_files"] = data_files
137
+ if data_dir is not None:
138
+ config_kwargs["data_dir"] = data_dir
139
+ if hash == "auto" and version == "auto":
140
+ config_name, version, hash = _find_hash_in_cache(
141
+ dataset_name=repo_id or dataset_name,
142
+ config_name=config_name,
143
+ cache_dir=cache_dir,
144
+ config_kwargs=config_kwargs,
145
+ custom_features=features,
146
+ )
147
+ elif hash == "auto" or version == "auto":
148
+ raise NotImplementedError("Pass both hash='auto' and version='auto' instead")
149
+ super().__init__(
150
+ cache_dir=cache_dir,
151
+ dataset_name=dataset_name,
152
+ config_name=config_name,
153
+ version=version,
154
+ hash=hash,
155
+ base_path=base_path,
156
+ info=info,
157
+ token=token,
158
+ repo_id=repo_id,
159
+ storage_options=storage_options,
160
+ writer_batch_size=writer_batch_size,
161
+ )
162
+
163
+ def _info(self) -> datasets.DatasetInfo:
164
+ return datasets.DatasetInfo()
165
+
166
+ def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs):
167
+ if not os.path.exists(self.cache_dir):
168
+ raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}")
169
+ if output_dir is not None and output_dir != self.cache_dir:
170
+ shutil.copytree(self.cache_dir, output_dir)
171
+
172
+ def _split_generators(self, dl_manager):
173
+ # used to stream from cache
174
+ if isinstance(self.info.splits, datasets.SplitDict):
175
+ split_infos: List[datasets.SplitInfo] = list(self.info.splits.values())
176
+ else:
177
+ raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}")
178
+ return [
179
+ datasets.SplitGenerator(
180
+ name=split_info.name,
181
+ gen_kwargs={
182
+ "files": filenames_for_dataset_split(
183
+ self.cache_dir,
184
+ dataset_name=self.dataset_name,
185
+ split=split_info.name,
186
+ filetype_suffix="arrow",
187
+ shard_lengths=split_info.shard_lengths,
188
+ )
189
+ },
190
+ )
191
+ for split_info in split_infos
192
+ ]
193
+
194
+ def _generate_tables(self, files):
195
+ # used to stream from cache
196
+ for file_idx, file in enumerate(files):
197
+ with open(file, "rb") as f:
198
+ try:
199
+ for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
200
+ pa_table = pa.Table.from_batches([record_batch])
201
+ # Uncomment for debugging (will print the Arrow table size and elements)
202
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
203
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
204
+ yield f"{file_idx}_{batch_idx}", pa_table
205
+ except ValueError as e:
206
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
207
+ raise
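
For context, a minimal sketch (not part of the commit) of the Arrow IPC stream round-trip that `Cache._generate_tables` relies on; the shard file name and columns are illustrative assumptions.

import pyarrow as pa

# Hypothetical shard path; cached shards are stored in the Arrow IPC *stream* format.
shard_path = "example-train-00000-of-00001.arrow"
batch = pa.RecordBatch.from_pydict({"text": ["a", "b"], "label": [0, 1]})

# Write one record batch as an IPC stream (what the cache keeps on disk).
with pa.OSFile(shard_path, "wb") as sink:
    with pa.ipc.new_stream(sink, batch.schema) as writer:
        writer.write_batch(batch)

# Read it back batch by batch, mirroring Cache._generate_tables above.
with open(shard_path, "rb") as f:
    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
        pa_table = pa.Table.from_batches([record_batch])
        print(batch_idx, pa_table.num_rows)
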
venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc ADDED
Binary file (7.22 kB).
venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py ADDED
@@ -0,0 +1,203 @@
1
+ import itertools
2
+ from dataclasses import dataclass
3
+ from typing import Any, Callable, Dict, List, Optional, Union
4
+
5
+ import pandas as pd
6
+ import pyarrow as pa
7
+
8
+ import datasets
9
+ import datasets.config
10
+ from datasets.features.features import require_storage_cast
11
+ from datasets.table import table_cast
12
+ from datasets.utils.py_utils import Literal
13
+
14
+
15
+ logger = datasets.utils.logging.get_logger(__name__)
16
+
17
+ _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
18
+ _PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
19
+ _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
20
+ _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
21
+ _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ["verbose"]
22
+
23
+
24
+ @dataclass
25
+ class CsvConfig(datasets.BuilderConfig):
26
+ """BuilderConfig for CSV."""
27
+
28
+ sep: str = ","
29
+ delimiter: Optional[str] = None
30
+ header: Optional[Union[int, List[int], str]] = "infer"
31
+ names: Optional[List[str]] = None
32
+ column_names: Optional[List[str]] = None
33
+ index_col: Optional[Union[int, str, List[int], List[str]]] = None
34
+ usecols: Optional[Union[List[int], List[str]]] = None
35
+ prefix: Optional[str] = None
36
+ mangle_dupe_cols: bool = True
37
+ engine: Optional[Literal["c", "python", "pyarrow"]] = None
38
+ converters: Dict[Union[int, str], Callable[[Any], Any]] = None
39
+ true_values: Optional[list] = None
40
+ false_values: Optional[list] = None
41
+ skipinitialspace: bool = False
42
+ skiprows: Optional[Union[int, List[int]]] = None
43
+ nrows: Optional[int] = None
44
+ na_values: Optional[Union[str, List[str]]] = None
45
+ keep_default_na: bool = True
46
+ na_filter: bool = True
47
+ verbose: bool = False
48
+ skip_blank_lines: bool = True
49
+ thousands: Optional[str] = None
50
+ decimal: str = "."
51
+ lineterminator: Optional[str] = None
52
+ quotechar: str = '"'
53
+ quoting: int = 0
54
+ escapechar: Optional[str] = None
55
+ comment: Optional[str] = None
56
+ encoding: Optional[str] = None
57
+ dialect: Optional[str] = None
58
+ error_bad_lines: bool = True
59
+ warn_bad_lines: bool = True
60
+ skipfooter: int = 0
61
+ doublequote: bool = True
62
+ memory_map: bool = False
63
+ float_precision: Optional[str] = None
64
+ chunksize: int = 10_000
65
+ features: Optional[datasets.Features] = None
66
+ encoding_errors: Optional[str] = "strict"
67
+ on_bad_lines: Literal["error", "warn", "skip"] = "error"
68
+ date_format: Optional[str] = None
69
+
70
+ def __post_init__(self):
71
+ if self.delimiter is not None:
72
+ self.sep = self.delimiter
73
+ if self.column_names is not None:
74
+ self.names = self.column_names
75
+
76
+ @property
77
+ def pd_read_csv_kwargs(self):
78
+ pd_read_csv_kwargs = {
79
+ "sep": self.sep,
80
+ "header": self.header,
81
+ "names": self.names,
82
+ "index_col": self.index_col,
83
+ "usecols": self.usecols,
84
+ "prefix": self.prefix,
85
+ "mangle_dupe_cols": self.mangle_dupe_cols,
86
+ "engine": self.engine,
87
+ "converters": self.converters,
88
+ "true_values": self.true_values,
89
+ "false_values": self.false_values,
90
+ "skipinitialspace": self.skipinitialspace,
91
+ "skiprows": self.skiprows,
92
+ "nrows": self.nrows,
93
+ "na_values": self.na_values,
94
+ "keep_default_na": self.keep_default_na,
95
+ "na_filter": self.na_filter,
96
+ "verbose": self.verbose,
97
+ "skip_blank_lines": self.skip_blank_lines,
98
+ "thousands": self.thousands,
99
+ "decimal": self.decimal,
100
+ "lineterminator": self.lineterminator,
101
+ "quotechar": self.quotechar,
102
+ "quoting": self.quoting,
103
+ "escapechar": self.escapechar,
104
+ "comment": self.comment,
105
+ "encoding": self.encoding,
106
+ "dialect": self.dialect,
107
+ "error_bad_lines": self.error_bad_lines,
108
+ "warn_bad_lines": self.warn_bad_lines,
109
+ "skipfooter": self.skipfooter,
110
+ "doublequote": self.doublequote,
111
+ "memory_map": self.memory_map,
112
+ "float_precision": self.float_precision,
113
+ "chunksize": self.chunksize,
114
+ "encoding_errors": self.encoding_errors,
115
+ "on_bad_lines": self.on_bad_lines,
116
+ "date_format": self.date_format,
117
+ }
118
+
119
+ # some kwargs must not be passed if they don't have a default value
120
+ # some others are deprecated and we can also not pass them if they are the default value
121
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
122
+ if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
123
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
124
+
125
+ # Remove 1.3 new arguments
126
+ if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
127
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
128
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
129
+
130
+ # Remove 2.0 new arguments
131
+ if not (datasets.config.PANDAS_VERSION.major >= 2):
132
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
133
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
134
+
135
+ # Remove 2.2 deprecated arguments
136
+ if datasets.config.PANDAS_VERSION.release >= (2, 2):
137
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS:
138
+ if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
139
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
140
+
141
+ return pd_read_csv_kwargs
142
+
143
+
144
+ class Csv(datasets.ArrowBasedBuilder):
145
+ BUILDER_CONFIG_CLASS = CsvConfig
146
+
147
+ def _info(self):
148
+ return datasets.DatasetInfo(features=self.config.features)
149
+
150
+ def _split_generators(self, dl_manager):
151
+ """We handle string, list and dicts in datafiles"""
152
+ if not self.config.data_files:
153
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
154
+ dl_manager.download_config.extract_on_the_fly = True
155
+ data_files = dl_manager.download_and_extract(self.config.data_files)
156
+ if isinstance(data_files, (str, list, tuple)):
157
+ files = data_files
158
+ if isinstance(files, str):
159
+ files = [files]
160
+ files = [dl_manager.iter_files(file) for file in files]
161
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
162
+ splits = []
163
+ for split_name, files in data_files.items():
164
+ if isinstance(files, str):
165
+ files = [files]
166
+ files = [dl_manager.iter_files(file) for file in files]
167
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
168
+ return splits
169
+
170
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
171
+ if self.config.features is not None:
172
+ schema = self.config.features.arrow_schema
173
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
174
+ # cheaper cast
175
+ pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
176
+ else:
177
+ # more expensive cast; allows str <-> int/float or str to Audio for example
178
+ pa_table = table_cast(pa_table, schema)
179
+ return pa_table
180
+
181
+ def _generate_tables(self, files):
182
+ schema = self.config.features.arrow_schema if self.config.features else None
183
+ # dtype allows reading an int column as str
184
+ dtype = (
185
+ {
186
+ name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
187
+ for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
188
+ }
189
+ if schema is not None
190
+ else None
191
+ )
192
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
193
+ csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
194
+ try:
195
+ for batch_idx, df in enumerate(csv_file_reader):
196
+ pa_table = pa.Table.from_pandas(df)
197
+ # Uncomment for debugging (will print the Arrow table size and elements)
198
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
199
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
200
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
201
+ except ValueError as e:
202
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
203
+ raise
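
As a usage sketch (the file path is a placeholder, not from the commit): because CsvConfig forwards its fields to pandas.read_csv via pd_read_csv_kwargs, pandas options can be passed straight to load_dataset.

from datasets import load_dataset

ds = load_dataset(
    "csv",
    data_files={"train": "my_data.csv"},  # placeholder path
    sep=";",       # stored on CsvConfig.sep and handed to pandas.read_csv
    skiprows=1,    # likewise forwarded through CsvConfig.pd_read_csv_kwargs
    split="train",
)
print(ds.features)
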
venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes).
venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc ADDED
Binary file (6.3 kB).
venv/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py ADDED
@@ -0,0 +1,180 @@
1
+ import io
2
+ import itertools
3
+ import json
4
+ from dataclasses import dataclass
5
+ from typing import Optional
6
+
7
+ import pyarrow as pa
8
+ import pyarrow.json as paj
9
+
10
+ import datasets
11
+ from datasets.table import table_cast
12
+ from datasets.utils.file_utils import readline
13
+
14
+
15
+ logger = datasets.utils.logging.get_logger(__name__)
16
+
17
+
18
+ @dataclass
19
+ class JsonConfig(datasets.BuilderConfig):
20
+ """BuilderConfig for JSON."""
21
+
22
+ features: Optional[datasets.Features] = None
23
+ encoding: str = "utf-8"
24
+ encoding_errors: Optional[str] = None
25
+ field: Optional[str] = None
26
+ use_threads: bool = True # deprecated
27
+ block_size: Optional[int] = None # deprecated
28
+ chunksize: int = 10 << 20 # 10MB
29
+ newlines_in_values: Optional[bool] = None
30
+
31
+
32
+ class Json(datasets.ArrowBasedBuilder):
33
+ BUILDER_CONFIG_CLASS = JsonConfig
34
+
35
+ def _info(self):
36
+ if self.config.block_size is not None:
37
+ logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
38
+ self.config.chunksize = self.config.block_size
39
+ if self.config.use_threads is not True:
40
+ logger.warning(
41
+ "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
42
+ )
43
+ if self.config.newlines_in_values is not None:
44
+ raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
45
+ return datasets.DatasetInfo(features=self.config.features)
46
+
47
+ def _split_generators(self, dl_manager):
48
+ """We handle string, list and dicts in datafiles"""
49
+ if not self.config.data_files:
50
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
51
+ dl_manager.download_config.extract_on_the_fly = True
52
+ data_files = dl_manager.download_and_extract(self.config.data_files)
53
+ if isinstance(data_files, (str, list, tuple)):
54
+ files = data_files
55
+ if isinstance(files, str):
56
+ files = [files]
57
+ files = [dl_manager.iter_files(file) for file in files]
58
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
59
+ splits = []
60
+ for split_name, files in data_files.items():
61
+ if isinstance(files, str):
62
+ files = [files]
63
+ files = [dl_manager.iter_files(file) for file in files]
64
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
65
+ return splits
66
+
67
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
68
+ if self.config.features is not None:
69
+ # adding missing columns
70
+ for column_name in set(self.config.features) - set(pa_table.column_names):
71
+ type = self.config.features.arrow_schema.field(column_name).type
72
+ pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
73
+ # more expensive cast to support nested structures with keys in a different order
74
+ # allows str <-> int/float or str to Audio for example
75
+ pa_table = table_cast(pa_table, self.config.features.arrow_schema)
76
+ return pa_table
77
+
78
+ def _generate_tables(self, files):
79
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
80
+ # If the file is one json object and if we need to look at the list of items in one specific field
81
+ if self.config.field is not None:
82
+ with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
83
+ dataset = json.load(f)
84
+
85
+ # We keep only the field we are interested in
86
+ dataset = dataset[self.config.field]
87
+
88
+ # We accept two formats: a list of dicts or a dict of lists
89
+ if isinstance(dataset, (list, tuple)):
90
+ keys = set().union(*[row.keys() for row in dataset])
91
+ mapping = {col: [row.get(col) for row in dataset] for col in keys}
92
+ else:
93
+ mapping = dataset
94
+ pa_table = pa.Table.from_pydict(mapping)
95
+ yield file_idx, self._cast_table(pa_table)
96
+
97
+ # If the file has one json object per line
98
+ else:
99
+ with open(file, "rb") as f:
100
+ batch_idx = 0
101
+ # Use block_size equal to the chunk size divided by 32 to leverage multithreading
102
+ # Set a default minimum value of 16kB if the chunk size is really small
103
+ block_size = max(self.config.chunksize // 32, 16 << 10)
104
+ encoding_errors = (
105
+ self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
106
+ )
107
+ while True:
108
+ batch = f.read(self.config.chunksize)
109
+ if not batch:
110
+ break
111
+ # Finish current line
112
+ try:
113
+ batch += f.readline()
114
+ except (AttributeError, io.UnsupportedOperation):
115
+ batch += readline(f)
116
+ # PyArrow only accepts utf-8 encoded bytes
117
+ if self.config.encoding != "utf-8":
118
+ batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
119
+ try:
120
+ while True:
121
+ try:
122
+ pa_table = paj.read_json(
123
+ io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
124
+ )
125
+ break
126
+ except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
127
+ if (
128
+ isinstance(e, pa.ArrowInvalid)
129
+ and "straddling" not in str(e)
130
+ or block_size > len(batch)
131
+ ):
132
+ raise
133
+ else:
134
+ # Increase the block size in case it was too small.
135
+ # The block size will be reset for the next file.
136
+ logger.debug(
137
+ f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
138
+ )
139
+ block_size *= 2
140
+ except pa.ArrowInvalid as e:
141
+ try:
142
+ with open(
143
+ file, encoding=self.config.encoding, errors=self.config.encoding_errors
144
+ ) as f:
145
+ dataset = json.load(f)
146
+ except json.JSONDecodeError:
147
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
148
+ raise e
149
+ # If possible, parse the file as a list of json objects/strings and exit the loop
150
+ if isinstance(dataset, list): # list is the only sequence type supported in JSON
151
+ try:
152
+ if dataset and isinstance(dataset[0], str):
153
+ pa_table_names = (
154
+ list(self.config.features)
155
+ if self.config.features is not None
156
+ else ["text"]
157
+ )
158
+ pa_table = pa.Table.from_arrays([pa.array(dataset)], names=pa_table_names)
159
+ else:
160
+ keys = set().union(*[row.keys() for row in dataset])
161
+ mapping = {col: [row.get(col) for row in dataset] for col in keys}
162
+ pa_table = pa.Table.from_pydict(mapping)
163
+ except (pa.ArrowInvalid, AttributeError) as e:
164
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
165
+ raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
166
+ yield file_idx, self._cast_table(pa_table)
167
+ break
168
+ else:
169
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
170
+ raise ValueError(
171
+ f"Not able to read records in the JSON file at {file}. "
172
+ f"You should probably indicate the field of the JSON file containing your records. "
173
+ f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
174
+ f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
175
+ ) from None
176
+ # Uncomment for debugging (will print the Arrow table size and elements)
177
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
178
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
179
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
180
+ batch_idx += 1
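
A short usage sketch of the two layouts the JSON builder accepts (file names are placeholders, not from the commit).

from datasets import load_dataset

# JSON Lines: one object per line, parsed in chunks with pyarrow.json.
ds_lines = load_dataset("json", data_files="records.jsonl", split="train")

# A single JSON document whose records live under one key,
# e.g. {"data": [{"a": 1}, {"a": 2}]}; `field` maps to JsonConfig.field.
ds_field = load_dataset("json", data_files="dump.json", field="data", split="train")
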
venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes).
venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc ADDED
Binary file (10.5 kB).
venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py ADDED
@@ -0,0 +1,349 @@
1
+ import os
2
+ import posixpath
+ import shutil
3
+ import uuid
4
+ from dataclasses import dataclass
5
+ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
6
+
7
+ import numpy as np
8
+ import pyarrow as pa
9
+
10
+ import datasets
11
+ from datasets.arrow_writer import ArrowWriter, ParquetWriter
12
+ from datasets.config import MAX_SHARD_SIZE
13
+ from datasets.filesystems import (
14
+ is_remote_filesystem,
15
+ rename,
16
+ )
17
+ from datasets.iterable_dataset import _BaseExamplesIterable
18
+ from datasets.utils.py_utils import convert_file_size_to_int
19
+
20
+
21
+ logger = datasets.utils.logging.get_logger(__name__)
22
+
23
+ if TYPE_CHECKING:
24
+ import pyspark
25
+
26
+
27
+ @dataclass
28
+ class SparkConfig(datasets.BuilderConfig):
29
+ """BuilderConfig for Spark."""
30
+
31
+ features: Optional[datasets.Features] = None
32
+
33
+
34
+ def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]):
35
+ df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}")
36
+ for partition_id in new_partition_order[1:]:
37
+ partition_df = df.select("*").where(f"part_id = {partition_id}")
38
+ df_combined = df_combined.union(partition_df)
39
+ return df_combined
40
+
41
+
42
+ def _generate_iterable_examples(
43
+ df: "pyspark.sql.DataFrame",
44
+ partition_order: List[int],
45
+ ):
46
+ import pyspark
47
+
48
+ def generate_fn():
49
+ df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
50
+ partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order)
51
+ row_id = 0
52
+ # pipeline next partition in parallel to hide latency
53
+ rows = partition_df.toLocalIterator(prefetchPartitions=True)
54
+ curr_partition = -1
55
+ for row in rows:
56
+ row_as_dict = row.asDict()
57
+ part_id = row_as_dict["part_id"]
58
+ row_as_dict.pop("part_id")
59
+ if curr_partition != part_id:
60
+ curr_partition = part_id
61
+ row_id = 0
62
+ yield f"{part_id}_{row_id}", row_as_dict
63
+ row_id += 1
64
+
65
+ return generate_fn
66
+
67
+
68
+ class SparkExamplesIterable(_BaseExamplesIterable):
69
+ def __init__(
70
+ self,
71
+ df: "pyspark.sql.DataFrame",
72
+ partition_order=None,
73
+ ):
74
+ self.df = df
75
+ self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
76
+ self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
77
+
78
+ def __iter__(self):
79
+ yield from self.generate_examples_fn()
80
+
81
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
82
+ partition_order = list(range(self.df.rdd.getNumPartitions()))
83
+ generator.shuffle(partition_order)
84
+ return SparkExamplesIterable(self.df, partition_order=partition_order)
85
+
86
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
87
+ partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
88
+ return SparkExamplesIterable(self.df, partition_order=partition_order)
89
+
90
+ @property
91
+ def n_shards(self) -> int:
92
+ return len(self.partition_order)
93
+
94
+
95
+ class Spark(datasets.DatasetBuilder):
96
+ BUILDER_CONFIG_CLASS = SparkConfig
97
+
98
+ def __init__(
99
+ self,
100
+ df: "pyspark.sql.DataFrame",
101
+ cache_dir: str = None,
102
+ working_dir: str = None,
103
+ **config_kwargs,
104
+ ):
105
+ import pyspark
106
+
107
+ self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
108
+ self.df = df
109
+ self._working_dir = working_dir
110
+
111
+ super().__init__(
112
+ cache_dir=cache_dir,
113
+ config_name=str(self.df.semanticHash()),
114
+ **config_kwargs,
115
+ )
116
+
117
+ def _validate_cache_dir(self):
118
+ # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling
119
+ # error due to pickling the SparkContext.
120
+ cache_dir = self._cache_dir
121
+
122
+ # Returns the path of the created file.
123
+ def create_cache_and_write_probe(context):
124
+ # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
125
+ # already exist.
126
+ os.makedirs(cache_dir, exist_ok=True)
127
+ probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
128
+ # Opening the file in append mode will create a new file unless it already exists, in which case it will not
129
+ # change the file contents.
130
+ open(probe_file, "a")
131
+ return [probe_file]
132
+
133
+ if self._spark.conf.get("spark.master", "").startswith("local"):
134
+ return
135
+
136
+ # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
137
+ # accessible to the driver.
138
+ # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
139
+ if self._cache_dir:
140
+ probe = (
141
+ self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
142
+ )
143
+ if os.path.isfile(probe[0]):
144
+ return
145
+
146
+ raise ValueError(
147
+ "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
148
+ )
149
+
150
+ def _info(self):
151
+ return datasets.DatasetInfo(features=self.config.features)
152
+
153
+ def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
154
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
155
+
156
+ def _repartition_df_if_needed(self, max_shard_size):
157
+ import pyspark
158
+
159
+ def get_arrow_batch_size(it):
160
+ for batch in it:
161
+ yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
162
+
163
+ df_num_rows = self.df.count()
164
+ sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
165
+ # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
166
+ approx_bytes_per_row = (
167
+ self.df.limit(sample_num_rows)
168
+ .repartition(1)
169
+ .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
170
+ .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
171
+ .collect()[0]
172
+ .sample_bytes
173
+ / sample_num_rows
174
+ )
175
+ approx_total_size = approx_bytes_per_row * df_num_rows
176
+ if approx_total_size > max_shard_size:
177
+ # Make sure there is at least one row per partition.
178
+ new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
179
+ self.df = self.df.repartition(new_num_partitions)
180
+
181
+ def _prepare_split_single(
182
+ self,
183
+ fpath: str,
184
+ file_format: str,
185
+ max_shard_size: int,
186
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
187
+ import pyspark
188
+
189
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
190
+ working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
191
+ embed_local_files = file_format == "parquet"
192
+
193
+ # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
194
+ # pickling the SparkContext.
195
+ features = self.config.features
196
+ writer_batch_size = self._writer_batch_size
197
+ storage_options = self._fs.storage_options
198
+
199
+ def write_arrow(it):
200
+ # Within the same SparkContext, no two task attempts will share the same attempt ID.
201
+ task_id = pyspark.TaskContext().taskAttemptId()
202
+ first_batch = next(it, None)
203
+ if first_batch is None:
204
+ # Some partitions might not receive any data.
205
+ return pa.RecordBatch.from_arrays(
206
+ [[task_id], [0], [0]],
207
+ names=["task_id", "num_examples", "num_bytes"],
208
+ )
209
+ shard_id = 0
210
+ writer = writer_class(
211
+ features=features,
212
+ path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
213
+ writer_batch_size=writer_batch_size,
214
+ storage_options=storage_options,
215
+ embed_local_files=embed_local_files,
216
+ )
217
+ table = pa.Table.from_batches([first_batch])
218
+ writer.write_table(table)
219
+ for batch in it:
220
+ if max_shard_size is not None and writer._num_bytes >= max_shard_size:
221
+ num_examples, num_bytes = writer.finalize()
222
+ writer.close()
223
+ yield pa.RecordBatch.from_arrays(
224
+ [[task_id], [num_examples], [num_bytes]],
225
+ names=["task_id", "num_examples", "num_bytes"],
226
+ )
227
+ shard_id += 1
228
+ writer = writer_class(
229
+ features=writer._features,
230
+ path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
231
+ writer_batch_size=writer_batch_size,
232
+ storage_options=storage_options,
233
+ embed_local_files=embed_local_files,
234
+ )
235
+ table = pa.Table.from_batches([batch])
236
+ writer.write_table(table)
237
+
238
+ if writer._num_bytes > 0:
239
+ num_examples, num_bytes = writer.finalize()
240
+ writer.close()
241
+ yield pa.RecordBatch.from_arrays(
242
+ [[task_id], [num_examples], [num_bytes]],
243
+ names=["task_id", "num_examples", "num_bytes"],
244
+ )
245
+
246
+ if working_fpath != fpath:
247
+ for file in os.listdir(os.path.dirname(working_fpath)):
248
+ dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
249
+ shutil.move(file, dest)
250
+
251
+ stats = (
252
+ self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
253
+ .groupBy("task_id")
254
+ .agg(
255
+ pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
256
+ pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
257
+ pyspark.sql.functions.count("num_bytes").alias("num_shards"),
258
+ pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
259
+ )
260
+ .collect()
261
+ )
262
+ for row in stats:
263
+ yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
264
+
265
+ def _prepare_split(
266
+ self,
267
+ split_generator: "datasets.SplitGenerator",
268
+ file_format: str = "arrow",
269
+ max_shard_size: Optional[Union[str, int]] = None,
270
+ num_proc: Optional[int] = None,
271
+ **kwargs,
272
+ ):
273
+ self._validate_cache_dir()
274
+
275
+ max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
276
+ self._repartition_df_if_needed(max_shard_size)
277
+ is_local = not is_remote_filesystem(self._fs)
278
+ path_join = os.path.join if is_local else posixpath.join
279
+
280
+ SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
281
+ fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
282
+ fpath = path_join(self._output_dir, fname)
283
+
284
+ total_num_examples = 0
285
+ total_num_bytes = 0
286
+ total_shards = 0
287
+ task_id_and_num_shards = []
288
+ all_shard_lengths = []
289
+
290
+ for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
291
+ (
292
+ num_examples,
293
+ num_bytes,
294
+ num_shards,
295
+ shard_lengths,
296
+ ) = content
297
+ if num_bytes > 0:
298
+ total_num_examples += num_examples
299
+ total_num_bytes += num_bytes
300
+ total_shards += num_shards
301
+ task_id_and_num_shards.append((task_id, num_shards))
302
+ all_shard_lengths.extend(shard_lengths)
303
+
304
+ split_generator.split_info.num_examples = total_num_examples
305
+ split_generator.split_info.num_bytes = total_num_bytes
306
+
307
+ # should rename everything at the end
308
+ logger.debug(f"Renaming {total_shards} shards.")
309
+ if total_shards > 1:
310
+ split_generator.split_info.shard_lengths = all_shard_lengths
311
+
312
+ # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
313
+ # pickling error due to pickling the SparkContext.
314
+ fs = self._fs
315
+
316
+ # use the -SSSSS-of-NNNNN pattern
317
+ def _rename_shard(
318
+ task_id: int,
319
+ shard_id: int,
320
+ global_shard_id: int,
321
+ ):
322
+ rename(
323
+ fs,
324
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
325
+ fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
326
+ )
327
+
328
+ args = []
329
+ global_shard_id = 0
330
+ for i in range(len(task_id_and_num_shards)):
331
+ task_id, num_shards = task_id_and_num_shards[i]
332
+ for shard_id in range(num_shards):
333
+ args.append([task_id, shard_id, global_shard_id])
334
+ global_shard_id += 1
335
+ self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
336
+ else:
337
+ # don't use any pattern
338
+ shard_id = 0
339
+ task_id = task_id_and_num_shards[0][0]
340
+ self._rename(
341
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
342
+ fpath.replace(SUFFIX, ""),
343
+ )
344
+
345
+ def _get_examples_iterable_for_split(
346
+ self,
347
+ split_generator: "datasets.SplitGenerator",
348
+ ) -> SparkExamplesIterable:
349
+ return SparkExamplesIterable(self.df)
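
A hedged usage sketch for the Spark builder: it is normally reached through Dataset.from_spark, which requires a running pyspark session (the DataFrame contents below are illustrative).

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], schema="text string, label long")

# Materializes the DataFrame partitions into Arrow shards under the cache directory.
ds = Dataset.from_spark(df)
print(ds[0])
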
venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes).
venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc ADDED
Binary file (4.47 kB).
venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py ADDED
@@ -0,0 +1,118 @@
1
+ import sys
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
4
+
5
+ import pandas as pd
6
+ import pyarrow as pa
7
+
8
+ import datasets
9
+ import datasets.config
10
+ from datasets.features.features import require_storage_cast
11
+ from datasets.table import table_cast
12
+
13
+
14
+ if TYPE_CHECKING:
15
+ import sqlite3
16
+
17
+ import sqlalchemy
18
+
19
+
20
+ logger = datasets.utils.logging.get_logger(__name__)
21
+
22
+
23
+ @dataclass
24
+ class SqlConfig(datasets.BuilderConfig):
25
+ """BuilderConfig for SQL."""
26
+
27
+ sql: Union[str, "sqlalchemy.sql.Selectable"] = None
28
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
29
+ index_col: Optional[Union[str, List[str]]] = None
30
+ coerce_float: bool = True
31
+ params: Optional[Union[List, Tuple, Dict]] = None
32
+ parse_dates: Optional[Union[List, Dict]] = None
33
+ columns: Optional[List[str]] = None
34
+ chunksize: Optional[int] = 10_000
35
+ features: Optional[datasets.Features] = None
36
+
37
+ def __post_init__(self):
38
+ if self.sql is None:
39
+ raise ValueError("sql must be specified")
40
+ if self.con is None:
41
+ raise ValueError("con must be specified")
42
+
43
+ def create_config_id(
44
+ self,
45
+ config_kwargs: dict,
46
+ custom_features: Optional[datasets.Features] = None,
47
+ ) -> str:
48
+ config_kwargs = config_kwargs.copy()
49
+ # We need to stringify the Selectable object to make its hash deterministic
50
+
51
+ # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
52
+ sql = config_kwargs["sql"]
53
+ if not isinstance(sql, str):
54
+ if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
55
+ import sqlalchemy
56
+
57
+ if isinstance(sql, sqlalchemy.sql.Selectable):
58
+ engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
59
+ sql_str = str(sql.compile(dialect=engine.dialect))
60
+ config_kwargs["sql"] = sql_str
61
+ else:
62
+ raise TypeError(
63
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
64
+ )
65
+ else:
66
+ raise TypeError(
67
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
68
+ )
69
+ con = config_kwargs["con"]
70
+ if not isinstance(con, str):
71
+ config_kwargs["con"] = id(con)
72
+ logger.info(
73
+ f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
74
+ )
75
+
76
+ return super().create_config_id(config_kwargs, custom_features=custom_features)
77
+
78
+ @property
79
+ def pd_read_sql_kwargs(self):
80
+ pd_read_sql_kwargs = {
81
+ "index_col": self.index_col,
82
+ "columns": self.columns,
83
+ "params": self.params,
84
+ "coerce_float": self.coerce_float,
85
+ "parse_dates": self.parse_dates,
86
+ }
87
+ return pd_read_sql_kwargs
88
+
89
+
90
+ class Sql(datasets.ArrowBasedBuilder):
91
+ BUILDER_CONFIG_CLASS = SqlConfig
92
+
93
+ def _info(self):
94
+ return datasets.DatasetInfo(features=self.config.features)
95
+
96
+ def _split_generators(self, dl_manager):
97
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
98
+
99
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
100
+ if self.config.features is not None:
101
+ schema = self.config.features.arrow_schema
102
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
103
+ # cheaper cast
104
+ pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
105
+ else:
106
+ # more expensive cast; allows str <-> int/float or str to Audio for example
107
+ pa_table = table_cast(pa_table, schema)
108
+ return pa_table
109
+
110
+ def _generate_tables(self):
111
+ chunksize = self.config.chunksize
112
+ sql_reader = pd.read_sql(
113
+ self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
114
+ )
115
+ sql_reader = [sql_reader] if chunksize is None else sql_reader
116
+ for chunk_idx, df in enumerate(sql_reader):
117
+ pa_table = pa.Table.from_pandas(df)
118
+ yield chunk_idx, self._cast_table(pa_table)
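
A hedged usage sketch for the Sql builder, using a throwaway SQLite database (paths and table names are placeholders; reading through a URI string requires sqlalchemy to be installed).

import sqlite3
from datasets import Dataset

con = sqlite3.connect("example.db")
con.execute("CREATE TABLE IF NOT EXISTS items (text TEXT, label INTEGER)")
con.executemany("INSERT INTO items VALUES (?, ?)", [("a", 0), ("b", 1)])
con.commit()
con.close()

# Passing a URI string (rather than a live connection) keeps the config hash
# deterministic, as noted in SqlConfig.create_config_id above.
ds = Dataset.from_sql("SELECT text, label FROM items", con="sqlite:///example.db")
print(ds[0])
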
venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes).
venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc ADDED
Binary file (8.84 kB).
venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc ADDED
Binary file (6.11 kB).
venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py ADDED
@@ -0,0 +1,285 @@
1
+ #
2
+ # Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
3
+ # This file comes from the WebDataset library.
4
+ # See the LICENSE file for licensing terms (BSD-style).
5
+ #
6
+
7
+ """
8
+ Binary tensor encodings for PyTorch and NumPy.
9
+
10
+ This defines efficient binary encodings for tensors. The format is 8 byte
11
+ aligned and can be used directly for computations when transmitted, say,
12
+ via RDMA. The format is supported by WebDataset with the `.ten` filename
13
+ extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used
14
+ for fast tensor storage with LMDB and in disk files (which can be memory
15
+ mapped).
16
+
17
+ Data is encoded as a series of chunks:
18
+
19
+ - magic number (int64)
20
+ - length in bytes (int64)
21
+ - bytes (multiple of 64 bytes long)
22
+
23
+ Arrays are a header chunk followed by a data chunk.
24
+ Header chunks have the following structure:
25
+
26
+ - dtype (int64)
27
+ - 8 byte array name
28
+ - ndim (int64)
29
+ - dim[0]
30
+ - dim[1]
31
+ - ...
32
+ """
33
+
34
+ import struct
35
+ import sys
36
+
37
+ import numpy as np
38
+
39
+
40
+ def bytelen(a):
41
+ """Determine the length of a in bytes."""
42
+ if hasattr(a, "nbytes"):
43
+ return a.nbytes
44
+ elif isinstance(a, (bytearray, bytes)):
45
+ return len(a)
46
+ else:
47
+ raise ValueError(a, "cannot determine nbytes")
48
+
49
+
50
+ def bytedata(a):
51
+ """Return a the raw data corresponding to a."""
52
+ if isinstance(a, (bytearray, bytes, memoryview)):
53
+ return a
54
+ elif hasattr(a, "data"):
55
+ return a.data
56
+ else:
57
+ raise ValueError(a, "cannot return bytedata")
58
+
59
+
60
+ # tables for converting between long/short NumPy dtypes
61
+
62
+ long_to_short = """
63
+ float16 f2
64
+ float32 f4
65
+ float64 f8
66
+ int8 i1
67
+ int16 i2
68
+ int32 i4
69
+ int64 i8
70
+ uint8 u1
71
+ uint16 u2
72
+ uint32 u4
73
+ uint64 u8
74
+ """.strip()
75
+ long_to_short = [x.split() for x in long_to_short.split("\n")]
76
+ long_to_short = {x[0]: x[1] for x in long_to_short}
77
+ short_to_long = {v: k for k, v in long_to_short.items()}
78
+
79
+
80
+ def check_acceptable_input_type(data, allow64):
81
+ """Check that the data has an acceptable type for tensor encoding.
82
+
83
+ :param data: array
84
+ :param allow64: allow 64 bit types
85
+ """
86
+ for a in data:
87
+ if a.dtype.name not in long_to_short:
88
+ raise ValueError("unsupported dataypte")
89
+ if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]:
90
+ raise ValueError("64 bit datatypes not allowed unless explicitly enabled")
91
+
92
+
93
+ def str64(s):
94
+ """Convert a string to an int64."""
95
+ s = s + "\0" * (8 - len(s))
96
+ s = s.encode("ascii")
97
+ return struct.unpack("@q", s)[0]
98
+
99
+
100
+ def unstr64(i):
101
+ """Convert an int64 to a string."""
102
+ b = struct.pack("@q", i)
103
+ return b.decode("ascii").strip("\0")
104
+
105
+
106
+ def check_infos(data, infos, required_infos=None):
107
+ """Verify the info strings."""
108
+ if required_infos is False or required_infos is None:
109
+ return data
110
+ if required_infos is True:
111
+ return data, infos
112
+ if not isinstance(required_infos, (tuple, list)):
113
+ raise ValueError("required_infos must be tuple or list")
114
+ for required, actual in zip(required_infos, infos):
115
+ raise ValueError(f"actual info {actual} doesn't match required info {required}")
116
+ return data
117
+
118
+
119
+ def encode_header(a, info=""):
120
+ """Encode an array header as a byte array."""
121
+ if a.ndim >= 10:
122
+ raise ValueError("too many dimensions")
123
+ if a.nbytes != np.prod(a.shape) * a.itemsize:
124
+ raise ValueError("mismatch between size and shape")
125
+ if a.dtype.name not in long_to_short:
126
+ raise ValueError("unsupported array type")
127
+ header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)
128
+ return bytedata(np.array(header, dtype="i8"))
129
+
130
+
131
+ def decode_header(h):
132
+ """Decode a byte array into an array header."""
133
+ h = np.frombuffer(h, dtype="i8")
134
+ if unstr64(h[0]) not in short_to_long:
135
+ raise ValueError("unsupported array type")
136
+ dtype = np.dtype(short_to_long[unstr64(h[0])])
137
+ info = unstr64(h[1])
138
+ rank = int(h[2])
139
+ shape = tuple(h[3 : 3 + rank])
140
+ return shape, dtype, info
141
+
142
+
143
+ def encode_list(l, infos=None): # noqa: E741
144
+ """Given a list of arrays, encode them into a list of byte arrays."""
145
+ if infos is None:
146
+ infos = [""]
147
+ else:
148
+ if len(l) != len(infos):
149
+ raise ValueError(f"length of list {l} must muatch length of infos {infos}")
150
+ result = []
151
+ for i, a in enumerate(l):
152
+ header = encode_header(a, infos[i % len(infos)])
153
+ result += [header, bytedata(a)]
154
+ return result
155
+
156
+
157
+ def decode_list(l, infos=False): # noqa: E741
158
+ """Given a list of byte arrays, decode them into arrays."""
159
+ result = []
160
+ infos0 = []
161
+ for header, data in zip(l[::2], l[1::2]):
162
+ shape, dtype, info = decode_header(header)
163
+ a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape)
164
+ result += [a]
165
+ infos0 += [info]
166
+ return check_infos(result, infos0, infos)
167
+
168
+
169
+ magic_str = "~TenBin~"
170
+ magic = str64(magic_str)
171
+ magic_bytes = unstr64(magic).encode("ascii")
172
+
173
+
174
+ def roundup(n, k=64):
175
+ """Round up to the next multiple of 64."""
176
+ return k * ((n + k - 1) // k)
177
+
178
+
179
+ def encode_chunks(l): # noqa: E741
180
+ """Encode a list of chunks into a single byte array, with lengths and magics.."""
181
+ size = sum(16 + roundup(b.nbytes) for b in l)
182
+ result = bytearray(size)
183
+ offset = 0
184
+ for b in l:
185
+ result[offset : offset + 8] = magic_bytes
186
+ offset += 8
187
+ result[offset : offset + 8] = struct.pack("@q", b.nbytes)
188
+ offset += 8
189
+ result[offset : offset + bytelen(b)] = b
190
+ offset += roundup(bytelen(b))
191
+ return result
192
+
193
+
194
+ def decode_chunks(buf):
195
+ """Decode a byte array into a list of chunks."""
196
+ result = []
197
+ offset = 0
198
+ total = bytelen(buf)
199
+ while offset < total:
200
+ if magic_bytes != buf[offset : offset + 8]:
201
+ raise ValueError("magic bytes mismatch")
202
+ offset += 8
203
+ nbytes = struct.unpack("@q", buf[offset : offset + 8])[0]
204
+ offset += 8
205
+ b = buf[offset : offset + nbytes]
206
+ offset += roundup(nbytes)
207
+ result.append(b)
208
+ return result
209
+
210
+
211
+ def encode_buffer(l, infos=None): # noqa: E741
212
+ """Encode a list of arrays into a single byte array."""
213
+ if not isinstance(l, list):
214
+ raise ValueError("requires list")
215
+ return encode_chunks(encode_list(l, infos=infos))
216
+
217
+
218
+ def decode_buffer(buf, infos=False):
219
+ """Decode a byte array into a list of arrays."""
220
+ return decode_list(decode_chunks(buf), infos=infos)
221
+
222
+
223
+ def write_chunk(stream, buf):
224
+ """Write a byte chunk to the stream with magics, length, and padding."""
225
+ nbytes = bytelen(buf)
226
+ stream.write(magic_bytes)
227
+ stream.write(struct.pack("@q", nbytes))
228
+ stream.write(bytedata(buf))
229
+ padding = roundup(nbytes) - nbytes
230
+ if padding > 0:
231
+ stream.write(b"\0" * padding)
232
+
233
+
234
+ def read_chunk(stream):
235
+ """Read a byte chunk from a stream with magics, length, and padding."""
236
+ magic = stream.read(8)
237
+ if magic == b"":
238
+ return None
239
+ if magic != magic_bytes:
240
+ raise ValueError("magic number does not match")
241
+ nbytes = stream.read(8)
242
+ nbytes = struct.unpack("@q", nbytes)[0]
243
+ if nbytes < 0:
244
+ raise ValueError("negative nbytes")
245
+ data = stream.read(nbytes)
246
+ padding = roundup(nbytes) - nbytes
247
+ if padding > 0:
248
+ stream.read(padding)
249
+ return data
250
+
251
+
252
+ def write(stream, l, infos=None): # noqa: E741
253
+ """Write a list of arrays to a stream, with magics, length, and padding."""
254
+ for chunk in encode_list(l, infos=infos):
255
+ write_chunk(stream, chunk)
256
+
257
+
258
+ def read(stream, n=sys.maxsize, infos=False):
259
+ """Read a list of arrays from a stream, with magics, length, and padding."""
260
+ chunks = []
261
+ for _ in range(n):
262
+ header = read_chunk(stream)
263
+ if header is None:
264
+ break
265
+ data = read_chunk(stream)
266
+ if data is None:
267
+ raise ValueError("premature EOF")
268
+ chunks += [header, data]
269
+ return decode_list(chunks, infos=infos)
270
+
271
+
272
+ def save(fname, *args, infos=None, nocheck=False):
273
+ """Save a list of arrays to a file, with magics, length, and padding."""
274
+ if not nocheck and not fname.endswith(".ten"):
275
+ raise ValueError("file name should end in .ten")
276
+ with open(fname, "wb") as stream:
277
+ write(stream, args, infos=infos)
278
+
279
+
280
+ def load(fname, infos=False, nocheck=False):
281
+ """Read a list of arrays from a file, with magics, length, and padding."""
282
+ if not nocheck and not fname.endswith(".ten"):
283
+ raise ValueError("file name should end in .ten")
284
+ with open(fname, "rb") as stream:
285
+ return read(stream, infos=infos)
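
A round-trip sketch for the tenbin helpers above (the import path follows the file location in this diff; the arrays are illustrative).

import numpy as np
from datasets.packaged_modules.webdataset import _tenbin

arrays = [np.arange(6, dtype="int32").reshape(2, 3), np.ones(4, dtype="float32")]

buf = _tenbin.encode_buffer(arrays)   # one header chunk plus one data chunk per array
decoded = _tenbin.decode_buffer(buf)  # back to a list of NumPy arrays

assert all((a == b).all() for a, b in zip(arrays, decoded))
print(len(buf), "bytes for", len(decoded), "arrays")
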
venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py ADDED
@@ -0,0 +1,299 @@
1
+ import io
2
+ import json
3
+ from itertools import islice
4
+ from typing import Any, Callable, Dict, List
5
+
6
+ import numpy as np
7
+ import pyarrow as pa
8
+
9
+ import datasets
10
+
11
+
12
+ logger = datasets.utils.logging.get_logger(__name__)
13
+
14
+
15
+ class WebDataset(datasets.GeneratorBasedBuilder):
16
+ DEFAULT_WRITER_BATCH_SIZE = 100
17
+ IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script
18
+ AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script
19
+ DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script
20
+ NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
21
+
22
+ @classmethod
23
+ def _get_pipeline_from_tar(cls, tar_path, tar_iterator):
24
+ current_example = {}
25
+ for filename, f in tar_iterator:
26
+ if "." in filename:
27
+ example_key, field_name = filename.split(".", 1)
28
+ if current_example and current_example["__key__"] != example_key:
29
+ yield current_example
30
+ current_example = {}
31
+ current_example["__key__"] = example_key
32
+ current_example["__url__"] = tar_path
33
+ current_example[field_name.lower()] = f.read()
34
+ if field_name in cls.DECODERS:
35
+ current_example[field_name] = cls.DECODERS[field_name](current_example[field_name])
36
+ if current_example:
37
+ yield current_example
38
+
39
+ def _info(self) -> datasets.DatasetInfo:
40
+ return datasets.DatasetInfo()
41
+
42
+ def _split_generators(self, dl_manager):
43
+ """We handle string, list and dicts in datafiles"""
44
+ # Download the data files
45
+ if not self.config.data_files:
46
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
47
+ data_files = dl_manager.download(self.config.data_files)
48
+ if isinstance(data_files, (str, list, tuple)):
49
+ tar_paths = data_files
50
+ if isinstance(tar_paths, str):
51
+ tar_paths = [tar_paths]
52
+ tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
53
+ splits = [
54
+ datasets.SplitGenerator(
55
+ name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
56
+ )
57
+ ]
58
+ else:
59
+ splits = []
60
+ for split_name, tar_paths in data_files.items():
61
+ if isinstance(tar_paths, str):
62
+ tar_paths = [tar_paths]
63
+ tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
64
+ splits.append(
65
+ datasets.SplitGenerator(
66
+ name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
67
+ )
68
+ )
69
+ if not self.info.features:
70
+ # Get one example to get the feature types
71
+ pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0])
72
+ first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE))
73
+ if any(example.keys() != first_examples[0].keys() for example in first_examples):
74
+ raise ValueError(
75
+ "The TAR archives of the dataset should be in WebDataset format, "
76
+ "but the files in the archive don't share the same prefix or the same types."
77
+ )
78
+ pa_tables = [pa.Table.from_pylist([example]) for example in first_examples]
79
+ if datasets.config.PYARROW_VERSION.major < 14:
80
+ inferred_arrow_schema = pa.concat_tables(pa_tables, promote=True).schema
81
+ else:
82
+ inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema
83
+ features = datasets.Features.from_arrow_schema(inferred_arrow_schema)
84
+
85
+ # Set Image types
86
+ for field_name in first_examples[0]:
87
+ extension = field_name.rsplit(".", 1)[-1]
88
+ if extension in self.IMAGE_EXTENSIONS:
89
+ features[field_name] = datasets.Image()
90
+ # Set Audio types
91
+ for field_name in first_examples[0]:
92
+ extension = field_name.rsplit(".", 1)[-1]
93
+ if extension in self.AUDIO_EXTENSIONS:
94
+ features[field_name] = datasets.Audio()
95
+ self.info.features = features
96
+
97
+ return splits
98
+
99
+ def _generate_examples(self, tar_paths, tar_iterators):
100
+ image_field_names = [
101
+ field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image)
102
+ ]
103
+ audio_field_names = [
104
+ field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio)
105
+ ]
106
+ for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)):
107
+ for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)):
108
+ for field_name in image_field_names + audio_field_names:
109
+ example[field_name] = {"path": example["__key__"] + "." + field_name, "bytes": example[field_name]}
110
+ yield f"{tar_idx}_{example_idx}", example
111
+
112
+
113
+ # Obtained with:
114
+ # ```
115
+ # import PIL.Image
116
+ # IMAGE_EXTENSIONS = []
117
+ # PIL.Image.init()
118
+ # for ext, format in PIL.Image.EXTENSION.items():
119
+ # if format in PIL.Image.OPEN:
120
+ # IMAGE_EXTENSIONS.append(ext[1:])
121
+ # ```
122
+ # We intentionally do not run this code on launch because:
123
+ # (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
124
+ # (2) To ensure the list of supported extensions is deterministic
125
+ IMAGE_EXTENSIONS = [
126
+ "blp",
127
+ "bmp",
128
+ "dib",
129
+ "bufr",
130
+ "cur",
131
+ "pcx",
132
+ "dcx",
133
+ "dds",
134
+ "ps",
135
+ "eps",
136
+ "fit",
137
+ "fits",
138
+ "fli",
139
+ "flc",
140
+ "ftc",
141
+ "ftu",
142
+ "gbr",
143
+ "gif",
144
+ "grib",
145
+ "h5",
146
+ "hdf",
147
+ "png",
148
+ "apng",
149
+ "jp2",
150
+ "j2k",
151
+ "jpc",
152
+ "jpf",
153
+ "jpx",
154
+ "j2c",
155
+ "icns",
156
+ "ico",
157
+ "im",
158
+ "iim",
159
+ "tif",
160
+ "tiff",
161
+ "jfif",
162
+ "jpe",
163
+ "jpg",
164
+ "jpeg",
165
+ "mpg",
166
+ "mpeg",
167
+ "msp",
168
+ "pcd",
169
+ "pxr",
170
+ "pbm",
171
+ "pgm",
172
+ "ppm",
173
+ "pnm",
174
+ "psd",
175
+ "bw",
176
+ "rgb",
177
+ "rgba",
178
+ "sgi",
179
+ "ras",
180
+ "tga",
181
+ "icb",
182
+ "vda",
183
+ "vst",
184
+ "webp",
185
+ "wmf",
186
+ "emf",
187
+ "xbm",
188
+ "xpm",
189
+ ]
190
+ WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS
191
+
+
+ # Obtained with:
+ # ```
+ # import soundfile as sf
+ #
+ # AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+ #
+ # # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+ # AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+ # ```
+ # We intentionally do not run this code on launch because:
+ # (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+ # (2) To ensure the list of supported extensions is deterministic
+ AUDIO_EXTENSIONS = [
+     "aiff",
+     "au",
+     "avr",
+     "caf",
+     "flac",
+     "htk",
+     "svx",
+     "mat4",
+     "mat5",
+     "mpc2k",
+     "ogg",
+     "paf",
+     "pvf",
+     "raw",
+     "rf64",
+     "sd2",
+     "sds",
+     "ircam",
+     "voc",
+     "w64",
+     "wav",
+     "nist",
+     "wavex",
+     "wve",
+     "xi",
+     "mp3",
+     "opus",
+ ]
+ WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS
+
+
+ def text_loads(data: bytes):
+     return data.decode("utf-8")
+
+
+ def tenbin_loads(data: bytes):
+     from . import _tenbin
+
+     return _tenbin.decode_buffer(data)
+
+
+ def msgpack_loads(data: bytes):
+     import msgpack
+
+     return msgpack.unpackb(data)
+
+
+ def npy_loads(data: bytes):
+     import numpy.lib.format
+
+     stream = io.BytesIO(data)
+     return numpy.lib.format.read_array(stream, allow_pickle=False)
+
+
+ def npz_loads(data: bytes):
+     return np.load(io.BytesIO(data), allow_pickle=False)
+
+
+ def cbor_loads(data: bytes):
+     import cbor
+
+     return cbor.loads(data)
+
+
+ # Obtained by checking `decoders` in `webdataset.autodecode`
+ # and removing unsafe extension decoders.
+ # Removed Pickle decoders:
+ # - "pyd": lambda data: pickle.loads(data)
+ # - "pickle": lambda data: pickle.loads(data)
+ # Removed Torch decoders:
+ # - "pth": lambda data: torch_loads(data)
+ # Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False):
+ # - "npy": npy_loads,
+ # - "npz": lambda data: np.load(io.BytesIO(data)),
+ DECODERS = {
+     "txt": text_loads,
+     "text": text_loads,
+     "transcript": text_loads,
+     "cls": int,
+     "cls2": int,
+     "index": int,
+     "inx": int,
+     "id": int,
+     "json": json.loads,
+     "jsn": json.loads,
+     "ten": tenbin_loads,
+     "tb": tenbin_loads,
+     "mp": msgpack_loads,
+     "msg": msgpack_loads,
+     "npy": npy_loads,
+     "npz": npz_loads,
+     "cbor": cbor_loads,
+ }
+ WebDataset.DECODERS = DECODERS
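The table above deliberately keeps only loaders that cannot execute arbitrary code. A minimal sketch (assuming only NumPy, and reusing the `npy_loads` decoder defined above) of what `allow_pickle=False` buys: plain arrays decode normally, while pickled object arrays are rejected, which is the CVE-2019-6446 mitigation:

```python
# Minimal sketch: the hardened "npy" decoder accepts plain arrays but refuses
# pickled object arrays because it reads with allow_pickle=False.
import io
import numpy as np

def to_npy_bytes(array):
    buf = io.BytesIO()
    np.save(buf, array, allow_pickle=True)  # a writer may embed pickled objects
    return buf.getvalue()

print(npy_loads(to_npy_bytes(np.arange(4))))  # array([0, 1, 2, 3])
try:
    npy_loads(to_npy_bytes(np.array([{"not": "safe"}], dtype=object)))
except ValueError as err:
    print("rejected:", err)  # object arrays cannot be loaded when allow_pickle=False
```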
venv/lib/python3.10/site-packages/datasets/parallel/__init__.py ADDED
@@ -0,0 +1 @@
+ from .parallel import parallel_backend, parallel_map, ParallelBackendConfig  # noqa F401
venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (289 Bytes). View file
 
venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc ADDED
Binary file (4.56 kB). View file
 
venv/lib/python3.10/site-packages/datasets/parallel/parallel.py ADDED
@@ -0,0 +1,120 @@
+ import contextlib
+ from multiprocessing import Pool, RLock
+
+ from tqdm.auto import tqdm
+
+ from ..utils import experimental, logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class ParallelBackendConfig:
+     backend_name = None
+
+
+ @experimental
+ def parallel_map(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func):
+     """
+     **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either
+     multiprocessing.Pool or joblib for parallelization.
+
+     Args:
+         function (`Callable[[Any], Any]`): Function to be applied to `iterable`.
+         iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.
+         num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).
+         batched (`bool`): Whether to pass batches of items to `function` instead of individual items.
+         batch_size (`int`): Number of items per batch when `batched` is `True`.
+         types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.
+         disable_tqdm (`bool`): Whether to disable the tqdm progress bar.
+         desc (`str`): Prefix for the tqdm progress bar.
+         single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.
+             Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an
+             element of `iterable`, and `rank` is used for the progress bar.
+     """
+     if ParallelBackendConfig.backend_name is None:
+         return _map_with_multiprocessing_pool(
+             function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+         )
+
+     return _map_with_joblib(
+         function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+     )
+
+
+ def _map_with_multiprocessing_pool(
+     function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+ ):
+     num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
+     split_kwds = []  # We organize the splits ourselves (contiguous splits)
+     for index in range(num_proc):
+         div = len(iterable) // num_proc
+         mod = len(iterable) % num_proc
+         start = div * index + min(index, mod)
+         end = start + div + (1 if index < mod else 0)
+         split_kwds.append((function, iterable[start:end], batched, batch_size, types, index, disable_tqdm, desc))
+
+     if len(iterable) != sum(len(i[1]) for i in split_kwds):
+         raise ValueError(
+             f"Error dividing inputs iterable among processes. "
+             f"Total number of objects {len(iterable)}, "
+             f"length: {sum(len(i[1]) for i in split_kwds)}"
+         )
+
+     logger.info(
+         f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
+     )
+     initargs, initializer = None, None
+     if not disable_tqdm:
+         initargs, initializer = (RLock(),), tqdm.set_lock
+     with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
+         mapped = pool.map(single_map_nested_func, split_kwds)
+     logger.info(f"Finished {num_proc} processes")
+     mapped = [obj for proc_res in mapped for obj in proc_res]
+     logger.info(f"Unpacked {len(mapped)} objects")
+
+     return mapped
+
+
+ def _map_with_joblib(
+     function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+ ):
+     # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
+     # and it requires monkey-patching joblib internal classes which is subject to change
+     import joblib
+
+     with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
+         return joblib.Parallel()(
+             joblib.delayed(single_map_nested_func)((function, obj, batched, batch_size, types, None, True, None))
+             for obj in iterable
+         )
+
+
+ @experimental
+ @contextlib.contextmanager
+ def parallel_backend(backend_name: str):
+     """
+     **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization
+     implemented by joblib.
+
+     Args:
+         backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib.
+
+     Example usage:
+     ```py
+     with parallel_backend('spark'):
+         dataset = load_dataset(..., num_proc=2)
+     ```
+     """
+     ParallelBackendConfig.backend_name = backend_name
+
+     if backend_name == "spark":
+         from joblibspark import register_spark
+
+         register_spark()
+
+         # TODO: call create_cache_and_write_probe if "download" in steps
+         # TODO: raise NotImplementedError when Dataset.map etc is called
+
+     try:
+         yield
+     finally:
+         ParallelBackendConfig.backend_name = None
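The contiguous-split arithmetic in `_map_with_multiprocessing_pool` is easier to see in isolation. A standalone sketch (the helper name and example sizes below are illustrative, not part of the library):

```python
# Illustrative sketch of the contiguous split used above: the first `mod`
# workers each take one extra item, so every item is covered exactly once.
def contiguous_splits(n_items: int, num_proc: int):
    div, mod = divmod(n_items, num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append((start, end))
    return splits

print(contiguous_splits(10, 3))  # [(0, 4), (4, 7), (7, 10)] -> slice sizes 4, 3, 3
```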
venv/lib/python3.10/site-packages/datasets/tasks/__init__.py ADDED
@@ -0,0 +1,46 @@
+ from typing import Optional
+
+ from ..utils.logging import get_logger
+ from .audio_classification import AudioClassification
+ from .automatic_speech_recognition import AutomaticSpeechRecognition
+ from .base import TaskTemplate
+ from .image_classification import ImageClassification
+ from .language_modeling import LanguageModeling
+ from .question_answering import QuestionAnsweringExtractive
+ from .summarization import Summarization
+ from .text_classification import TextClassification
+
+
+ __all__ = [
+     "AutomaticSpeechRecognition",
+     "AudioClassification",
+     "ImageClassification",
+     "LanguageModeling",
+     "QuestionAnsweringExtractive",
+     "Summarization",
+     "TaskTemplate",
+     "TextClassification",
+ ]
+
+ logger = get_logger(__name__)
+
+
+ NAME2TEMPLATE = {
+     AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
+     AudioClassification.task: AudioClassification,
+     ImageClassification.task: ImageClassification,
+     LanguageModeling.task: LanguageModeling,
+     QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
+     Summarization.task: Summarization,
+     TextClassification.task: TextClassification,
+ }
+
+
+ def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
+     """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
+     task_name = task_template_dict.get("task")
+     if task_name is None:
+         logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
+         return None
+     template = NAME2TEMPLATE.get(task_name)
+     return template.from_dict(task_template_dict)
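A hedged usage sketch of the registry above: build a template from a plain dict, the way `datasets` does when it reads task metadata. The `text_column`/`label_column` field names below assume the `TextClassification` template; check the template class for its exact fields:

```python
# Hypothetical example dict; field names beyond "task" depend on the template class.
from datasets.tasks import task_template_from_dict

template = task_template_from_dict(
    {"task": "text-classification", "text_column": "review", "label_column": "sentiment"}
)
print(type(template).__name__)  # TextClassification

# A dict without a "task" key logs a warning and returns None.
assert task_template_from_dict({"text_column": "review"}) is None
```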
venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.4 kB). View file
 
venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc ADDED
Binary file (1.64 kB). View file