applied-ai-018 committed on
Commit fe6b9da · verified · 1 Parent(s): 39d59a1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/datasets/io/__init__.py +0 -0
  25. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/datasets/io/abc.py +53 -0
  35. env-llmeval/lib/python3.10/site-packages/datasets/io/csv.py +142 -0
  36. env-llmeval/lib/python3.10/site-packages/datasets/io/generator.py +58 -0
  37. env-llmeval/lib/python3.10/site-packages/datasets/io/json.py +166 -0
  38. env-llmeval/lib/python3.10/site-packages/datasets/io/parquet.py +156 -0
  39. env-llmeval/lib/python3.10/site-packages/datasets/io/spark.py +57 -0
  40. env-llmeval/lib/python3.10/site-packages/datasets/io/sql.py +125 -0
  41. env-llmeval/lib/python3.10/site-packages/datasets/io/text.py +61 -0
  42. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py +71 -0
  43. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py +73 -0
  44. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py +0 -0
  45. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py +68 -0
  48. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py +0 -0
  49. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.91 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc ADDED
Binary file (225 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc ADDED
Binary file (23.3 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc ADDED
Binary file (24.4 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc ADDED
Binary file (77.3 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc ADDED
Binary file (9.12 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc ADDED
Binary file (6.56 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc ADDED
Binary file (27.8 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc ADDED
Binary file (98.9 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (1.69 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (3.54 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc ADDED
Binary file (17.7 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc ADDED
Binary file (22.8 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc ADDED
Binary file (23.4 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc ADDED
Binary file (91.4 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc ADDED
Binary file (3.42 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc ADDED
Binary file (85.9 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc ADDED
Binary file (23.4 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc ADDED
Binary file (2.85 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc ADDED
Binary file (33.3 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc ADDED
Binary file (23.1 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc ADDED
Binary file (4.84 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc ADDED
Binary file (74.2 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc ADDED
Binary file (2.12 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc ADDED
Binary file (4.38 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc ADDED
Binary file (1.66 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc ADDED
Binary file (4.96 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc ADDED
Binary file (5.25 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc ADDED
Binary file (1.92 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc ADDED
Binary file (3.94 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc ADDED
Binary file (1.73 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/io/abc.py ADDED
@@ -0,0 +1,53 @@
+ from abc import ABC, abstractmethod
+ from typing import Optional, Union
+
+ from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
+ from ..utils.typing import NestedDataStructureLike, PathLike
+
+
+ class AbstractDatasetReader(ABC):
+     def __init__(
+         self,
+         path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
+         split: Optional[NamedSplit] = None,
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         streaming: bool = False,
+         num_proc: Optional[int] = None,
+         **kwargs,
+     ):
+         self.path_or_paths = path_or_paths
+         self.split = split if split or isinstance(path_or_paths, dict) else "train"
+         self.features = features
+         self.cache_dir = cache_dir
+         self.keep_in_memory = keep_in_memory
+         self.streaming = streaming
+         self.num_proc = num_proc
+         self.kwargs = kwargs
+
+     @abstractmethod
+     def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
+         pass
+
+
+ class AbstractDatasetInputStream(ABC):
+     def __init__(
+         self,
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         streaming: bool = False,
+         num_proc: Optional[int] = None,
+         **kwargs,
+     ):
+         self.features = features
+         self.cache_dir = cache_dir
+         self.keep_in_memory = keep_in_memory
+         self.streaming = streaming
+         self.num_proc = num_proc
+         self.kwargs = kwargs
+
+     @abstractmethod
+     def read(self) -> Union[Dataset, IterableDataset]:
+         pass
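
For context, a minimal sketch of how a concrete reader would plug into this interface. It is illustrative only and not part of this commit: the `InMemoryListReader` class and the sample rows are assumptions, and it simply hands the data to `Dataset.from_dict`.

from datasets import Dataset
from datasets.io.abc import AbstractDatasetReader


class InMemoryListReader(AbstractDatasetReader):
    """Hypothetical reader wrapping an in-memory list of row dicts."""

    def __init__(self, rows, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.rows = rows

    def read(self) -> Dataset:
        # Transpose the list of dicts into the dict of columns that Dataset.from_dict expects.
        columns = {key: [row[key] for row in self.rows] for key in self.rows[0]}
        return Dataset.from_dict(columns, features=self.features, split=self.split)


ds = InMemoryListReader([{"text": "hello"}, {"text": "world"}]).read()
print(ds)  # a 2-row map-style Dataset with a single "text" column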
env-llmeval/lib/python3.10/site-packages/datasets/io/csv.py ADDED
@@ -0,0 +1,142 @@
+ import multiprocessing
+ import os
+ from typing import BinaryIO, Optional, Union
+
+ from .. import Dataset, Features, NamedSplit, config
+ from ..formatting import query_table
+ from ..packaged_modules.csv.csv import Csv
+ from ..utils import tqdm as hf_tqdm
+ from ..utils.typing import NestedDataStructureLike, PathLike
+ from .abc import AbstractDatasetReader
+
+
+ class CsvDatasetReader(AbstractDatasetReader):
+     def __init__(
+         self,
+         path_or_paths: NestedDataStructureLike[PathLike],
+         split: Optional[NamedSplit] = None,
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         streaming: bool = False,
+         num_proc: Optional[int] = None,
+         **kwargs,
+     ):
+         super().__init__(
+             path_or_paths,
+             split=split,
+             features=features,
+             cache_dir=cache_dir,
+             keep_in_memory=keep_in_memory,
+             streaming=streaming,
+             num_proc=num_proc,
+             **kwargs,
+         )
+         path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+         self.builder = Csv(
+             cache_dir=cache_dir,
+             data_files=path_or_paths,
+             features=features,
+             **kwargs,
+         )
+
+     def read(self):
+         # Build iterable dataset
+         if self.streaming:
+             dataset = self.builder.as_streaming_dataset(split=self.split)
+         # Build regular (map-style) dataset
+         else:
+             download_config = None
+             download_mode = None
+             verification_mode = None
+             base_path = None
+
+             self.builder.download_and_prepare(
+                 download_config=download_config,
+                 download_mode=download_mode,
+                 verification_mode=verification_mode,
+                 # try_from_hf_gcs=try_from_hf_gcs,
+                 base_path=base_path,
+                 num_proc=self.num_proc,
+             )
+             dataset = self.builder.as_dataset(
+                 split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+             )
+         return dataset
+
+
+ class CsvDatasetWriter:
+     def __init__(
+         self,
+         dataset: Dataset,
+         path_or_buf: Union[PathLike, BinaryIO],
+         batch_size: Optional[int] = None,
+         num_proc: Optional[int] = None,
+         **to_csv_kwargs,
+     ):
+         if num_proc is not None and num_proc <= 0:
+             raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
+
+         self.dataset = dataset
+         self.path_or_buf = path_or_buf
+         self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+         self.num_proc = num_proc
+         self.encoding = "utf-8"
+         self.to_csv_kwargs = to_csv_kwargs
+
+     def write(self) -> int:
+         _ = self.to_csv_kwargs.pop("path_or_buf", None)
+         header = self.to_csv_kwargs.pop("header", True)
+         index = self.to_csv_kwargs.pop("index", False)
+
+         if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
+             with open(self.path_or_buf, "wb+") as buffer:
+                 written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs)
+         else:
+             written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs)
+         return written
+
+     def _batch_csv(self, args):
+         offset, header, index, to_csv_kwargs = args
+
+         batch = query_table(
+             table=self.dataset.data,
+             key=slice(offset, offset + self.batch_size),
+             indices=self.dataset._indices,
+         )
+         csv_str = batch.to_pandas().to_csv(
+             path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs
+         )
+         return csv_str.encode(self.encoding)
+
+     def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
+         """Writes the pyarrow table as CSV to a binary file handle.
+
+         Caller is responsible for opening and closing the handle.
+         """
+         written = 0
+
+         if self.num_proc is None or self.num_proc == 1:
+             for offset in hf_tqdm(
+                 range(0, len(self.dataset), self.batch_size),
+                 unit="ba",
+                 desc="Creating CSV from Arrow format",
+             ):
+                 csv_str = self._batch_csv((offset, header, index, to_csv_kwargs))
+                 written += file_obj.write(csv_str)
+
+         else:
+             num_rows, batch_size = len(self.dataset), self.batch_size
+             with multiprocessing.Pool(self.num_proc) as pool:
+                 for csv_str in hf_tqdm(
+                     pool.imap(
+                         self._batch_csv,
+                         [(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)],
+                     ),
+                     total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
+                     unit="ba",
+                     desc="Creating CSV from Arrow format",
+                 ):
+                     written += file_obj.write(csv_str)
+
+         return written
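
A minimal usage sketch of the two classes above (illustrative, not part of this commit; the file paths are placeholders). In normal use they are reached through the higher-level `Dataset.from_csv` / `Dataset.to_csv` helpers rather than instantiated directly.

from datasets.io.csv import CsvDatasetReader, CsvDatasetWriter

# Read a local CSV into a map-style Dataset ("data.csv" is a placeholder path).
ds = CsvDatasetReader("data.csv", cache_dir="./hf_cache").read()

# Write it back out in batches, optionally with several worker processes.
num_bytes = CsvDatasetWriter(ds, "roundtrip.csv", batch_size=1000, num_proc=2).write()
print(f"wrote {num_bytes} bytes")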
env-llmeval/lib/python3.10/site-packages/datasets/io/generator.py ADDED
@@ -0,0 +1,58 @@
+ from typing import Callable, Optional
+
+ from .. import Features
+ from ..packaged_modules.generator.generator import Generator
+ from .abc import AbstractDatasetInputStream
+
+
+ class GeneratorDatasetInputStream(AbstractDatasetInputStream):
+     def __init__(
+         self,
+         generator: Callable,
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         streaming: bool = False,
+         gen_kwargs: Optional[dict] = None,
+         num_proc: Optional[int] = None,
+         **kwargs,
+     ):
+         super().__init__(
+             features=features,
+             cache_dir=cache_dir,
+             keep_in_memory=keep_in_memory,
+             streaming=streaming,
+             num_proc=num_proc,
+             **kwargs,
+         )
+         self.builder = Generator(
+             cache_dir=cache_dir,
+             features=features,
+             generator=generator,
+             gen_kwargs=gen_kwargs,
+             **kwargs,
+         )
+
+     def read(self):
+         # Build iterable dataset
+         if self.streaming:
+             dataset = self.builder.as_streaming_dataset(split="train")
+         # Build regular (map-style) dataset
+         else:
+             download_config = None
+             download_mode = None
+             verification_mode = None
+             base_path = None
+
+             self.builder.download_and_prepare(
+                 download_config=download_config,
+                 download_mode=download_mode,
+                 verification_mode=verification_mode,
+                 try_from_hf_gcs=False,
+                 base_path=base_path,
+                 num_proc=self.num_proc,
+             )
+             dataset = self.builder.as_dataset(
+                 split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
+             )
+         return dataset
env-llmeval/lib/python3.10/site-packages/datasets/io/json.py ADDED
@@ -0,0 +1,166 @@
+ import multiprocessing
+ import os
+ from typing import BinaryIO, Optional, Union
+
+ import fsspec
+
+ from .. import Dataset, Features, NamedSplit, config
+ from ..formatting import query_table
+ from ..packaged_modules.json.json import Json
+ from ..utils import tqdm as hf_tqdm
+ from ..utils.typing import NestedDataStructureLike, PathLike
+ from .abc import AbstractDatasetReader
+
+
+ class JsonDatasetReader(AbstractDatasetReader):
+     def __init__(
+         self,
+         path_or_paths: NestedDataStructureLike[PathLike],
+         split: Optional[NamedSplit] = None,
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         streaming: bool = False,
+         field: Optional[str] = None,
+         num_proc: Optional[int] = None,
+         **kwargs,
+     ):
+         super().__init__(
+             path_or_paths,
+             split=split,
+             features=features,
+             cache_dir=cache_dir,
+             keep_in_memory=keep_in_memory,
+             streaming=streaming,
+             num_proc=num_proc,
+             **kwargs,
+         )
+         self.field = field
+         path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+         self.builder = Json(
+             cache_dir=cache_dir,
+             data_files=path_or_paths,
+             features=features,
+             field=field,
+             **kwargs,
+         )
+
+     def read(self):
+         # Build iterable dataset
+         if self.streaming:
+             dataset = self.builder.as_streaming_dataset(split=self.split)
+         # Build regular (map-style) dataset
+         else:
+             download_config = None
+             download_mode = None
+             verification_mode = None
+             base_path = None
+
+             self.builder.download_and_prepare(
+                 download_config=download_config,
+                 download_mode=download_mode,
+                 verification_mode=verification_mode,
+                 # try_from_hf_gcs=try_from_hf_gcs,
+                 base_path=base_path,
+                 num_proc=self.num_proc,
+             )
+             dataset = self.builder.as_dataset(
+                 split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+             )
+         return dataset
+
+
+ class JsonDatasetWriter:
+     def __init__(
+         self,
+         dataset: Dataset,
+         path_or_buf: Union[PathLike, BinaryIO],
+         batch_size: Optional[int] = None,
+         num_proc: Optional[int] = None,
+         **to_json_kwargs,
+     ):
+         if num_proc is not None and num_proc <= 0:
+             raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
+
+         self.dataset = dataset
+         self.path_or_buf = path_or_buf
+         self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+         self.num_proc = num_proc
+         self.encoding = "utf-8"
+         self.to_json_kwargs = to_json_kwargs
+
+     def write(self) -> int:
+         _ = self.to_json_kwargs.pop("path_or_buf", None)
+         orient = self.to_json_kwargs.pop("orient", "records")
+         lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
+         if "index" not in self.to_json_kwargs and orient in ["split", "table"]:
+             self.to_json_kwargs["index"] = False
+
+         # Determine the default compression value based on self.path_or_buf type
+         default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None
+         compression = self.to_json_kwargs.pop("compression", default_compression)
+
+         if compression not in [None, "infer", "gzip", "bz2", "xz"]:
+             raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
+
+         if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
+             with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
+                 written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs)
+         else:
+             if compression:
+                 raise NotImplementedError(
+                     f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
+                     " was passed. Please provide a local path instead."
+                 )
+             written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs)
+         return written
+
+     def _batch_json(self, args):
+         offset, orient, lines, to_json_kwargs = args
+
+         batch = query_table(
+             table=self.dataset.data,
+             key=slice(offset, offset + self.batch_size),
+             indices=self.dataset._indices,
+         )
+         json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs)
+         if not json_str.endswith("\n"):
+             json_str += "\n"
+         return json_str.encode(self.encoding)
+
+     def _write(
+         self,
+         file_obj: BinaryIO,
+         orient,
+         lines,
+         **to_json_kwargs,
+     ) -> int:
+         """Writes the pyarrow table as JSON lines to a binary file handle.
+
+         Caller is responsible for opening and closing the handle.
+         """
+         written = 0
+
+         if self.num_proc is None or self.num_proc == 1:
+             for offset in hf_tqdm(
+                 range(0, len(self.dataset), self.batch_size),
+                 unit="ba",
+                 desc="Creating json from Arrow format",
+             ):
+                 json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
+                 written += file_obj.write(json_str)
+         else:
+             num_rows, batch_size = len(self.dataset), self.batch_size
+             with multiprocessing.Pool(self.num_proc) as pool:
+                 for json_str in hf_tqdm(
+                     pool.imap(
+                         self._batch_json,
+                         [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
+                     ),
+                     total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
+                     unit="ba",
+                     desc="Creating json from Arrow format",
+                 ):
+                     written += file_obj.write(json_str)
+
+         return written
env-llmeval/lib/python3.10/site-packages/datasets/io/parquet.py ADDED
@@ -0,0 +1,156 @@
+ import os
+ from typing import BinaryIO, Optional, Union
+
+ import numpy as np
+ import pyarrow.parquet as pq
+
+ from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
+ from ..features.features import FeatureType, _visit
+ from ..formatting import query_table
+ from ..packaged_modules import _PACKAGED_DATASETS_MODULES
+ from ..packaged_modules.parquet.parquet import Parquet
+ from ..utils import tqdm as hf_tqdm
+ from ..utils.typing import NestedDataStructureLike, PathLike
+ from .abc import AbstractDatasetReader
+
+
+ def get_writer_batch_size(features: Features) -> Optional[int]:
+     """
+     Get the writer_batch_size that defines the maximum row group size in the parquet files.
+     The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
+     This allows optimizing random access to the parquet files, since accessing one row
+     requires reading its entire row group.
+
+     This can be improved to get an optimized size for querying/iterating,
+     but at least it matches the dataset viewer expectations on HF.
+
+     Args:
+         features (`datasets.Features`):
+             Features of the dataset to write.
+     Returns:
+         writer_batch_size (`Optional[int]`):
+             Writer batch size to pass to a dataset builder.
+             If `None`, then it will use the `datasets` default.
+     """
+
+     batch_size = np.inf
+
+     def set_batch_size(feature: FeatureType) -> None:
+         nonlocal batch_size
+         if isinstance(feature, Image):
+             batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
+         elif isinstance(feature, Audio):
+             batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
+         elif isinstance(feature, Value) and feature.dtype == "binary":
+             batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
+
+     _visit(features, set_batch_size)
+
+     return None if batch_size is np.inf else batch_size
+
+
+ class ParquetDatasetReader(AbstractDatasetReader):
+     def __init__(
+         self,
+         path_or_paths: NestedDataStructureLike[PathLike],
+         split: Optional[NamedSplit] = None,
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         streaming: bool = False,
+         num_proc: Optional[int] = None,
+         **kwargs,
+     ):
+         super().__init__(
+             path_or_paths,
+             split=split,
+             features=features,
+             cache_dir=cache_dir,
+             keep_in_memory=keep_in_memory,
+             streaming=streaming,
+             num_proc=num_proc,
+             **kwargs,
+         )
+         path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+         hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
+         self.builder = Parquet(
+             cache_dir=cache_dir,
+             data_files=path_or_paths,
+             features=features,
+             hash=hash,
+             **kwargs,
+         )
+
+     def read(self):
+         # Build iterable dataset
+         if self.streaming:
+             dataset = self.builder.as_streaming_dataset(split=self.split)
+         # Build regular (map-style) dataset
+         else:
+             download_config = None
+             download_mode = None
+             verification_mode = None
+             base_path = None
+
+             self.builder.download_and_prepare(
+                 download_config=download_config,
+                 download_mode=download_mode,
+                 verification_mode=verification_mode,
+                 # try_from_hf_gcs=try_from_hf_gcs,
+                 base_path=base_path,
+                 num_proc=self.num_proc,
+             )
+             dataset = self.builder.as_dataset(
+                 split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+             )
+         return dataset
+
+
+ class ParquetDatasetWriter:
+     def __init__(
+         self,
+         dataset: Dataset,
+         path_or_buf: Union[PathLike, BinaryIO],
+         batch_size: Optional[int] = None,
+         **parquet_writer_kwargs,
+     ):
+         self.dataset = dataset
+         self.path_or_buf = path_or_buf
+         self.batch_size = batch_size or get_writer_batch_size(dataset.features)
+         self.parquet_writer_kwargs = parquet_writer_kwargs
+
+     def write(self) -> int:
+         batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
+
+         if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
+             with open(self.path_or_buf, "wb+") as buffer:
+                 written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
+         else:
+             written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
+         return written
+
+     def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
+         """Writes the pyarrow table as Parquet to a binary file handle.
+
+         Caller is responsible for opening and closing the handle.
+         """
+         written = 0
+         _ = parquet_writer_kwargs.pop("path_or_buf", None)
+         schema = self.dataset.features.arrow_schema
+
+         writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
+
+         for offset in hf_tqdm(
+             range(0, len(self.dataset), batch_size),
+             unit="ba",
+             desc="Creating parquet from Arrow format",
+         ):
+             batch = query_table(
+                 table=self.dataset._data,
+                 key=slice(offset, offset + batch_size),
+                 indices=self.dataset._indices,
+             )
+             writer.write_table(batch)
+             written += batch.nbytes
+         writer.close()
+         return written
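
A quick sketch of what `get_writer_batch_size` returns for different feature sets (illustrative, not part of this commit):

from datasets import Features, Image, Value
from datasets.io.parquet import get_writer_batch_size

# A schema with an Image column: the row group size is capped so that random
# access to single rows stays cheap (100 per the docstring above).
print(get_writer_batch_size(Features({"image": Image(), "label": Value("int64")})))

# A plain tabular schema: no cap, so callers fall back to the `datasets` default.
print(get_writer_batch_size(Features({"text": Value("string")})))  # None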
env-llmeval/lib/python3.10/site-packages/datasets/io/spark.py ADDED
@@ -0,0 +1,57 @@
+ from typing import Optional
+
+ import pyspark
+
+ from .. import Features, NamedSplit
+ from ..download import DownloadMode
+ from ..packaged_modules.spark.spark import Spark
+ from .abc import AbstractDatasetReader
+
+
+ class SparkDatasetReader(AbstractDatasetReader):
+     """A dataset reader that reads from a Spark DataFrame.
+
+     When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
+     provided. Streaming is not currently supported.
+     """
+
+     def __init__(
+         self,
+         df: pyspark.sql.DataFrame,
+         split: Optional[NamedSplit] = None,
+         features: Optional[Features] = None,
+         streaming: bool = True,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         working_dir: str = None,
+         load_from_cache_file: bool = True,
+         file_format: str = "arrow",
+         **kwargs,
+     ):
+         super().__init__(
+             split=split,
+             features=features,
+             cache_dir=cache_dir,
+             keep_in_memory=keep_in_memory,
+             streaming=streaming,
+             **kwargs,
+         )
+         self._load_from_cache_file = load_from_cache_file
+         self._file_format = file_format
+         self.builder = Spark(
+             df=df,
+             features=features,
+             cache_dir=cache_dir,
+             working_dir=working_dir,
+             **kwargs,
+         )
+
+     def read(self):
+         if self.streaming:
+             return self.builder.as_streaming_dataset(split=self.split)
+         download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
+         self.builder.download_and_prepare(
+             download_mode=download_mode,
+             file_format=self._file_format,
+         )
+         return self.builder.as_dataset(split=self.split)
env-llmeval/lib/python3.10/site-packages/datasets/io/sql.py ADDED
@@ -0,0 +1,125 @@
+ import multiprocessing
+ from typing import TYPE_CHECKING, Optional, Union
+
+ from .. import Dataset, Features, config
+ from ..formatting import query_table
+ from ..packaged_modules.sql.sql import Sql
+ from ..utils import tqdm as hf_tqdm
+ from .abc import AbstractDatasetInputStream
+
+
+ if TYPE_CHECKING:
+     import sqlite3
+
+     import sqlalchemy
+
+
+ class SqlDatasetReader(AbstractDatasetInputStream):
+     def __init__(
+         self,
+         sql: Union[str, "sqlalchemy.sql.Selectable"],
+         con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         **kwargs,
+     ):
+         super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
+         self.builder = Sql(
+             cache_dir=cache_dir,
+             features=features,
+             sql=sql,
+             con=con,
+             **kwargs,
+         )
+
+     def read(self):
+         download_config = None
+         download_mode = None
+         verification_mode = None
+         base_path = None
+
+         self.builder.download_and_prepare(
+             download_config=download_config,
+             download_mode=download_mode,
+             verification_mode=verification_mode,
+             # try_from_hf_gcs=try_from_hf_gcs,
+             base_path=base_path,
+         )
+
+         # Build dataset for splits
+         dataset = self.builder.as_dataset(
+             split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
+         )
+         return dataset
+
+
+ class SqlDatasetWriter:
+     def __init__(
+         self,
+         dataset: Dataset,
+         name: str,
+         con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+         batch_size: Optional[int] = None,
+         num_proc: Optional[int] = None,
+         **to_sql_kwargs,
+     ):
+         if num_proc is not None and num_proc <= 0:
+             raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
+
+         self.dataset = dataset
+         self.name = name
+         self.con = con
+         self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+         self.num_proc = num_proc
+         self.to_sql_kwargs = to_sql_kwargs
+
+     def write(self) -> int:
+         _ = self.to_sql_kwargs.pop("sql", None)
+         _ = self.to_sql_kwargs.pop("con", None)
+         index = self.to_sql_kwargs.pop("index", False)
+
+         written = self._write(index=index, **self.to_sql_kwargs)
+         return written
+
+     def _batch_sql(self, args):
+         offset, index, to_sql_kwargs = args
+         to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
+         batch = query_table(
+             table=self.dataset.data,
+             key=slice(offset, offset + self.batch_size),
+             indices=self.dataset._indices,
+         )
+         df = batch.to_pandas()
+         num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
+         return num_rows or len(df)
+
+     def _write(self, index, **to_sql_kwargs) -> int:
+         """Writes the pyarrow table as SQL to a database.
+
+         Caller is responsible for opening and closing the SQL connection.
+         """
+         written = 0
+
+         if self.num_proc is None or self.num_proc == 1:
+             for offset in hf_tqdm(
+                 range(0, len(self.dataset), self.batch_size),
+                 unit="ba",
+                 desc="Creating SQL from Arrow format",
+             ):
+                 written += self._batch_sql((offset, index, to_sql_kwargs))
+         else:
+             num_rows, batch_size = len(self.dataset), self.batch_size
+             with multiprocessing.Pool(self.num_proc) as pool:
+                 for num_rows in hf_tqdm(
+                     pool.imap(
+                         self._batch_sql,
+                         [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
+                     ),
+                     total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
+                     unit="ba",
+                     desc="Creating SQL from Arrow format",
+                 ):
+                     written += num_rows
+
+         return written
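
A round-trip sketch with an in-memory SQLite database (illustrative, not part of this commit; the table names and the tiny sample are placeholders, and caching behaviour with raw connection objects is not covered here):

import sqlite3

from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE demo (text TEXT, label INTEGER)")
con.executemany("INSERT INTO demo VALUES (?, ?)", [("hello", 0), ("world", 1)])

# Read the table into a map-style Dataset, then write it out to a second table.
ds = SqlDatasetReader("SELECT * FROM demo", con).read()
rows_written = SqlDatasetWriter(ds, "demo_copy", con, batch_size=500).write()
print(ds.num_rows, rows_written)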
env-llmeval/lib/python3.10/site-packages/datasets/io/text.py ADDED
@@ -0,0 +1,61 @@
+ from typing import Optional
+
+ from .. import Features, NamedSplit
+ from ..packaged_modules.text.text import Text
+ from ..utils.typing import NestedDataStructureLike, PathLike
+ from .abc import AbstractDatasetReader
+
+
+ class TextDatasetReader(AbstractDatasetReader):
+     def __init__(
+         self,
+         path_or_paths: NestedDataStructureLike[PathLike],
+         split: Optional[NamedSplit] = None,
+         features: Optional[Features] = None,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         streaming: bool = False,
+         num_proc: Optional[int] = None,
+         **kwargs,
+     ):
+         super().__init__(
+             path_or_paths,
+             split=split,
+             features=features,
+             cache_dir=cache_dir,
+             keep_in_memory=keep_in_memory,
+             streaming=streaming,
+             num_proc=num_proc,
+             **kwargs,
+         )
+         path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+         self.builder = Text(
+             cache_dir=cache_dir,
+             data_files=path_or_paths,
+             features=features,
+             **kwargs,
+         )
+
+     def read(self):
+         # Build iterable dataset
+         if self.streaming:
+             dataset = self.builder.as_streaming_dataset(split=self.split)
+         # Build regular (map-style) dataset
+         else:
+             download_config = None
+             download_mode = None
+             verification_mode = None
+             base_path = None
+
+             self.builder.download_and_prepare(
+                 download_config=download_config,
+                 download_mode=download_mode,
+                 verification_mode=verification_mode,
+                 # try_from_hf_gcs=try_from_hf_gcs,
+                 base_path=base_path,
+                 num_proc=self.num_proc,
+             )
+             dataset = self.builder.as_dataset(
+                 split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+             )
+         return dataset
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py ADDED
@@ -0,0 +1,71 @@
+ import inspect
+ import re
+ from typing import Dict, List, Tuple
+
+ from huggingface_hub.utils import insecure_hashlib
+
+ from .arrow import arrow
+ from .audiofolder import audiofolder
+ from .cache import cache  # noqa F401
+ from .csv import csv
+ from .imagefolder import imagefolder
+ from .json import json
+ from .pandas import pandas
+ from .parquet import parquet
+ from .sql import sql  # noqa F401
+ from .text import text
+ from .webdataset import webdataset
+
+
+ def _hash_python_lines(lines: List[str]) -> str:
+     filtered_lines = []
+     for line in lines:
+         line = re.sub(r"#.*", "", line)  # remove comments
+         if line:
+             filtered_lines.append(line)
+     full_str = "\n".join(filtered_lines)
+
+     # Make a hash from all this code
+     full_bytes = full_str.encode("utf-8")
+     return insecure_hashlib.sha256(full_bytes).hexdigest()
+
+
+ # get importable module names and hash for caching
+ _PACKAGED_DATASETS_MODULES = {
+     "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
+     "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
+     "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
+     "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
+     "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
+     "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
+     "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
+     "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
+     "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
+ }
+
+ # Used to infer the module to use based on the data files extensions
+ _EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
+     ".csv": ("csv", {}),
+     ".tsv": ("csv", {"sep": "\t"}),
+     ".json": ("json", {}),
+     ".jsonl": ("json", {}),
+     ".parquet": ("parquet", {}),
+     ".geoparquet": ("parquet", {}),
+     ".gpq": ("parquet", {}),
+     ".arrow": ("arrow", {}),
+     ".txt": ("text", {}),
+     ".tar": ("webdataset", {}),
+ }
+ _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
+ _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
+ _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
+ _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
+ _MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
+
+ # Used to filter data files based on extensions given a module name
+ _MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
+ for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
+     _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
+
+ for _module in _MODULE_TO_EXTENSIONS:
+     _MODULE_TO_EXTENSIONS[_module].append(".zip")
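
A small sketch of how these tables map a data file to its packaged builder (illustrative, not part of this commit; the `infer_module` helper is hypothetical):

import os

from datasets.packaged_modules import _EXTENSION_TO_MODULE, _MODULE_TO_EXTENSIONS


def infer_module(filename: str):
    # Hypothetical helper: look up the packaged builder and its default kwargs by extension.
    ext = os.path.splitext(filename)[1]
    return _EXTENSION_TO_MODULE.get(ext)


print(infer_module("train.jsonl"))          # ('json', {})
print(infer_module("table.tsv"))            # ('csv', {'sep': '\t'})
print(_MODULE_TO_EXTENSIONS["webdataset"])  # ['.tar', '.zip']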
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py ADDED
@@ -0,0 +1,73 @@
+ import itertools
+ from dataclasses import dataclass
+ from typing import Optional
+
+ import pyarrow as pa
+
+ import datasets
+ from datasets.table import table_cast
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ @dataclass
+ class ArrowConfig(datasets.BuilderConfig):
+     """BuilderConfig for Arrow."""
+
+     features: Optional[datasets.Features] = None
+
+
+ class Arrow(datasets.ArrowBasedBuilder):
+     BUILDER_CONFIG_CLASS = ArrowConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(features=self.config.features)
+
+     def _split_generators(self, dl_manager):
+         """We handle string, list and dicts in datafiles"""
+         if not self.config.data_files:
+             raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+         data_files = dl_manager.download_and_extract(self.config.data_files)
+         if isinstance(data_files, (str, list, tuple)):
+             files = data_files
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+         splits = []
+         for split_name, files in data_files.items():
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             # Infer features if they are stored in the arrow schema
+             if self.info.features is None:
+                 for file in itertools.chain.from_iterable(files):
+                     with open(file, "rb") as f:
+                         self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
+                     break
+             splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+         return splits
+
+     def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+         if self.info.features is not None:
+             # more expensive cast to support nested features with keys in a different order
+             # allows str <-> int/float or str to Audio for example
+             pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+         return pa_table
+
+     def _generate_tables(self, files):
+         for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+             with open(file, "rb") as f:
+                 try:
+                     for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+                         pa_table = pa.Table.from_batches([record_batch])
+                         # Uncomment for debugging (will print the Arrow table size and elements)
+                         # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+                         # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+                         yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+                 except ValueError as e:
+                     logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+                     raise
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (202 Bytes).
 
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc ADDED
Binary file (1.35 kB).
 
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py ADDED
@@ -0,0 +1,68 @@
+ from typing import List
+
+ import datasets
+ from datasets.tasks import AudioClassification
+
+ from ..folder_based_builder import folder_based_builder
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+     """Builder Config for AudioFolder."""
+
+     drop_labels: bool = None
+     drop_metadata: bool = None
+
+
+ class AudioFolder(folder_based_builder.FolderBasedBuilder):
+     BASE_FEATURE = datasets.Audio
+     BASE_COLUMN_NAME = "audio"
+     BUILDER_CONFIG_CLASS = AudioFolderConfig
+     EXTENSIONS: List[str]  # definition at the bottom of the script
+     CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
+
+
+ # Obtained with:
+ # ```
+ # import soundfile as sf
+ #
+ # AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+ #
+ # # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+ # AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+ # ```
+ # We intentionally do not run this code on launch because:
+ # (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+ # (2) To ensure the list of supported extensions is deterministic
+ AUDIO_EXTENSIONS = [
+     ".aiff",
+     ".au",
+     ".avr",
+     ".caf",
+     ".flac",
+     ".htk",
+     ".svx",
+     ".mat4",
+     ".mat5",
+     ".mpc2k",
+     ".ogg",
+     ".paf",
+     ".pvf",
+     ".raw",
+     ".rf64",
+     ".sd2",
+     ".sds",
+     ".ircam",
+     ".voc",
+     ".w64",
+     ".wav",
+     ".nist",
+     ".wavex",
+     ".wve",
+     ".xi",
+     ".mp3",
+     ".opus",
+ ]
+ AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
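
In practice this builder is reached through `load_dataset("audiofolder", ...)`; a brief sketch (illustrative, not part of this commit; the directory path and layout are placeholders):

from datasets import load_dataset

# Expects a folder of audio files, e.g. ./my_audio/<label>/<clip>.wav; class labels
# are inferred from the sub-folder names unless drop_labels is set.
ds = load_dataset("audiofolder", data_dir="./my_audio", split="train")
print(ds.features)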
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes).
 
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc ADDED
Binary file (6.33 kB).