diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/abc.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/abc.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1913cc20e3fd748ef912e2fb3d7c1e18f16ac8c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/abc.py
@@ -0,0 +1,53 @@
+from abc import ABC, abstractmethod
+from typing import Optional, Union
+
+from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
+from ..utils.typing import NestedDataStructureLike, PathLike
+
+
+class AbstractDatasetReader(ABC):
+    def __init__(
+        self,
+        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
+        split: Optional[NamedSplit] = None,
+        features: Optional[Features] = None,
+        cache_dir: str = None,
+        keep_in_memory: bool = False,
+        streaming: bool = False,
+        num_proc: Optional[int] = None,
+        **kwargs,
+    ):
+        self.path_or_paths = path_or_paths
+        self.split = split if split or isinstance(path_or_paths, dict) else "train"
+        self.features = features
+        self.cache_dir = cache_dir
+        self.keep_in_memory = keep_in_memory
+        self.streaming = streaming
+        self.num_proc = num_proc
+        self.kwargs = kwargs
+
+    @abstractmethod
+    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
+        pass
+
+
+class AbstractDatasetInputStream(ABC):
+    def __init__(
+        self,
+        features: Optional[Features] = None,
+        cache_dir: str = None,
+        keep_in_memory: bool = False,
+        streaming: bool = False,
+        num_proc: Optional[int] = None,
+        **kwargs,
+    ):
+        self.features = features
+        self.cache_dir = cache_dir
+        self.keep_in_memory = keep_in_memory
+        self.streaming = streaming
+        self.num_proc = num_proc
+        self.kwargs = kwargs
+
+    @abstractmethod
+    def read(self) -> Union[Dataset, IterableDataset]:
+        pass
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/csv.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5091e1352efbcfde2e1f75f68d8ef2a8a383621
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/csv.py
@@ -0,0 +1,142 @@
+import multiprocessing
+import os
+from typing import BinaryIO, Optional, Union
+
+from .. import Dataset, Features, NamedSplit, config
+from ..formatting import query_table
+from ..packaged_modules.csv.csv import Csv
+from ..utils import tqdm as hf_tqdm
+from ..utils.typing import NestedDataStructureLike, PathLike
+from .abc import AbstractDatasetReader
+
+
+class CsvDatasetReader(AbstractDatasetReader):
+    def __init__(
+        self,
+        path_or_paths: NestedDataStructureLike[PathLike],
+        split: Optional[NamedSplit] = None,
+        features: Optional[Features] = None,
+        cache_dir: str = None,
+        keep_in_memory: bool = False,
+        streaming: bool = False,
+        num_proc: Optional[int] = None,
+        **kwargs,
+    ):
+        super().__init__(
+            path_or_paths,
+            split=split,
+            features=features,
+            cache_dir=cache_dir,
+            keep_in_memory=keep_in_memory,
+            streaming=streaming,
+            num_proc=num_proc,
+            **kwargs,
+        )
+        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+        self.builder = Csv(
+            cache_dir=cache_dir,
+            data_files=path_or_paths,
+            features=features,
+            **kwargs,
+        )
+
+    def read(self):
+        # Build iterable dataset
+        if self.streaming:
+            dataset = self.builder.as_streaming_dataset(split=self.split)
+        # Build regular (map-style) dataset
+        else:
+            download_config = None
+            download_mode = None
+            verification_mode = None
+            base_path = None
+
+            self.builder.download_and_prepare(
+                download_config=download_config,
+                download_mode=download_mode,
+                verification_mode=verification_mode,
+                # try_from_hf_gcs=try_from_hf_gcs,
+                base_path=base_path,
+                num_proc=self.num_proc,
+            )
+            dataset = self.builder.as_dataset(
+                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+            )
+        return dataset
+
+
+class CsvDatasetWriter:
+    def __init__(
+        self,
+        dataset: Dataset,
+        path_or_buf: Union[PathLike, BinaryIO],
+        batch_size: Optional[int] = None,
+        num_proc: Optional[int] = None,
+        **to_csv_kwargs,
+    ):
+        if num_proc is not None and num_proc <= 0:
+            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
+
+        self.dataset = dataset
+        self.path_or_buf = path_or_buf
+        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+        self.num_proc = num_proc
+        self.encoding = "utf-8"
+        self.to_csv_kwargs = to_csv_kwargs
+
+    def write(self) -> int:
+        _ = self.to_csv_kwargs.pop("path_or_buf", None)
+        header = self.to_csv_kwargs.pop("header", True)
+        index = self.to_csv_kwargs.pop("index", False)
+
+        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
+            with open(self.path_or_buf, "wb+") as buffer:
+                written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs)
+        else:
+            written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs)
+        return written
+
+    def _batch_csv(self, args):
+        offset, header, index, to_csv_kwargs = args
+
+        batch = query_table(
+            table=self.dataset.data,
+            key=slice(offset, offset + self.batch_size),
+            indices=self.dataset._indices,
+        )
+        csv_str = batch.to_pandas().to_csv(
+            path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs
+        )
+        return csv_str.encode(self.encoding)
+
+    def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
+        """Writes the pyarrow table as CSV to a binary file handle.
+
+        Caller is responsible for opening and closing the handle.
+ """ + written = 0 + + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm( + range(0, len(self.dataset), self.batch_size), + unit="ba", + desc="Creating CSV from Arrow format", + ): + csv_str = self._batch_csv((offset, header, index, to_csv_kwargs)) + written += file_obj.write(csv_str) + + else: + num_rows, batch_size = len(self.dataset), self.batch_size + with multiprocessing.Pool(self.num_proc) as pool: + for csv_str in hf_tqdm( + pool.imap( + self._batch_csv, + [(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)], + ), + total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, + unit="ba", + desc="Creating CSV from Arrow format", + ): + written += file_obj.write(csv_str) + + return written diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/generator.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..3cb461769c5bec9b86c984a17bb4890bdc9fab7e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/generator.py @@ -0,0 +1,58 @@ +from typing import Callable, Optional + +from .. import Features +from ..packaged_modules.generator.generator import Generator +from .abc import AbstractDatasetInputStream + + +class GeneratorDatasetInputStream(AbstractDatasetInputStream): + def __init__( + self, + generator: Callable, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + gen_kwargs: Optional[dict] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + self.builder = Generator( + cache_dir=cache_dir, + features=features, + generator=generator, + gen_kwargs=gen_kwargs, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split="train") + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + try_from_hf_gcs=False, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/json.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/json.py new file mode 100644 index 0000000000000000000000000000000000000000..2d4698df96659e97820d814bf6b991a2c66ebb57 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/json.py @@ -0,0 +1,166 @@ +import multiprocessing +import os +from typing import BinaryIO, Optional, Union + +import fsspec + +from .. 
import Dataset, Features, NamedSplit, config +from ..formatting import query_table +from ..packaged_modules.json.json import Json +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +class JsonDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + field: Optional[str] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + self.field = field + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Json( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + field=field, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + # try_from_hf_gcs=try_from_hf_gcs, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class JsonDatasetWriter: + def __init__( + self, + dataset: Dataset, + path_or_buf: Union[PathLike, BinaryIO], + batch_size: Optional[int] = None, + num_proc: Optional[int] = None, + **to_json_kwargs, + ): + if num_proc is not None and num_proc <= 0: + raise ValueError(f"num_proc {num_proc} must be an integer > 0.") + + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.encoding = "utf-8" + self.to_json_kwargs = to_json_kwargs + + def write(self) -> int: + _ = self.to_json_kwargs.pop("path_or_buf", None) + orient = self.to_json_kwargs.pop("orient", "records") + lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False) + if "index" not in self.to_json_kwargs and orient in ["split", "table"]: + self.to_json_kwargs["index"] = False + + # Determine the default compression value based on self.path_or_buf type + default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None + compression = self.to_json_kwargs.pop("compression", default_compression) + + if compression not in [None, "infer", "gzip", "bz2", "xz"]: + raise NotImplementedError(f"`datasets` currently does not support {compression} compression") + + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer: + written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs) + else: + if compression: + raise NotImplementedError( + f"The compression parameter is not supported when writing to a buffer, but compression={compression}" + " was passed. Please provide a local path instead." 
+ ) + written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs) + return written + + def _batch_json(self, args): + offset, orient, lines, to_json_kwargs = args + + batch = query_table( + table=self.dataset.data, + key=slice(offset, offset + self.batch_size), + indices=self.dataset._indices, + ) + json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs) + if not json_str.endswith("\n"): + json_str += "\n" + return json_str.encode(self.encoding) + + def _write( + self, + file_obj: BinaryIO, + orient, + lines, + **to_json_kwargs, + ) -> int: + """Writes the pyarrow table as JSON lines to a binary file handle. + + Caller is responsible for opening and closing the handle. + """ + written = 0 + + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm( + range(0, len(self.dataset), self.batch_size), + unit="ba", + desc="Creating json from Arrow format", + ): + json_str = self._batch_json((offset, orient, lines, to_json_kwargs)) + written += file_obj.write(json_str) + else: + num_rows, batch_size = len(self.dataset), self.batch_size + with multiprocessing.Pool(self.num_proc) as pool: + for json_str in hf_tqdm( + pool.imap( + self._batch_json, + [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)], + ), + total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, + unit="ba", + desc="Creating json from Arrow format", + ): + written += file_obj.write(json_str) + + return written diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/parquet.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..97245a36204d5edb20c715c9aec1d4cc9de7852f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/parquet.py @@ -0,0 +1,156 @@ +import os +from typing import BinaryIO, Optional, Union + +import numpy as np +import pyarrow.parquet as pq + +from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config +from ..features.features import FeatureType, _visit +from ..formatting import query_table +from ..packaged_modules import _PACKAGED_DATASETS_MODULES +from ..packaged_modules.parquet.parquet import Parquet +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +def get_writer_batch_size(features: Features) -> Optional[int]: + """ + Get the writer_batch_size that defines the maximum row group size in the parquet files. + The default in `datasets` is 1,000 but we lower it to 100 for image datasets. + This allows to optimize random access to parquet file, since accessing 1 row requires + to read its entire row group. + + This can be improved to get optimized size for querying/iterating + but at least it matches the dataset viewer expectations on HF. + + Args: + ds_config_info (`datasets.info.DatasetInfo`): + Dataset info from `datasets`. + Returns: + writer_batch_size (`Optional[int]`): + Writer batch size to pass to a dataset builder. + If `None`, then it will use the `datasets` default. 
+ """ + + batch_size = np.inf + + def set_batch_size(feature: FeatureType) -> None: + nonlocal batch_size + if isinstance(feature, Image): + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) + elif isinstance(feature, Audio): + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) + elif isinstance(feature, Value) and feature.dtype == "binary": + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) + + _visit(features, set_batch_size) + + return None if batch_size is np.inf else batch_size + + +class ParquetDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + hash = _PACKAGED_DATASETS_MODULES["parquet"][1] + self.builder = Parquet( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + hash=hash, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + # try_from_hf_gcs=try_from_hf_gcs, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class ParquetDatasetWriter: + def __init__( + self, + dataset: Dataset, + path_or_buf: Union[PathLike, BinaryIO], + batch_size: Optional[int] = None, + **parquet_writer_kwargs, + ): + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size or get_writer_batch_size(dataset.features) + self.parquet_writer_kwargs = parquet_writer_kwargs + + def write(self) -> int: + batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE + + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with open(self.path_or_buf, "wb+") as buffer: + written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs) + else: + written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs) + return written + + def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int: + """Writes the pyarrow table as Parquet to a binary file handle. + + Caller is responsible for opening and closing the handle. 
+ """ + written = 0 + _ = parquet_writer_kwargs.pop("path_or_buf", None) + schema = self.dataset.features.arrow_schema + + writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) + + for offset in hf_tqdm( + range(0, len(self.dataset), batch_size), + unit="ba", + desc="Creating parquet from Arrow format", + ): + batch = query_table( + table=self.dataset._data, + key=slice(offset, offset + batch_size), + indices=self.dataset._indices, + ) + writer.write_table(batch) + written += batch.nbytes + writer.close() + return written diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/spark.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/spark.py new file mode 100644 index 0000000000000000000000000000000000000000..7562ba1fb5f77ed8f82374e3021fcb3a93b1da8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/spark.py @@ -0,0 +1,57 @@ +from typing import Optional + +import pyspark + +from .. import Features, NamedSplit +from ..download import DownloadMode +from ..packaged_modules.spark.spark import Spark +from .abc import AbstractDatasetReader + + +class SparkDatasetReader(AbstractDatasetReader): + """A dataset reader that reads from a Spark DataFrame. + + When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be + provided. Streaming is not currently supported. + """ + + def __init__( + self, + df: pyspark.sql.DataFrame, + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + streaming: bool = True, + cache_dir: str = None, + keep_in_memory: bool = False, + working_dir: str = None, + load_from_cache_file: bool = True, + file_format: str = "arrow", + **kwargs, + ): + super().__init__( + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + **kwargs, + ) + self._load_from_cache_file = load_from_cache_file + self._file_format = file_format + self.builder = Spark( + df=df, + features=features, + cache_dir=cache_dir, + working_dir=working_dir, + **kwargs, + ) + + def read(self): + if self.streaming: + return self.builder.as_streaming_dataset(split=self.split) + download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD + self.builder.download_and_prepare( + download_mode=download_mode, + file_format=self._file_format, + ) + return self.builder.as_dataset(split=self.split) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/sql.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb425447c29c170499f68ab6fa221844e36d760 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/sql.py @@ -0,0 +1,125 @@ +import multiprocessing +from typing import TYPE_CHECKING, Optional, Union + +from .. 
import Dataset, Features, config +from ..formatting import query_table +from ..packaged_modules.sql.sql import Sql +from ..utils import tqdm as hf_tqdm +from .abc import AbstractDatasetInputStream + + +if TYPE_CHECKING: + import sqlite3 + + import sqlalchemy + + +class SqlDatasetReader(AbstractDatasetInputStream): + def __init__( + self, + sql: Union[str, "sqlalchemy.sql.Selectable"], + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + **kwargs, + ): + super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs) + self.builder = Sql( + cache_dir=cache_dir, + features=features, + sql=sql, + con=con, + **kwargs, + ) + + def read(self): + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + # try_from_hf_gcs=try_from_hf_gcs, + base_path=base_path, + ) + + # Build dataset for splits + dataset = self.builder.as_dataset( + split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class SqlDatasetWriter: + def __init__( + self, + dataset: Dataset, + name: str, + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], + batch_size: Optional[int] = None, + num_proc: Optional[int] = None, + **to_sql_kwargs, + ): + if num_proc is not None and num_proc <= 0: + raise ValueError(f"num_proc {num_proc} must be an integer > 0.") + + self.dataset = dataset + self.name = name + self.con = con + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.to_sql_kwargs = to_sql_kwargs + + def write(self) -> int: + _ = self.to_sql_kwargs.pop("sql", None) + _ = self.to_sql_kwargs.pop("con", None) + index = self.to_sql_kwargs.pop("index", False) + + written = self._write(index=index, **self.to_sql_kwargs) + return written + + def _batch_sql(self, args): + offset, index, to_sql_kwargs = args + to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs + batch = query_table( + table=self.dataset.data, + key=slice(offset, offset + self.batch_size), + indices=self.dataset._indices, + ) + df = batch.to_pandas() + num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs) + return num_rows or len(df) + + def _write(self, index, **to_sql_kwargs) -> int: + """Writes the pyarrow table as SQL to a database. + + Caller is responsible for opening and closing the SQL connection. 
+ """ + written = 0 + + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm( + range(0, len(self.dataset), self.batch_size), + unit="ba", + desc="Creating SQL from Arrow format", + ): + written += self._batch_sql((offset, index, to_sql_kwargs)) + else: + num_rows, batch_size = len(self.dataset), self.batch_size + with multiprocessing.Pool(self.num_proc) as pool: + for num_rows in hf_tqdm( + pool.imap( + self._batch_sql, + [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], + ), + total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, + unit="ba", + desc="Creating SQL from Arrow format", + ): + written += num_rows + + return written diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/io/text.py b/env-llmeval/lib/python3.10/site-packages/datasets/io/text.py new file mode 100644 index 0000000000000000000000000000000000000000..42aa62b06589df2ad5679ef2935730483d76a4f6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/io/text.py @@ -0,0 +1,61 @@ +from typing import Optional + +from .. import Features, NamedSplit +from ..packaged_modules.text.text import Text +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +class TextDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Text( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + # try_from_hf_gcs=try_from_hf_gcs, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bf1408eaf91901a0dcf886ef9d085b79fb422b49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py @@ -0,0 +1,71 @@ +import inspect +import re +from typing import Dict, List, Tuple + +from huggingface_hub.utils import insecure_hashlib + +from .arrow import arrow +from .audiofolder import audiofolder +from .cache import cache # noqa F401 +from .csv import csv +from .imagefolder import imagefolder +from .json import json +from .pandas import pandas +from .parquet import parquet +from .sql import sql # noqa F401 +from .text import text +from .webdataset import webdataset + + +def 
_hash_python_lines(lines: List[str]) -> str: + filtered_lines = [] + for line in lines: + line = re.sub(r"#.*", "", line) # remove comments + if line: + filtered_lines.append(line) + full_str = "\n".join(filtered_lines) + + # Make a hash from all this code + full_bytes = full_str.encode("utf-8") + return insecure_hashlib.sha256(full_bytes).hexdigest() + + +# get importable module names and hash for caching +_PACKAGED_DATASETS_MODULES = { + "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), + "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), + "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), + "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), + "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), + "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), + "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), + "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), + "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())), +} + +# Used to infer the module to use based on the data files extensions +_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = { + ".csv": ("csv", {}), + ".tsv": ("csv", {"sep": "\t"}), + ".json": ("json", {}), + ".jsonl": ("json", {}), + ".parquet": ("parquet", {}), + ".geoparquet": ("parquet", {}), + ".gpq": ("parquet", {}), + ".arrow": ("arrow", {}), + ".txt": ("text", {}), + ".tar": ("webdataset", {}), +} +_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) +_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"} + +# Used to filter data files based on extensions given a module name +_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {} +for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): + _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) + +for _module in _MODULE_TO_EXTENSIONS: + _MODULE_TO_EXTENSIONS[_module].append(".zip") diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py new file mode 100644 index 0000000000000000000000000000000000000000..dad2cdeffc2a3de13598d234a4784a3b3cc07066 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py @@ -0,0 +1,73 @@ +import itertools +from dataclasses import dataclass +from typing import Optional + +import pyarrow as pa + +import datasets +from datasets.table import table_cast + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class ArrowConfig(datasets.BuilderConfig): + """BuilderConfig for Arrow.""" + + features: Optional[datasets.Features] = None + + +class Arrow(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = ArrowConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + if 
not self.config.data_files:
+            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+        data_files = dl_manager.download_and_extract(self.config.data_files)
+        if isinstance(data_files, (str, list, tuple)):
+            files = data_files
+            if isinstance(files, str):
+                files = [files]
+            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+            files = [dl_manager.iter_files(file) for file in files]
+            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+        splits = []
+        for split_name, files in data_files.items():
+            if isinstance(files, str):
+                files = [files]
+            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+            files = [dl_manager.iter_files(file) for file in files]
+            # Infer features if they are stored in the arrow schema
+            if self.info.features is None:
+                for file in itertools.chain.from_iterable(files):
+                    with open(file, "rb") as f:
+                        self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
+                    break
+            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+        return splits
+
+    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+        if self.info.features is not None:
+            # more expensive cast to support nested features with keys in a different order
+            # allows str <-> int/float or str to Audio for example
+            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+        return pa_table
+
+    def _generate_tables(self, files):
+        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+            with open(file, "rb") as f:
+                try:
+                    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+                        pa_table = pa.Table.from_batches([record_batch])
+                        # Uncomment for debugging (will print the Arrow table size and elements)
+                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+                except ValueError as e:
+                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+                    raise
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py
new file mode 100644
index 0000000000000000000000000000000000000000..51044143039e98af0f9fd7d1ecdf1cab229e58a1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py
@@ -0,0 +1,68 @@
+from typing import List
+
+import datasets
+from datasets.tasks import AudioClassification
+
+from ..folder_based_builder import folder_based_builder
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+    """Builder Config for AudioFolder."""
+
+    drop_labels: bool = None
+    drop_metadata: bool = None
+
+
+class AudioFolder(folder_based_builder.FolderBasedBuilder):
+    BASE_FEATURE = datasets.Audio
+    BASE_COLUMN_NAME = "audio"
+    BUILDER_CONFIG_CLASS = AudioFolderConfig
+    EXTENSIONS: List[str]  # definition at the bottom of the script
+    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
+
+
+# Obtained with:
+# ```
+# import soundfile as sf
+#
+# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+#
+# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+AUDIO_EXTENSIONS = [
+    ".aiff",
+    ".au",
+    ".avr",
+    ".caf",
+    ".flac",
+    ".htk",
+    ".svx",
+    ".mat4",
+    ".mat5",
+    ".mpc2k",
+    ".ogg",
+    ".paf",
+    ".pvf",
+    ".raw",
+    ".rf64",
+    ".sd2",
+    ".sds",
+    ".ircam",
+    ".voc",
+    ".w64",
+    ".wav",
+    ".nist",
+    ".wavex",
+    ".wve",
+    ".xi",
+    ".mp3",
+    ".opus",
+]
+AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py
new file mode 100644
index
0000000000000000000000000000000000000000..54c4737be11246a3cee44f7f4ac19135237dcb74 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py @@ -0,0 +1,187 @@ +import glob +import os +import shutil +import time +import warnings +from pathlib import Path +from typing import List, Optional, Tuple, Union + +import pyarrow as pa + +import datasets +import datasets.config +import datasets.data_files +from datasets.naming import filenames_for_dataset_split + + +logger = datasets.utils.logging.get_logger(__name__) + + +def _get_modification_time(cached_directory_path): + return (Path(cached_directory_path)).stat().st_mtime + + +def _find_hash_in_cache( + dataset_name: str, config_name: Optional[str], cache_dir: Optional[str] +) -> Tuple[str, str, str]: + cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE)) + cached_datasets_directory_path_root = os.path.join(cache_dir, dataset_name.replace("/", "___")) + cached_directory_paths = [ + cached_directory_path + for cached_directory_path in glob.glob( + os.path.join(cached_datasets_directory_path_root, config_name or "*", "*", "*") + ) + if os.path.isdir(cached_directory_path) + ] + if not cached_directory_paths: + if config_name is not None: + cached_directory_paths = [ + cached_directory_path + for cached_directory_path in glob.glob( + os.path.join(cached_datasets_directory_path_root, "*", "*", "*") + ) + if os.path.isdir(cached_directory_path) + ] + available_configs = sorted( + {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths} + ) + raise ValueError( + f"Couldn't find cache for {dataset_name}" + + (f" for config '{config_name}'" if config_name else "") + + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "") + ) + # get most recent + cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1]) + version, hash = cached_directory_path.parts[-2:] + other_configs = [ + Path(cached_directory_path).parts[-3] + for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash)) + if os.path.isdir(cached_directory_path) + ] + if not config_name and len(other_configs) > 1: + raise ValueError( + f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}" + f"\nPlease specify which configuration to reload from the cache, e.g." + f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')" + ) + config_name = cached_directory_path.parts[-3] + warning_msg = ( + f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} " + f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})." 
+ ) + logger.warning(warning_msg) + return config_name, version, hash + + +class Cache(datasets.ArrowBasedBuilder): + def __init__( + self, + cache_dir: Optional[str] = None, + dataset_name: Optional[str] = None, + config_name: Optional[str] = None, + version: Optional[str] = "0.0.0", + hash: Optional[str] = None, + base_path: Optional[str] = None, + info: Optional[datasets.DatasetInfo] = None, + features: Optional[datasets.Features] = None, + token: Optional[Union[bool, str]] = None, + use_auth_token="deprecated", + repo_id: Optional[str] = None, + data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None, + data_dir: Optional[str] = None, + storage_options: Optional[dict] = None, + writer_batch_size: Optional[int] = None, + name="deprecated", + **config_kwargs, + ): + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + if name != "deprecated": + warnings.warn( + "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.", + category=FutureWarning, + ) + config_name = name + if repo_id is None and dataset_name is None: + raise ValueError("repo_id or dataset_name is required for the Cache dataset builder") + if data_files is not None: + config_kwargs["data_files"] = data_files + if data_dir is not None: + config_kwargs["data_dir"] = data_dir + if hash == "auto" and version == "auto": + # First we try to find a folder that takes the config_kwargs into account + # e.g. with "default-data_dir=data%2Ffortran" as config_id + config_id = self.BUILDER_CONFIG_CLASS(config_name or "default").create_config_id( + config_kwargs=config_kwargs, custom_features=features + ) + config_name, version, hash = _find_hash_in_cache( + dataset_name=repo_id or dataset_name, + config_name=config_id, + cache_dir=cache_dir, + ) + elif hash == "auto" or version == "auto": + raise NotImplementedError("Pass both hash='auto' and version='auto' instead") + super().__init__( + cache_dir=cache_dir, + dataset_name=dataset_name, + config_name=config_name, + version=version, + hash=hash, + base_path=base_path, + info=info, + token=token, + repo_id=repo_id, + storage_options=storage_options, + writer_batch_size=writer_batch_size, + ) + + def _info(self) -> datasets.DatasetInfo: + return datasets.DatasetInfo() + + def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs): + if not os.path.exists(self.cache_dir): + raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}") + if output_dir is not None and output_dir != self.cache_dir: + shutil.copytree(self.cache_dir, output_dir) + + def _split_generators(self, dl_manager): + # used to stream from cache + if isinstance(self.info.splits, datasets.SplitDict): + split_infos: List[datasets.SplitInfo] = list(self.info.splits.values()) + else: + raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}") + return [ + datasets.SplitGenerator( + name=split_info.name, + gen_kwargs={ + "files": filenames_for_dataset_split( + self.cache_dir, + dataset_name=self.dataset_name, + split=split_info.name, + filetype_suffix="arrow", + shard_lengths=split_info.shard_lengths, + ) + }, + ) + for split_info in split_infos + ] + + def _generate_tables(self, files): + # used to stream from cache + for file_idx, 
file in enumerate(files): + with open(file, "rb") as f: + try: + for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)): + pa_table = pa.Table.from_batches([record_batch]) + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield f"{file_idx}_{batch_idx}", pa_table + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py new file mode 100644 index 0000000000000000000000000000000000000000..7be3758e0c93e9979203dc79393369b639349f1c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py @@ -0,0 +1,202 @@ +import itertools +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import pandas as pd +import pyarrow as pa + +import datasets +import datasets.config +from datasets.features.features import require_storage_cast +from datasets.table import table_cast +from datasets.utils.py_utils import Literal + + +logger = datasets.utils.logging.get_logger(__name__) + +_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"] +_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] +_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"] +_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"] +_PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ["verbose"] + + +@dataclass +class CsvConfig(datasets.BuilderConfig): + """BuilderConfig for CSV.""" + + sep: str = "," + delimiter: Optional[str] = None + header: Optional[Union[int, List[int], str]] = "infer" + names: Optional[List[str]] = None + column_names: Optional[List[str]] = None + index_col: Optional[Union[int, str, List[int], List[str]]] = None + usecols: Optional[Union[List[int], List[str]]] = None + prefix: Optional[str] = None + mangle_dupe_cols: bool = True + engine: Optional[Literal["c", "python", "pyarrow"]] = None + converters: Dict[Union[int, str], Callable[[Any], Any]] = None + true_values: Optional[list] = None + false_values: Optional[list] = None + skipinitialspace: bool = False + skiprows: Optional[Union[int, List[int]]] = None + nrows: Optional[int] = None + na_values: Optional[Union[str, List[str]]] = None + keep_default_na: bool = True + na_filter: bool = True + verbose: bool = False + skip_blank_lines: bool = True + thousands: Optional[str] = None + decimal: str = "." 
+ lineterminator: Optional[str] = None + quotechar: str = '"' + quoting: int = 0 + escapechar: Optional[str] = None + comment: Optional[str] = None + encoding: Optional[str] = None + dialect: Optional[str] = None + error_bad_lines: bool = True + warn_bad_lines: bool = True + skipfooter: int = 0 + doublequote: bool = True + memory_map: bool = False + float_precision: Optional[str] = None + chunksize: int = 10_000 + features: Optional[datasets.Features] = None + encoding_errors: Optional[str] = "strict" + on_bad_lines: Literal["error", "warn", "skip"] = "error" + date_format: Optional[str] = None + + def __post_init__(self): + if self.delimiter is not None: + self.sep = self.delimiter + if self.column_names is not None: + self.names = self.column_names + + @property + def pd_read_csv_kwargs(self): + pd_read_csv_kwargs = { + "sep": self.sep, + "header": self.header, + "names": self.names, + "index_col": self.index_col, + "usecols": self.usecols, + "prefix": self.prefix, + "mangle_dupe_cols": self.mangle_dupe_cols, + "engine": self.engine, + "converters": self.converters, + "true_values": self.true_values, + "false_values": self.false_values, + "skipinitialspace": self.skipinitialspace, + "skiprows": self.skiprows, + "nrows": self.nrows, + "na_values": self.na_values, + "keep_default_na": self.keep_default_na, + "na_filter": self.na_filter, + "verbose": self.verbose, + "skip_blank_lines": self.skip_blank_lines, + "thousands": self.thousands, + "decimal": self.decimal, + "lineterminator": self.lineterminator, + "quotechar": self.quotechar, + "quoting": self.quoting, + "escapechar": self.escapechar, + "comment": self.comment, + "encoding": self.encoding, + "dialect": self.dialect, + "error_bad_lines": self.error_bad_lines, + "warn_bad_lines": self.warn_bad_lines, + "skipfooter": self.skipfooter, + "doublequote": self.doublequote, + "memory_map": self.memory_map, + "float_precision": self.float_precision, + "chunksize": self.chunksize, + "encoding_errors": self.encoding_errors, + "on_bad_lines": self.on_bad_lines, + "date_format": self.date_format, + } + + # some kwargs must not be passed if they don't have a default value + # some others are deprecated and we can also not pass them if they are the default value + for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: + if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): + del pd_read_csv_kwargs[pd_read_csv_parameter] + + # Remove 1.3 new arguments + if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): + for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: + del pd_read_csv_kwargs[pd_read_csv_parameter] + + # Remove 2.0 new arguments + if not (datasets.config.PANDAS_VERSION.major >= 2): + for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: + del pd_read_csv_kwargs[pd_read_csv_parameter] + + # Remove 2.2 deprecated arguments + if datasets.config.PANDAS_VERSION.release >= (2, 2): + for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS: + if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): + del pd_read_csv_kwargs[pd_read_csv_parameter] + + return pd_read_csv_kwargs + + +class Csv(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = CsvConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" 
+ if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all(not require_storage_cast(feature) for feature in self.config.features.values()): + # cheaper cast + pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) + else: + # more expensive cast; allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, schema) + return pa_table + + def _generate_tables(self, files): + schema = self.config.features.arrow_schema if self.config.features else None + # dtype allows reading an int column as str + dtype = ( + { + name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object + for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values()) + } + if schema is not None + else None + ) + for file_idx, file in enumerate(itertools.chain.from_iterable(files)): + csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs) + try: + for batch_idx, df in enumerate(csv_file_reader): + pa_table = pa.Table.from_pandas(df) + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield (file_idx, batch_idx), self._cast_table(pa_table) + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15073925b3e628f8fa399a14f79f8cc782a855cd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f41743865832a2a055561698568ddf67ce404d4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py 
b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..146ef4e613b9d943b160c04b2286b2a2d331b80a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py @@ -0,0 +1,406 @@ +import collections +import itertools +import os +from dataclasses import dataclass +from typing import List, Optional, Tuple, Type + +import pandas as pd +import pyarrow as pa +import pyarrow.json as paj + +import datasets +from datasets.features.features import FeatureType +from datasets.tasks.base import TaskTemplate + + +logger = datasets.utils.logging.get_logger(__name__) + + +def count_path_segments(path): + return path.replace("\\", "/").count("/") + + +@dataclass +class FolderBasedBuilderConfig(datasets.BuilderConfig): + """BuilderConfig for AutoFolder.""" + + features: Optional[datasets.Features] = None + drop_labels: bool = None + drop_metadata: bool = None + + +class FolderBasedBuilder(datasets.GeneratorBasedBuilder): + """ + Base class for generic data loaders for vision and image data. + + + Abstract class attributes to be overridden by a child class: + BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...) + BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...) + BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig` + EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files + will be included in a dataset) + CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure + """ + + BASE_FEATURE: Type[FeatureType] + BASE_COLUMN_NAME: str + BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig + EXTENSIONS: List[str] + CLASSIFICATION_TASK: TaskTemplate + + METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"] + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + + # Do an early pass if: + # * `drop_labels` is None (default) or False, to infer the class labels + # * `drop_metadata` is None (default) or False, to find the metadata files + do_analyze = not self.config.drop_labels or not self.config.drop_metadata + labels, path_depths = set(), set() + metadata_files = collections.defaultdict(set) + + def analyze(files_or_archives, downloaded_files_or_dirs, split): + if len(downloaded_files_or_dirs) == 0: + return + # The files are separated from the archives at this point, so check the first sample + # to see if it's a file or a directory and iterate accordingly + if os.path.isfile(downloaded_files_or_dirs[0]): + original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs + for original_file, downloaded_file in zip(original_files, downloaded_files): + original_file, downloaded_file = str(original_file), str(downloaded_file) + _, original_file_ext = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(original_file))) + path_depths.add(count_path_segments(original_file)) + elif os.path.basename(original_file) in self.METADATA_FILENAMES: + metadata_files[split].add((original_file, 
downloaded_file)) + else: + original_file_name = os.path.basename(original_file) + logger.debug( + f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either." + ) + else: + archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs + for archive, downloaded_dir in zip(archives, downloaded_dirs): + archive, downloaded_dir = str(archive), str(downloaded_dir) + for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) + path_depths.add(count_path_segments(downloaded_dir_file)) + elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: + metadata_files[split].add((None, downloaded_dir_file)) + else: + archive_file_name = os.path.basename(archive) + original_file_name = os.path.basename(downloaded_dir_file) + logger.debug( + f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." + ) + + data_files = self.config.data_files + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files, archives = self._split_files_and_archives(files) + downloaded_files = dl_manager.download(files) + downloaded_dirs = dl_manager.download_and_extract(archives) + if do_analyze: # drop_metadata is None or False, drop_labels is None or False + logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") + analyze(files, downloaded_files, split_name) + analyze(archives, downloaded_dirs, split_name) + + if metadata_files: + # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False + add_metadata = not self.config.drop_metadata + # if `metadata_files` are found, add labels only if + # `drop_labels` is set up to False explicitly (not-default behavior) + add_labels = self.config.drop_labels is False + else: + # if `metadata_files` are not found, don't add metadata + add_metadata = False + # if `metadata_files` are not found and `drop_labels` is None (default) - + # add labels if files are on the same level in directory hierarchy and there is more than one label + add_labels = ( + (len(labels) > 1 and len(path_depths) == 1) + if self.config.drop_labels is None + else not self.config.drop_labels + ) + + if add_labels: + logger.info("Adding the labels inferred from data directories to the dataset's features...") + if add_metadata: + logger.info("Adding metadata to the dataset...") + else: + add_labels, add_metadata, metadata_files = False, False, {} + + splits.append( + datasets.SplitGenerator( + name=split_name, + gen_kwargs={ + "files": list(zip(files, downloaded_files)) + + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], + "metadata_files": metadata_files, + "split_name": split_name, + "add_labels": add_labels, + "add_metadata": add_metadata, + }, + ) + ) + + if add_metadata: + # Verify that: + # * all metadata files have the same set of features + # * the `file_name` key is one of the metadata keys and is of type string + features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] + + # Check that all metadata files share the same format + metadata_ext = { + os.path.splitext(original_metadata_file)[-1] + for original_metadata_file, _ in 
itertools.chain.from_iterable(metadata_files.values()) + } + if len(metadata_ext) > 1: + raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") + metadata_ext = metadata_ext.pop() + + for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): + pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext) + features_per_metadata_file.append( + (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) + ) + for downloaded_metadata_file, metadata_features in features_per_metadata_file: + if metadata_features != features_per_metadata_file[0][1]: + raise ValueError( + f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" + ) + metadata_features = features_per_metadata_file[0][1] + if "file_name" not in metadata_features: + raise ValueError("`file_name` must be present as dictionary key in metadata files") + if metadata_features["file_name"] != datasets.Value("string"): + raise ValueError("`file_name` key must be a string") + del metadata_features["file_name"] + else: + metadata_features = None + + # Normally, we would do this in _info, but we need to know the labels and/or metadata + # before building the features + if self.config.features is None: + if add_labels: + self.info.features = datasets.Features( + { + self.BASE_COLUMN_NAME: self.BASE_FEATURE(), + "label": datasets.ClassLabel(names=sorted(labels)), + } + ) + self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)] + else: + self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) + + if add_metadata: + # Warn if there are duplicated keys in metadata compared to the existing features + # (`BASE_COLUMN_NAME`, optionally "label") + duplicated_keys = set(self.info.features) & set(metadata_features) + if duplicated_keys: + logger.warning( + f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in " + f"the features dictionary." 
+ ) + # skip metadata duplicated keys + self.info.features.update( + { + feature: metadata_features[feature] + for feature in metadata_features + if feature not in duplicated_keys + } + ) + + return splits + + def _split_files_and_archives(self, data_files): + files, archives = [], [] + for data_file in data_files: + _, data_file_ext = os.path.splitext(data_file) + if data_file_ext.lower() in self.EXTENSIONS: + files.append(data_file) + elif os.path.basename(data_file) in self.METADATA_FILENAMES: + files.append(data_file) + else: + archives.append(data_file) + return files, archives + + def _read_metadata(self, metadata_file, metadata_ext: str = ""): + if metadata_ext == ".csv": + # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module + return pa.Table.from_pandas(pd.read_csv(metadata_file)) + else: + with open(metadata_file, "rb") as f: + return paj.read_json(f) + + def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): + split_metadata_files = metadata_files.get(split_name, []) + sample_empty_metadata = ( + {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} + ) + last_checked_dir = None + metadata_dir = None + metadata_dict = None + downloaded_metadata_file = None + + metadata_ext = "" + if split_metadata_files: + metadata_ext = { + os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files + } + metadata_ext = metadata_ext.pop() + + file_idx = 0 + for original_file, downloaded_file_or_dir in files: + if original_file is not None: + _, original_file_ext = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + # If the file is a file of a needed type, and we've just entered a new directory, + # find the nereast metadata file (by counting path segments) for the directory + current_dir = os.path.dirname(original_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [ + ( + os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), + metadata_file_candidate, + downloaded_metadata_file, + ) + for metadata_file_candidate, downloaded_metadata_file in split_metadata_files + if metadata_file_candidate + is not None # ignore metadata_files that are inside archives + and not os.path.relpath( + original_file, os.path.dirname(metadata_file_candidate) + ).startswith("..") + ] + if metadata_file_candidates: + _, metadata_file, downloaded_metadata_file = min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." 
+ ) + if metadata_dir is not None and downloaded_metadata_file is not None: + file_relpath = os.path.relpath(original_file, metadata_dir) + file_relpath = file_relpath.replace("\\", "/") + if file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." + ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(original_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_file_or_dir, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1 + else: + for downloaded_dir_file in downloaded_file_or_dir: + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + current_dir = os.path.dirname(downloaded_dir_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [ + ( + os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ), + metadata_file_candidate, + downloaded_metadata_file, + ) + for metadata_file_candidate, downloaded_metadata_file in split_metadata_files + if metadata_file_candidate + is None # ignore metadata_files that are not inside archives + and not os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ).startswith("..") + ] + if metadata_file_candidates: + _, metadata_file, downloaded_metadata_file = min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(downloaded_metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." + ) + if metadata_dir is not None and downloaded_metadata_file is not None: + downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) + downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") + if downloaded_dir_file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[downloaded_dir_file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." 
+ ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_dir_file, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aa836e95ad4311d8173d6f21c2b04c0fa26338d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d2dc6649036c5b9ca864010a4088db762368a56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2dd0d419a626dbb5149cb56abf69c82d35deb4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py @@ -0,0 +1,104 @@ +from typing import List + +import datasets +from datasets.tasks import ImageClassification + +from ..folder_based_builder import folder_based_builder + + +logger = datasets.utils.logging.get_logger(__name__) + + +class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig): + """BuilderConfig for ImageFolder.""" + + drop_labels: bool = None + drop_metadata: bool = None + + +class ImageFolder(folder_based_builder.FolderBasedBuilder): + BASE_FEATURE = datasets.Image + BASE_COLUMN_NAME = "image" + BUILDER_CONFIG_CLASS = ImageFolderConfig + EXTENSIONS: List[str] # definition at the bottom of the script + CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label") + + +# Obtained with: +# ``` +# import PIL.Image +# IMAGE_EXTENSIONS = [] +# PIL.Image.init() +# for ext, format in PIL.Image.EXTENSION.items(): +# if format in PIL.Image.OPEN: +# IMAGE_EXTENSIONS.append(ext[1:]) +# ``` +# We intentionally do not run this code on launch because: +# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed +# (2) To ensure the list of supported extensions is deterministic +IMAGE_EXTENSIONS = [ + ".blp", + ".bmp", + ".dib", + ".bufr", + ".cur", + ".pcx", + ".dcx", + ".dds", + ".ps", + ".eps", + ".fit", + ".fits", + ".fli", + ".flc", + ".ftc", + ".ftu", + ".gbr", + ".gif", + ".grib", + ".h5", + ".hdf", + ".png", + ".apng", + ".jp2", + ".j2k", + ".jpc", + ".jpf", + 
".jpx", + ".j2c", + ".icns", + ".ico", + ".im", + ".iim", + ".tif", + ".tiff", + ".jfif", + ".jpe", + ".jpg", + ".jpeg", + ".mpg", + ".mpeg", + ".msp", + ".pcd", + ".pxr", + ".pbm", + ".pgm", + ".ppm", + ".pnm", + ".psd", + ".bw", + ".rgb", + ".rgba", + ".sgi", + ".ras", + ".tga", + ".icb", + ".vda", + ".vst", + ".webp", + ".wmf", + ".emf", + ".xbm", + ".xpm", +] +ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76dc3bcf8d636e379cf7aa3ace5a57d62309596e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fc06fb171cf87db6d5863e3c3f17804b63eacfc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py new file mode 100644 index 0000000000000000000000000000000000000000..4c017a642f66fe48edb73720cabff5f538fec267 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py @@ -0,0 +1,179 @@ +import io +import itertools +import json +from dataclasses import dataclass +from typing import Optional + +import pyarrow as pa +import pyarrow.json as paj + +import datasets +from datasets.table import table_cast +from datasets.utils.file_utils import readline + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class JsonConfig(datasets.BuilderConfig): + """BuilderConfig for JSON.""" + + features: Optional[datasets.Features] = None + encoding: str = "utf-8" + encoding_errors: Optional[str] = None + field: Optional[str] = None + use_threads: bool = True # deprecated + block_size: Optional[int] = None # deprecated + chunksize: int = 10 << 20 # 10MB + newlines_in_values: Optional[bool] = None + + +class Json(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = JsonConfig + + def _info(self): + if self.config.block_size is not None: + logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead") + self.config.chunksize = self.config.block_size + if self.config.use_threads is not True: + logger.warning( + "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." 
+ ) + if self.config.newlines_in_values is not None: + raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported") + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + # adding missing columns + for column_name in set(self.config.features) - set(pa_table.column_names): + type = self.config.features.arrow_schema.field(column_name).type + pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type)) + # more expensive cast to support nested structures with keys in a different order + # allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, self.config.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + for file_idx, file in enumerate(itertools.chain.from_iterable(files)): + # If the file is one json object and if we need to look at the list of items in one specific field + if self.config.field is not None: + with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: + dataset = json.load(f) + + # We keep only the field we are interested in + dataset = dataset[self.config.field] + + # We accept two format: a list of dicts or a dict of lists + if isinstance(dataset, (list, tuple)): + keys = set().union(*[row.keys() for row in dataset]) + mapping = {col: [row.get(col) for row in dataset] for col in keys} + else: + mapping = dataset + pa_table = pa.Table.from_pydict(mapping) + yield file_idx, self._cast_table(pa_table) + + # If the file has one json object per line + else: + with open(file, "rb") as f: + batch_idx = 0 + # Use block_size equal to the chunk size divided by 32 to leverage multithreading + # Set a default minimum value of 16kB if the chunk size is really small + block_size = max(self.config.chunksize // 32, 16 << 10) + encoding_errors = ( + self.config.encoding_errors if self.config.encoding_errors is not None else "strict" + ) + while True: + batch = f.read(self.config.chunksize) + if not batch: + break + # Finish current line + try: + batch += f.readline() + except (AttributeError, io.UnsupportedOperation): + batch += readline(f) + # PyArrow only accepts utf-8 encoded bytes + if self.config.encoding != "utf-8": + batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8") + try: + while True: + try: + pa_table = paj.read_json( + io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) + ) + break + except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: + if ( + isinstance(e, pa.ArrowInvalid) + and "straddling" not in str(e) + or block_size > len(batch) + 
): + raise + else: + # Increase the block size in case it was too small. + # The block size will be reset for the next file. + logger.debug( + f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." + ) + block_size *= 2 + except pa.ArrowInvalid as e: + try: + with open( + file, encoding=self.config.encoding, errors=self.config.encoding_errors + ) as f: + dataset = json.load(f) + except json.JSONDecodeError: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise e + # If possible, parse the file as a list of json objects/strings and exit the loop + if isinstance(dataset, list): # list is the only sequence type supported in JSON + try: + if dataset and isinstance(dataset[0], str): + pa_table_names = ( + list(self.config.features) + if self.config.features is not None + else ["text"] + ) + pa_table = pa.Table.from_arrays([pa.array(dataset)], names=pa_table_names) + else: + keys = set().union(*[row.keys() for row in dataset]) + mapping = {col: [row.get(col) for row in dataset] for col in keys} + pa_table = pa.Table.from_pydict(mapping) + except (pa.ArrowInvalid, AttributeError) as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise ValueError(f"Not able to read records in the JSON file at {file}.") from None + yield file_idx, self._cast_table(pa_table) + break + else: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise ValueError( + f"Not able to read records in the JSON file at {file}. " + f"You should probably indicate the field of the JSON file containing your records. " + f"This JSON file contain the following fields: {str(list(dataset.keys()))}. " + f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. 
" + ) from None + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield (file_idx, batch_idx), self._cast_table(pa_table) + batch_idx += 1 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abee086370d6d396e8fabaa955d1dc1144d80a8d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ffd07eace97c449f8ff0838cad31b4556f75ab7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad9a6f49931bcd0cc2e395becb4017d3f4a18a7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py @@ -0,0 +1,57 @@ +import itertools +from dataclasses import dataclass +from typing import Optional + +import pandas as pd +import pyarrow as pa + +import datasets +from datasets.table import table_cast + + +@dataclass +class PandasConfig(datasets.BuilderConfig): + """BuilderConfig for Pandas.""" + + features: Optional[datasets.Features] = None + + +class Pandas(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = PandasConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + return splits + + def 
_cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + # more expensive cast to support nested features with keys in a different order + # allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, self.config.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + for i, file in enumerate(itertools.chain.from_iterable(files)): + with open(file, "rb") as f: + pa_table = pa.Table.from_pandas(pd.read_pickle(f)) + yield i, self._cast_table(pa_table) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8c6c5bcf1092d7c3813c37f4e6809e4e9a0a312 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/parquet.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/parquet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..627be388a9a50c6b7a222d6744db8398aa12e381 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/parquet.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a66ac906e35e166ceff9987b2eff1bb3982d3f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py @@ -0,0 +1,99 @@ +import itertools +from dataclasses import dataclass +from typing import List, Optional + +import pyarrow as pa +import pyarrow.parquet as pq + +import datasets +from datasets.table import table_cast + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class ParquetConfig(datasets.BuilderConfig): + """BuilderConfig for Parquet.""" + + batch_size: Optional[int] = None + columns: Optional[List[str]] = None + features: Optional[datasets.Features] = None + + +class Parquet(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = ParquetConfig + + def _info(self): + if ( + self.config.columns is not None + and self.config.features is not None + and set(self.config.columns) != set(self.config.features) + ): + raise ValueError( + "The columns and features argument must contain the same columns, but got ", + f"{self.config.columns} and {self.config.features}", + ) + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, 
tuple)): + files = data_files + if isinstance(files, str): + files = [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + # Infer features if they are stored in the arrow schema + if self.info.features is None: + for file in itertools.chain.from_iterable(files): + with open(file, "rb") as f: + self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f)) + break + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + if self.config.columns is not None and set(self.config.columns) != set(self.info.features): + self.info.features = datasets.Features( + {col: feat for col, feat in self.info.features.items() if col in self.config.columns} + ) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.info.features is not None: + # more expensive cast to support nested features with keys in a different order + # allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, self.info.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + if self.config.features is not None and self.config.columns is not None: + if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns): + raise ValueError( + f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" + ) + for file_idx, file in enumerate(itertools.chain.from_iterable(files)): + with open(file, "rb") as f: + parquet_file = pq.ParquetFile(f) + if parquet_file.metadata.num_row_groups > 0: + batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows + try: + for batch_idx, record_batch in enumerate( + parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns) + ): + pa_table = pa.Table.from_batches([record_batch]) + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4df1e5b5d787f5fa4338940250fde9b499f07ac4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5d463c0b0bd470978ff1420e36239c7dc0442e6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py new file mode 100644 index 0000000000000000000000000000000000000000..0f88475203cd018ab3b36eb3407e11bde1d26673 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py @@ -0,0 +1,128 @@ +import itertools +import warnings +from dataclasses import InitVar, dataclass +from io import StringIO +from typing import Optional + +import pyarrow as pa + +import datasets +from datasets.features.features import require_storage_cast +from datasets.table import table_cast + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class TextConfig(datasets.BuilderConfig): + """BuilderConfig for text files.""" + + features: Optional[datasets.Features] = None + encoding: str = "utf-8" + errors: InitVar[Optional[str]] = "deprecated" + encoding_errors: Optional[str] = None + chunksize: int = 10 << 20 # 10MB + keep_linebreaks: bool = False + sample_by: str = "line" + + def __post_init__(self, errors): + if errors != "deprecated": + warnings.warn( + "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'encoding_errors={errors}' instead.", + FutureWarning, + ) + self.encoding_errors = errors + + +class Text(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = TextConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. + + If str or List[str], then the dataset returns only the 'train' split. + If dict, then keys should be from the `datasets.Split` enum. 
+ """ + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all(not require_storage_cast(feature) for feature in self.config.features.values()): + # cheaper cast + pa_table = pa_table.cast(schema) + else: + # more expensive cast; allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, schema) + return pa_table + else: + return pa_table.cast(pa.schema({"text": pa.string()})) + + def _generate_tables(self, files): + pa_table_names = list(self.config.features) if self.config.features is not None else ["text"] + for file_idx, file in enumerate(itertools.chain.from_iterable(files)): + # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n" + with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: + if self.config.sample_by == "line": + batch_idx = 0 + while True: + batch = f.read(self.config.chunksize) + if not batch: + break + batch += f.readline() # finish current line + # StringIO.readlines, by default splits only on "\n" (and keeps line breaks) + batch = StringIO(batch).readlines() + if not self.config.keep_linebreaks: + batch = [line.rstrip("\n") for line in batch] + pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names) + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield (file_idx, batch_idx), self._cast_table(pa_table) + batch_idx += 1 + elif self.config.sample_by == "paragraph": + batch_idx = 0 + batch = "" + while True: + new_batch = f.read(self.config.chunksize) + if not new_batch: + break + batch += new_batch + batch += f.readline() # finish current line + batch = batch.split("\n\n") + pa_table = pa.Table.from_arrays( + [pa.array([example for example in batch[:-1] if example])], names=pa_table_names + ) + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield (file_idx, batch_idx), self._cast_table(pa_table) + batch_idx += 1 + batch = batch[-1] + if batch: + pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names) + yield (file_idx, batch_idx), self._cast_table(pa_table) + elif self.config.sample_by == "document": + text = f.read() + pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names) + yield file_idx, self._cast_table(pa_table)