diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..905e753955a348a8e486302e1b6f5e8f53ec7bf4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/commands/__init__.py @@ -0,0 +1,13 @@ +from abc import ABC, abstractmethod +from argparse import ArgumentParser + + +class BaseDatasetsCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c2e2402ef895549980031d35c961b994b498f6a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/convert.py b/env-llmeval/lib/python3.10/site-packages/datasets/commands/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..f50d6aae5ba2e5c8b3c9766fa639c68ba87b2988 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/commands/convert.py @@ -0,0 +1,195 @@ +import os +import re +import shutil +from argparse import ArgumentParser, Namespace + +from datasets.commands import BaseDatasetsCLICommand +from datasets.utils.logging import get_logger + + +HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """ + +HIGHLIGHT_MESSAGE_POST = """======= +>>>>>>> +""" + +TO_HIGHLIGHT = [ + "TextEncoderConfig", + "ByteTextEncoder", + "SubwordTextEncoder", + "encoder_config", + "maybe_build_from_corpus", + "manual_dir", +] + +TO_CONVERT = [ + # (pattern, replacement) + # Order is important here for some replacements + (r"tfds\.core", r"datasets"), + (r"tf\.io\.gfile\.GFile", r"open"), + (r"tf\.([\w\d]+)", r"datasets.Value('\1')"), + (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"), + (r"tfds\.features\.Text\(", r"datasets.Value('string'),"), + (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("), + (r"tfds\.features\.FeaturesDict\(", r"dict("), + (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), + (r"tfds\.", r"datasets."), + (r"dl_manager\.manual_dir", r"self.config.data_dir"), + (r"self\.builder_config", r"self.config"), +] + + +def convert_command_factory(args: Namespace): + """ + Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. 
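+    In this CLI, that amounts to rewriting a TensorFlow Datasets (tfds) dataset script into a
+    Hugging Face Datasets dataset script, applying the regex rules listed in TO_CONVERT above.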
+ + Returns: ConvertCommand + """ + return ConvertCommand(args.tfds_path, args.datasets_directory) + + +class ConvertCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + """ + Register this command to argparse so it's available for the datasets-cli + + Args: + parser: Root parser to register command-specific arguments + """ + train_parser = parser.add_parser( + "convert", + help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", + ) + train_parser.add_argument( + "--tfds_path", + type=str, + required=True, + help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", + ) + train_parser.add_argument( + "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder." + ) + train_parser.set_defaults(func=convert_command_factory) + + def __init__(self, tfds_path: str, datasets_directory: str, *args): + self._logger = get_logger("datasets-cli/converting") + + self._tfds_path = tfds_path + self._datasets_directory = datasets_directory + + def run(self): + if os.path.isdir(self._tfds_path): + abs_tfds_path = os.path.abspath(self._tfds_path) + elif os.path.isfile(self._tfds_path): + abs_tfds_path = os.path.dirname(self._tfds_path) + else: + raise ValueError("--tfds_path is neither a directory nor a file. Please check path.") + + abs_datasets_path = os.path.abspath(self._datasets_directory) + + self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}") + + utils_files = [] + with_manual_update = [] + imports_to_builder_map = {} + + if os.path.isdir(self._tfds_path): + file_names = os.listdir(abs_tfds_path) + else: + file_names = [os.path.basename(self._tfds_path)] + + for f_name in file_names: + self._logger.info(f"Looking at file {f_name}") + input_file = os.path.join(abs_tfds_path, f_name) + output_file = os.path.join(abs_datasets_path, f_name) + + if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: + self._logger.info("Skipping file") + continue + + with open(input_file, encoding="utf-8") as f: + lines = f.readlines() + + out_lines = [] + is_builder = False + needs_manual_update = False + tfds_imports = [] + for line in lines: + out_line = line + + # Convert imports + if "import tensorflow.compat.v2 as tf" in out_line: + continue + elif "@tfds.core" in out_line: + continue + elif "builder=self" in out_line: + continue + elif "import tensorflow_datasets.public_api as tfds" in out_line: + out_line = "import datasets\n" + elif "import tensorflow" in out_line: + # order is important here + out_line = "" + continue + elif "from absl import logging" in out_line: + out_line = "from datasets import logging\n" + elif "getLogger" in out_line: + out_line = out_line.replace("getLogger", "get_logger") + elif any(expression in out_line for expression in TO_HIGHLIGHT): + needs_manual_update = True + to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT)) + out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n") + out_lines.append(out_line) + out_lines.append(HIGHLIGHT_MESSAGE_POST) + continue + else: + for pattern, replacement in TO_CONVERT: + out_line = re.sub(pattern, replacement, out_line) + + # Take care of saving utilities (to later move them together with main script) + if "tensorflow_datasets" in out_line: + match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line) + tfds_imports.extend(imp.strip() for imp in match.group(1).split(",")) + out_line = "from . 
import " + match.group(1) + + # Check we have not forget anything + if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: + raise ValueError(f"Error converting {out_line.strip()}") + + if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: + is_builder = True + out_lines.append(out_line) + + if is_builder or "wmt" in f_name: + # We create a new directory for each dataset + dir_name = f_name.replace(".py", "") + output_dir = os.path.join(abs_datasets_path, dir_name) + output_file = os.path.join(output_dir, f_name) + os.makedirs(output_dir, exist_ok=True) + self._logger.info(f"Adding directory {output_dir}") + imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) + else: + # Utilities will be moved at the end + utils_files.append(output_file) + + if needs_manual_update: + with_manual_update.append(output_file) + + with open(output_file, "w", encoding="utf-8") as f: + f.writelines(out_lines) + self._logger.info(f"Converted in {output_file}") + + for utils_file in utils_files: + try: + f_name = os.path.basename(utils_file) + dest_folder = imports_to_builder_map[f_name.replace(".py", "")] + self._logger.info(f"Moving {dest_folder} to {utils_file}") + shutil.copy(utils_file, dest_folder) + except KeyError: + self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.") + + if with_manual_update: + for file_path in with_manual_update: + self._logger.warning( + f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." + ) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/datasets_cli.py b/env-llmeval/lib/python3.10/site-packages/datasets/commands/datasets_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..927518e311cd7d4950cd650f72f28dfc58810269 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/commands/datasets_cli.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +from argparse import ArgumentParser + +from datasets.commands.convert import ConvertCommand +from datasets.commands.dummy_data import DummyDataCommand +from datasets.commands.env import EnvironmentCommand +from datasets.commands.run_beam import RunBeamCommand +from datasets.commands.test import TestCommand +from datasets.utils.logging import set_verbosity_info + + +def parse_unknown_args(unknown_args): + return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])} + + +def main(): + parser = ArgumentParser( + "HuggingFace Datasets CLI tool", usage="datasets-cli []", allow_abbrev=False + ) + commands_parser = parser.add_subparsers(help="datasets-cli command helpers") + set_verbosity_info() + + # Register commands + ConvertCommand.register_subcommand(commands_parser) + EnvironmentCommand.register_subcommand(commands_parser) + TestCommand.register_subcommand(commands_parser) + RunBeamCommand.register_subcommand(commands_parser) + DummyDataCommand.register_subcommand(commands_parser) + + # Parse args + args, unknown_args = parser.parse_known_args() + if not hasattr(args, "func"): + parser.print_help() + exit(1) + kwargs = parse_unknown_args(unknown_args) + + # Run + service = args.func(args, **kwargs) + service.run() + + +if __name__ == "__main__": + main() diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/dummy_data.py b/env-llmeval/lib/python3.10/site-packages/datasets/commands/dummy_data.py new file mode 100644 index 
0000000000000000000000000000000000000000..c4321696e67258d80d40422a327dccb35859545d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/commands/dummy_data.py @@ -0,0 +1,468 @@ +import fnmatch +import json +import os +import shutil +import tempfile +import xml.etree.ElementTree as ET +from argparse import ArgumentParser +from pathlib import Path +from typing import Optional + +from datasets import config +from datasets.commands import BaseDatasetsCLICommand +from datasets.download.download_config import DownloadConfig +from datasets.download.download_manager import DownloadManager +from datasets.download.mock_download_manager import MockDownloadManager +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.deprecation_utils import deprecated +from datasets.utils.logging import get_logger, set_verbosity_warning +from datasets.utils.py_utils import map_nested + + +logger = get_logger(__name__) + +DEFAULT_ENCODING = "utf-8" + + +def dummy_data_command_factory(args): + return DummyDataCommand( + args.path_to_dataset, + args.auto_generate, + args.n_lines, + args.json_field, + args.xml_tag, + args.match_text_files, + args.keep_uncompressed, + args.cache_dir, + args.encoding, + ) + + +class DummyDataGeneratorDownloadManager(DownloadManager): + def __init__(self, mock_download_manager, *args, **kwargs): + super().__init__(*args, **kwargs) + self.mock_download_manager = mock_download_manager + self.downloaded_dummy_paths = [] + self.expected_dummy_paths = [] + + def download(self, url_or_urls): + output = super().download(url_or_urls) + dummy_output = self.mock_download_manager.download(url_or_urls) + map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) + map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) + return output + + def download_and_extract(self, url_or_urls): + output = super().extract(super().download(url_or_urls)) + dummy_output = self.mock_download_manager.download(url_or_urls) + map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) + map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) + return output + + def auto_generate_dummy_data_folder( + self, + n_lines: int = 5, + json_field: Optional[str] = None, + xml_tag: Optional[str] = None, + match_text_files: Optional[str] = None, + encoding: Optional[str] = None, + ) -> bool: + os.makedirs( + os.path.join( + self.mock_download_manager.datasets_scripts_dir, + self.mock_download_manager.dataset_name, + self.mock_download_manager.dummy_data_folder, + "dummy_data", + ), + exist_ok=True, + ) + total = 0 + self.mock_download_manager.load_existing_dummy_data = False + for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths): + dst_path = os.path.join( + self.mock_download_manager.datasets_scripts_dir, + self.mock_download_manager.dataset_name, + self.mock_download_manager.dummy_data_folder, + relative_dst_path, + ) + total += self._create_dummy_data( + src_path, + dst_path, + n_lines=n_lines, + json_field=json_field, + xml_tag=xml_tag, + match_text_files=match_text_files, + encoding=encoding, + ) + if total == 0: + logger.error( + "Dummy data generation failed: no dummy files were created. " + "Make sure the data files format is supported by the auto-generation." 
+ ) + return total > 0 + + def _create_dummy_data( + self, + src_path: str, + dst_path: str, + n_lines: int, + json_field: Optional[str] = None, + xml_tag: Optional[str] = None, + match_text_files: Optional[str] = None, + encoding: Optional[str] = None, + ) -> int: + encoding = encoding or DEFAULT_ENCODING + if os.path.isfile(src_path): + logger.debug(f"Trying to generate dummy data file {dst_path}") + dst_path_extensions = Path(dst_path).suffixes + line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"] + is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions) + if match_text_files is not None: + file_name = os.path.basename(dst_path) + for pattern in match_text_files.split(","): + is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern) + # Line by line text file (txt, csv etc.) + if is_line_by_line_text_file: + Path(dst_path).parent.mkdir(exist_ok=True, parents=True) + with open(src_path, encoding=encoding) as src_file: + with open(dst_path, "w", encoding=encoding) as dst_file: + first_lines = [] + for i, line in enumerate(src_file): + if i >= n_lines: + break + first_lines.append(line) + dst_file.write("".join(first_lines).strip()) + return 1 + # json file + elif ".json" in dst_path_extensions: + with open(src_path, encoding=encoding) as src_file: + json_data = json.load(src_file) + if json_field is not None: + json_data = json_data[json_field] + if isinstance(json_data, dict): + if not all(isinstance(v, list) for v in json_data.values()): + raise ValueError( + f"Couldn't parse columns {list(json_data.keys())}. " + "Maybe specify which json field must be used " + "to read the data with --json_field ." + ) + first_json_data = {k: v[:n_lines] for k, v in json_data.items()} + else: + first_json_data = json_data[:n_lines] + if json_field is not None: + first_json_data = {json_field: first_json_data} + Path(dst_path).parent.mkdir(exist_ok=True, parents=True) + with open(dst_path, "w", encoding=encoding) as dst_file: + json.dump(first_json_data, dst_file) + return 1 + # xml file + elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]): + if xml_tag is None: + logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag") + else: + self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding) + return 1 + logger.warning( + f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data." + ) + return 0 + # directory, iterate through all files + elif os.path.isdir(src_path): + total = 0 + for path, _, files in os.walk(src_path): + for name in files: + if not name.startswith("."): # ignore files like .DS_Store etc. 
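+                        # Recurse file by file so the dummy tree mirrors the source layout:
+                        # each file is reduced to at most n_lines samples and written to the
+                        # matching relative path under dst_path.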
+ src_file_path = os.path.join(path, name) + dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path)) + total += self._create_dummy_data( + src_file_path, + dst_file_path, + n_lines=n_lines, + json_field=json_field, + xml_tag=xml_tag, + match_text_files=match_text_files, + encoding=encoding, + ) + return total + + @staticmethod + def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING): + Path(dst_path).parent.mkdir(exist_ok=True, parents=True) + with open(src_path, encoding=encoding) as src_file: + n_line = 0 + parents = [] + for event, elem in ET.iterparse(src_file, events=("start", "end")): + if event == "start": + parents.append(elem) + else: + _ = parents.pop() + if elem.tag == xml_tag: + if n_line < n_lines: + n_line += 1 + else: + if parents: + parents[-1].remove(elem) + ET.ElementTree(element=elem).write(dst_path, encoding=encoding) + + def compress_autogenerated_dummy_data(self, path_to_dataset): + root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder) + base_name = os.path.join(root_dir, "dummy_data") + base_dir = "dummy_data" + logger.info(f"Compressing dummy data folder to '{base_name}.zip'") + shutil.make_archive(base_name, "zip", root_dir, base_dir) + shutil.rmtree(base_name) + + +@deprecated( + "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI." +) +class DummyDataCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + test_parser = parser.add_parser("dummy_data", help="Generate dummy data.") + test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data") + test_parser.add_argument( + "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data" + ) + test_parser.add_argument( + "--json_field", + type=str, + default=None, + help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)", + ) + test_parser.add_argument( + "--xml_tag", + type=str, + default=None, + help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.", + ) + test_parser.add_argument( + "--match_text_files", + type=str, + default=None, + help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label", + ) + test_parser.add_argument( + "--keep_uncompressed", + action="store_true", + help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.", + ) + test_parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="Cache directory to download and cache files when auto-generating dummy data", + ) + test_parser.add_argument( + "--encoding", + type=str, + default=None, + help=f"Encoding to use when auto-generating dummy data. 
Defaults to {DEFAULT_ENCODING}", + ) + test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)") + test_parser.set_defaults(func=dummy_data_command_factory) + + def __init__( + self, + path_to_dataset: str, + auto_generate: bool, + n_lines: int, + json_field: Optional[str], + xml_tag: Optional[str], + match_text_files: Optional[str], + keep_uncompressed: bool, + cache_dir: Optional[str], + encoding: Optional[str], + ): + self._path_to_dataset = path_to_dataset + if os.path.isdir(path_to_dataset): + self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1] + else: + self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2] + cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE) + self._auto_generate = auto_generate + self._n_lines = n_lines + self._json_field = json_field + self._xml_tag = xml_tag + self._match_text_files = match_text_files + self._keep_uncompressed = keep_uncompressed + self._cache_dir = cache_dir + self._encoding = encoding + + def run(self): + set_verbosity_warning() + dataset_module = dataset_module_factory(self._path_to_dataset) + builder_cls = import_main_class(dataset_module.module_path) + + # use `None` as config if no configs + builder_configs = builder_cls.BUILDER_CONFIGS or [None] + auto_generate_results = [] + with tempfile.TemporaryDirectory() as tmp_dir: + for builder_config in builder_configs: + config_name = builder_config.name if builder_config else None + dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir) + version = builder_config.version if builder_config else dataset_builder.config.version + mock_dl_manager = MockDownloadManager( + dataset_name=self._dataset_name, + config=builder_config, + version=version, + use_local_dummy_data=True, + load_existing_dummy_data=False, + ) + + if self._auto_generate: + auto_generate_results.append( + self._autogenerate_dummy_data( + dataset_builder=dataset_builder, + mock_dl_manager=mock_dl_manager, + keep_uncompressed=self._keep_uncompressed, + ) + ) + else: + self._print_dummy_data_instructions( + dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager + ) + if self._auto_generate and not self._keep_uncompressed: + if all(auto_generate_results): + print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'") + else: + print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'") + + def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]: + dl_cache_dir = ( + os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR) + if self._cache_dir + else config.DOWNLOADED_DATASETS_PATH + ) + download_config = DownloadConfig(cache_dir=dl_cache_dir) + dl_manager = DummyDataGeneratorDownloadManager( + dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config + ) + dataset_builder._split_generators(dl_manager) + mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data + dl_manager.auto_generate_dummy_data_folder( + n_lines=self._n_lines, + json_field=self._json_field, + xml_tag=self._xml_tag, + match_text_files=self._match_text_files, + encoding=self._encoding, + ) + if not keep_uncompressed: + path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name) + dl_manager.compress_autogenerated_dummy_data(path_do_dataset) + # now test that the dummy_data.zip file 
actually works + mock_dl_manager.load_existing_dummy_data = True # use real dummy data + n_examples_per_split = {} + os.makedirs(dataset_builder._cache_dir, exist_ok=True) + try: + split_generators = dataset_builder._split_generators(mock_dl_manager) + for split_generator in split_generators: + dataset_builder._prepare_split(split_generator, check_duplicate_keys=False) + n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples + except OSError as e: + logger.error( + f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n" + + str(e) + ) + return False + else: + if all(n_examples > 0 for n_examples in n_examples_per_split.values()): + logger.warning( + f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''." + ) + return True + else: + empty_splits = [ + split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0 + ] + logger.warning( + f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''." + ) + return False + else: + generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) + logger.info( + f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. " + "Please compress this directory into a zip file to use it for dummy data tests." + ) + + def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager): + dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) + logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ") + os.makedirs(dummy_data_folder, exist_ok=True) + + try: + generator_splits = dataset_builder._split_generators(mock_dl_manager) + except FileNotFoundError as e: + print( + f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}." + ) + + files_to_create = set() + split_names = [] + dummy_file_name = mock_dl_manager.dummy_file_name + + for split in generator_splits: + logger.info(f"Collecting dummy data file paths to create for {split.name}") + split_names.append(split.name) + gen_kwargs = split.gen_kwargs + generator = dataset_builder._generate_examples(**gen_kwargs) + + try: + dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n" + config_string = ( + f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else "" + ) + dummy_data_guidance_print += ( + "- In order to create the dummy data for " + + config_string + + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n" + ) + + # trigger generate function + for key, record in generator: + pass + + dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. 
\n\n" + + except FileNotFoundError as e: + files_to_create.add(e.filename) + + split_names = ", ".join(split_names) + if len(files_to_create) > 0: + # no glob.glob(...) in `_generate_examples(...)` + if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: + dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n" + files_string = dummy_file_name + else: + files_string = ", ".join(files_to_create) + dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n" + + dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n" + + dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n" + + if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: + dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n" + + dummy_data_guidance_print += ( + f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n" + ) + + dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" + else: + dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n" + + dummy_data_guidance_print += ( + f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n" + ) + + dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" + + dummy_data_guidance_print += ( + f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n" + ) + + dummy_data_guidance_print += 83 * "=" + "\n" + + print(dummy_data_guidance_print) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/env.py b/env-llmeval/lib/python3.10/site-packages/datasets/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..40b2a3654c8f1c6c080222a80b12d108972a5dc9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/commands/env.py @@ -0,0 +1,41 @@ +import platform +from argparse import ArgumentParser + +import fsspec +import huggingface_hub +import pandas +import pyarrow + +from datasets import __version__ as version +from datasets.commands import BaseDatasetsCLICommand + + +def info_command_factory(_): + return EnvironmentCommand() + + +class EnvironmentCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + download_parser = parser.add_parser("env", help="Print relevant system environment info.") + download_parser.set_defaults(func=info_command_factory) + + def run(self): + info = { + 
"`datasets` version": version, + "Platform": platform.platform(), + "Python version": platform.python_version(), + "`huggingface_hub` version": huggingface_hub.__version__, + "PyArrow version": pyarrow.__version__, + "Pandas version": pandas.__version__, + "`fsspec` version": fsspec.__version__, + } + + print("\nCopy-and-paste the text below in your GitHub issue.\n") + print(self.format_dict(info)) + + return info + + @staticmethod + def format_dict(d): + return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/run_beam.py b/env-llmeval/lib/python3.10/site-packages/datasets/commands/run_beam.py new file mode 100644 index 0000000000000000000000000000000000000000..3843a5568f283a3cc8274f85dc206e42de272074 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/commands/run_beam.py @@ -0,0 +1,165 @@ +import os +from argparse import ArgumentParser +from pathlib import Path +from shutil import copyfile +from typing import List + +from datasets import config +from datasets.builder import DatasetBuilder +from datasets.commands import BaseDatasetsCLICommand +from datasets.download.download_config import DownloadConfig +from datasets.download.download_manager import DownloadMode +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.info_utils import VerificationMode + + +def run_beam_command_factory(args, **kwargs): + return RunBeamCommand( + args.dataset, + args.name, + args.cache_dir, + args.beam_pipeline_options, + args.data_dir, + args.all_configs, + args.save_info or args.save_infos, + args.ignore_verifications, + args.force_redownload, + **kwargs, + ) + + +class RunBeamCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline") + run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download") + run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name") + run_beam_parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="Cache directory where the datasets are stored", + ) + run_beam_parser.add_argument( + "--beam_pipeline_options", + type=str, + default="", + help="Beam pipeline options, separated by commas. 
Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`", + ) + run_beam_parser.add_argument( + "--data_dir", + type=str, + default=None, + help="Can be used to specify a manual directory to get the files from", + ) + run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") + run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file") + run_beam_parser.add_argument( + "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks" + ) + run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") + # aliases + run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info") + run_beam_parser.set_defaults(func=run_beam_command_factory) + + def __init__( + self, + dataset: str, + name: str, + cache_dir: str, + beam_pipeline_options: str, + data_dir: str, + all_configs: bool, + save_infos: bool, + ignore_verifications: bool, + force_redownload: bool, + **config_kwargs, + ): + self._dataset = dataset + self._name = name + self._cache_dir = cache_dir + self._beam_pipeline_options = beam_pipeline_options + self._data_dir = data_dir + self._all_configs = all_configs + self._save_infos = save_infos + self._ignore_verifications = ignore_verifications + self._force_redownload = force_redownload + self._config_kwargs = config_kwargs + + def run(self): + import apache_beam as beam + + if self._name is not None and self._all_configs: + print("Both parameters `name` and `all_configs` can't be used at once.") + exit(1) + path, config_name = self._dataset, self._name + dataset_module = dataset_module_factory(path) + builder_cls = import_main_class(dataset_module.module_path) + builders: List[DatasetBuilder] = [] + if self._beam_pipeline_options: + beam_options = beam.options.pipeline_options.PipelineOptions( + flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt] + ) + else: + beam_options = None + if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0: + for builder_config in builder_cls.BUILDER_CONFIGS: + builders.append( + builder_cls( + config_name=builder_config.name, + data_dir=self._data_dir, + hash=dataset_module.hash, + beam_options=beam_options, + cache_dir=self._cache_dir, + base_path=dataset_module.builder_kwargs.get("base_path"), + ) + ) + else: + builders.append( + builder_cls( + config_name=config_name, + data_dir=self._data_dir, + beam_options=beam_options, + cache_dir=self._cache_dir, + base_path=dataset_module.builder_kwargs.get("base_path"), + **self._config_kwargs, + ) + ) + + for builder in builders: + builder.download_and_prepare( + download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS + if not self._force_redownload + else DownloadMode.FORCE_REDOWNLOAD, + download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH), + verification_mode=VerificationMode.NO_CHECKS + if self._ignore_verifications + else VerificationMode.ALL_CHECKS, + try_from_hf_gcs=False, + ) + if self._save_infos: + builder._save_infos() + + print("Apache beam run successful.") + + # If save_infos=True, the dataset infos file is created next to the loaded module file. + # Let's move it to the original directory of the dataset script, to allow the user to + # upload them on S3 at the same time afterwards. 
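+        # The block below only works out where that original directory is, e.g.
+        # path "./datasets/squad/squad.py" -> its parent folder, and path "./datasets/squad"
+        # (containing squad.py) -> the path itself; for remote datasets it just prints
+        # where the infos file was saved and exits.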
+ if self._save_infos: + dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) + + name = Path(path).name + ".py" + + combined_path = os.path.join(path, name) + if os.path.isfile(path): + dataset_dir = os.path.dirname(path) + elif os.path.isfile(combined_path): + dataset_dir = path + else: # in case of a remote dataset + print(f"Dataset Infos file saved at {dataset_infos_path}") + exit(1) + + # Move datasetinfo back to the user + user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME) + copyfile(dataset_infos_path, user_dataset_infos_path) + print(f"Dataset Infos file saved at {user_dataset_infos_path}") diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/commands/test.py b/env-llmeval/lib/python3.10/site-packages/datasets/commands/test.py new file mode 100644 index 0000000000000000000000000000000000000000..da82427e935e270d5b2b5c1958443305c2e90405 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/commands/test.py @@ -0,0 +1,201 @@ +import logging +import os +from argparse import ArgumentParser +from pathlib import Path +from shutil import copyfile, rmtree +from typing import Generator + +import datasets.config +from datasets.builder import DatasetBuilder +from datasets.commands import BaseDatasetsCLICommand +from datasets.download.download_manager import DownloadMode +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.info_utils import VerificationMode +from datasets.utils.logging import ERROR, get_logger + + +logger = get_logger(__name__) + + +def _test_command_factory(args): + return TestCommand( + args.dataset, + args.name, + args.cache_dir, + args.data_dir, + args.all_configs, + args.save_info or args.save_infos, + args.ignore_verifications, + args.force_redownload, + args.clear_cache, + args.num_proc, + ) + + +class TestCommand(BaseDatasetsCLICommand): + __test__ = False # to tell pytest it's not a test class + + @staticmethod + def register_subcommand(parser: ArgumentParser): + test_parser = parser.add_parser("test", help="Test dataset implementation.") + test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name") + test_parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="Cache directory where the datasets are stored.", + ) + test_parser.add_argument( + "--data_dir", + type=str, + default=None, + help="Can be used to specify a manual directory to get the files from.", + ) + test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") + test_parser.add_argument( + "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)" + ) + test_parser.add_argument( + "--ignore_verifications", + action="store_true", + help="Run the test without checksums and splits checks.", + ) + test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") + test_parser.add_argument( + "--clear_cache", + action="store_true", + help="Remove downloaded files and cached datasets after each config test", + ) + test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes") + # aliases + test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info") + test_parser.add_argument("dataset", type=str, help="Name of the dataset to download") + test_parser.set_defaults(func=_test_command_factory) + + def __init__( + self, + dataset: str, + name: str, 
+ cache_dir: str, + data_dir: str, + all_configs: bool, + save_infos: bool, + ignore_verifications: bool, + force_redownload: bool, + clear_cache: bool, + num_proc: int, + ): + self._dataset = dataset + self._name = name + self._cache_dir = cache_dir + self._data_dir = data_dir + self._all_configs = all_configs + self._save_infos = save_infos + self._ignore_verifications = ignore_verifications + self._force_redownload = force_redownload + self._clear_cache = clear_cache + self._num_proc = num_proc + if clear_cache and not cache_dir: + print( + "When --clear_cache is used, specifying a cache directory is mandatory.\n" + "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n" + "Please provide a --cache_dir that will be used to test the dataset script." + ) + exit(1) + if save_infos: + self._ignore_verifications = True + + def run(self): + logging.getLogger("filelock").setLevel(ERROR) + if self._name is not None and self._all_configs: + print("Both parameters `config` and `all_configs` can't be used at once.") + exit(1) + path, config_name = self._dataset, self._name + module = dataset_module_factory(path) + builder_cls = import_main_class(module.module_path) + n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1 + + def get_builders() -> Generator[DatasetBuilder, None, None]: + if self._all_configs and builder_cls.BUILDER_CONFIGS: + for i, config in enumerate(builder_cls.BUILDER_CONFIGS): + if "config_name" in module.builder_kwargs: + yield builder_cls( + cache_dir=self._cache_dir, + data_dir=self._data_dir, + **module.builder_kwargs, + ) + else: + yield builder_cls( + config_name=config.name, + cache_dir=self._cache_dir, + data_dir=self._data_dir, + **module.builder_kwargs, + ) + else: + if "config_name" in module.builder_kwargs: + yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs) + else: + yield builder_cls( + config_name=config_name, + cache_dir=self._cache_dir, + data_dir=self._data_dir, + **module.builder_kwargs, + ) + + for j, builder in enumerate(get_builders()): + print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})") + builder._record_infos = os.path.exists( + os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME) + ) # record checksums only if we need to update a (deprecated) dataset_infos.json + builder.download_and_prepare( + download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS + if not self._force_redownload + else DownloadMode.FORCE_REDOWNLOAD, + verification_mode=VerificationMode.NO_CHECKS + if self._ignore_verifications + else VerificationMode.ALL_CHECKS, + try_from_hf_gcs=False, + num_proc=self._num_proc, + ) + builder.as_dataset() + if self._save_infos: + builder._save_infos() + + # If save_infos=True, the dataset card (README.md) is created next to the loaded module file. + # The dataset_infos are saved in the YAML part of the README.md + + # Let's move it to the original directory of the dataset script, to allow the user to + # upload them on S3 at the same time afterwards. 
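+        # Same idea as in run_beam.py: locate the dataset script's directory so the
+        # regenerated dataset card (README.md) can be copied back next to it,
+        # e.g. "./datasets/squad/squad.py" -> "./datasets/squad".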
+ if self._save_infos: + dataset_readme_path = os.path.join( + builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME + ) + name = Path(path).name + ".py" + combined_path = os.path.join(path, name) + if os.path.isfile(path): + dataset_dir = os.path.dirname(path) + elif os.path.isfile(combined_path): + dataset_dir = path + elif os.path.isdir(path): # for local directories containing only data files + dataset_dir = path + else: # in case of a remote dataset + dataset_dir = None + print(f"Dataset card saved at {dataset_readme_path}") + + # Move dataset_info back to the user + if dataset_dir is not None: + user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME) + copyfile(dataset_readme_path, user_dataset_readme_path) + print(f"Dataset card saved at {user_dataset_readme_path}") + + # If clear_cache=True, the download folder and the dataset builder cache directory are deleted + if self._clear_cache: + if os.path.isdir(builder._cache_dir): + logger.warning(f"Clearing cache at {builder._cache_dir}") + rmtree(builder._cache_dir) + download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR) + if os.path.isdir(download_dir): + logger.warning(f"Clearing cache at {download_dir}") + rmtree(download_dir) + + print("Test successful.") diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/download/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ae0d436504dc2e609b0ca8851509c72a161dbde --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/download/__init__.py @@ -0,0 +1,10 @@ +__all__ = [ + "DownloadConfig", + "DownloadManager", + "DownloadMode", + "StreamingDownloadManager", +] + +from .download_config import DownloadConfig +from .download_manager import DownloadManager, DownloadMode +from .streaming_download_manager import StreamingDownloadManager diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddf076e17f93b8fd89e4d3a1186296b63039763e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0328793eeeaf5e2a838c682b8b6ff683e88c4de0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..127548847e5e25b5dd114e628b0bfc54932b3914 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bc285b0537b08a1d26cd0d806266568dfa798ed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19f6f4e1e79deaedbfeef9bee75fa4950ad594e7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/download_config.py b/env-llmeval/lib/python3.10/site-packages/datasets/download/download_config.py new file mode 100644 index 0000000000000000000000000000000000000000..8ba032f75ba70d6b8515acac43abb5c1875291ea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/download/download_config.py @@ -0,0 +1,102 @@ +import copy +import warnings +from dataclasses import InitVar, dataclass, field +from pathlib import Path +from typing import Any, Dict, Optional, Union + +from .. import config + + +@dataclass +class DownloadConfig: + """Configuration for our cached path manager. + + Attributes: + cache_dir (`str` or `Path`, *optional*): + Specify a cache directory to save the file to (overwrite the + default cache dir). + force_download (`bool`, defaults to `False`): + If `True`, re-dowload the file even if it's already cached in + the cache dir. + resume_download (`bool`, defaults to `False`): + If `True`, resume the download if an incompletely received file is + found. + proxies (`dict`, *optional*): + user_agent (`str`, *optional*): + Optional string or dict that will be appended to the user-agent on remote + requests. + extract_compressed_file (`bool`, defaults to `False`): + If `True` and the path point to a zip or tar file, + extract the compressed file in a folder along the archive. + force_extract (`bool`, defaults to `False`): + If `True` when `extract_compressed_file` is `True` and the archive + was already extracted, re-extract the archive and override the folder where it was extracted. + delete_extracted (`bool`, defaults to `False`): + Whether to delete (or keep) the extracted files. + use_etag (`bool`, defaults to `True`): + Whether to use the ETag HTTP response header to validate the cached files. + num_proc (`int`, *optional*): + The number of processes to launch to download the files in parallel. + max_retries (`int`, default to `1`): + The number of times to retry an HTTP request if it fails. + token (`str` or `bool`, *optional*): + Optional string or boolean to use as Bearer token + for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. + use_auth_token (`str` or `bool`, *optional*): + Optional string or boolean to use as Bearer token + for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. + + + + `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. 
+ + + + ignore_url_params (`bool`, defaults to `False`): + Whether to strip all query parameters and fragments from + the download URL before using it for caching the file. + storage_options (`dict`, *optional*): + Key/value pairs to be passed on to the dataset file-system backend, if any. + download_desc (`str`, *optional*): + A description to be displayed alongside with the progress bar while downloading the files. + """ + + cache_dir: Optional[Union[str, Path]] = None + force_download: bool = False + resume_download: bool = False + local_files_only: bool = False + proxies: Optional[Dict] = None + user_agent: Optional[str] = None + extract_compressed_file: bool = False + force_extract: bool = False + delete_extracted: bool = False + use_etag: bool = True + num_proc: Optional[int] = None + max_retries: int = 1 + token: Optional[Union[str, bool]] = None + use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated" + ignore_url_params: bool = False + storage_options: Dict[str, Any] = field(default_factory=dict) + download_desc: Optional[str] = None + + def __post_init__(self, use_auth_token): + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + self.token = use_auth_token + if "hf" not in self.storage_options: + self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT} + + def copy(self) -> "DownloadConfig": + return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) + + def __setattr__(self, name, value): + if name == "token" and getattr(self, "storage_options", None) is not None: + if "hf" not in self.storage_options: + self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT} + elif getattr(self.storage_options["hf"], "token", None) is None: + self.storage_options["hf"]["token"] = value + super().__setattr__(name, value) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/download_manager.py b/env-llmeval/lib/python3.10/site-packages/datasets/download/download_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..6c838753b9998a8a0fb3d800f44f89e211a1f61d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/download/download_manager.py @@ -0,0 +1,584 @@ +# Copyright 2020 The TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +"""Download manager interface.""" + +import enum +import io +import os +import posixpath +import tarfile +import warnings +import zipfile +from datetime import datetime +from functools import partial +from itertools import chain +from typing import Callable, Dict, Generator, List, Optional, Tuple, Union + +from .. 
import config +from ..utils import tqdm as hf_tqdm +from ..utils.deprecation_utils import DeprecatedEnum, deprecated +from ..utils.file_utils import ( + cached_path, + get_from_cache, + hash_url_to_filename, + is_relative_path, + stack_multiprocessing_download_progress_bars, + url_or_path_join, +) +from ..utils.info_utils import get_size_checksum_dict +from ..utils.logging import get_logger +from ..utils.py_utils import NestedDataStructure, map_nested, size_str +from ..utils.track import TrackedIterable, tracked_str +from .download_config import DownloadConfig + + +logger = get_logger(__name__) + + +BASE_KNOWN_EXTENSIONS = [ + "txt", + "csv", + "json", + "jsonl", + "tsv", + "conll", + "conllu", + "orig", + "parquet", + "pkl", + "pickle", + "rel", + "xml", +] +MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { + bytes.fromhex("504B0304"): "zip", + bytes.fromhex("504B0506"): "zip", # empty archive + bytes.fromhex("504B0708"): "zip", # spanned archive + bytes.fromhex("425A68"): "bz2", + bytes.fromhex("1F8B"): "gzip", + bytes.fromhex("FD377A585A00"): "xz", + bytes.fromhex("04224D18"): "lz4", + bytes.fromhex("28B52FFD"): "zstd", +} +MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { + b"Rar!": "rar", +} +MAGIC_NUMBER_MAX_LENGTH = max( + len(magic_number) + for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) +) + + +class DownloadMode(enum.Enum): + """`Enum` for how to treat pre-existing downloads and data. + + The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both + raw downloads and the prepared dataset if they exist. + + The generations modes: + + | | Downloads | Dataset | + |-------------------------------------|-----------|---------| + | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse | + | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh | + | `FORCE_REDOWNLOAD` | Fresh | Fresh | + + """ + + REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists" + REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists" + FORCE_REDOWNLOAD = "force_redownload" + + +class GenerateMode(DeprecatedEnum): + REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists" + REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists" + FORCE_REDOWNLOAD = "force_redownload" + + @property + def help_message(self): + return "Use 'DownloadMode' instead." 
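+
+# Illustrative behaviour of the two helpers below (comments only, not part of the module):
+#   _get_path_extension("data/train.json.gz?dl=1")          # -> "gz"
+#   _get_path_extension("shards/part.txt-00000-of-00100")   # -> "txt"
+# _get_extraction_protocol() returns None for known plain-text or tar extensions and
+# otherwise sniffs the file's magic number to pick a compression protocol (zip, gzip, zstd, ...).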
+ + +def _get_path_extension(path: str) -> str: + # Get extension: train.json.gz -> gz + extension = path.split(".")[-1] + # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz + # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt + for symb in "?-_": + extension = extension.split(symb)[0] + return extension + + +def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: + """read the magic number from a file-like object and return the compression protocol""" + # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) + try: + f.seek(0) + except (AttributeError, io.UnsupportedOperation): + return None + magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) + f.seek(0) + for i in range(MAGIC_NUMBER_MAX_LENGTH): + compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) + if compression is not None: + return compression + compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) + if compression is not None: + raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") + + +def _get_extraction_protocol(path: str) -> Optional[str]: + path = str(path) + extension = _get_path_extension(path) + # TODO(mariosasko): The below check will be useful once we can preserve the original extension in the new cache layout (use the `filename` parameter of `hf_hub_download`) + if ( + extension in BASE_KNOWN_EXTENSIONS + or extension in ["tgz", "tar"] + or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) + ): + return None + with open(path, "rb") as f: + return _get_extraction_protocol_with_magic_number(f) + + +class _IterableFromGenerator(TrackedIterable): + """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" + + def __init__(self, generator: Callable, *args, **kwargs): + super().__init__() + self.generator = generator + self.args = args + self.kwargs = kwargs + + def __iter__(self): + for x in self.generator(*self.args, **self.kwargs): + self.last_item = x + yield x + self.last_item = None + + +class ArchiveIterable(_IterableFromGenerator): + """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" + + @staticmethod + def _iter_tar(f): + stream = tarfile.open(fileobj=f, mode="r|*") + for tarinfo in stream: + file_path = tarinfo.name + if not tarinfo.isreg(): + continue + if file_path is None: + continue + if os.path.basename(file_path).startswith((".", "__")): + # skipping hidden files + continue + file_obj = stream.extractfile(tarinfo) + yield file_path, file_obj + stream.members = [] + del stream + + @staticmethod + def _iter_zip(f): + zipf = zipfile.ZipFile(f) + for member in zipf.infolist(): + file_path = member.filename + if member.is_dir(): + continue + if file_path is None: + continue + if os.path.basename(file_path).startswith((".", "__")): + # skipping hidden files + continue + file_obj = zipf.open(member) + yield file_path, file_obj + + @classmethod + def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: + compression = _get_extraction_protocol_with_magic_number(f) + if compression == "zip": + yield from cls._iter_zip(f) + else: + yield from cls._iter_tar(f) + + @classmethod + def _iter_from_path(cls, urlpath: str) -> Generator[Tuple, None, None]: + compression = _get_extraction_protocol(urlpath) + with open(urlpath, "rb") as f: + if compression == "zip": + yield from cls._iter_zip(f) + else: + 
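+                # Non-zip archives fall through to the streaming tar reader, which yields
+                # (path, file object) pairs without unpacking the whole archive to disk.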
yield from cls._iter_tar(f) + + @classmethod + def from_buf(cls, fileobj) -> "ArchiveIterable": + return cls(cls._iter_from_fileobj, fileobj) + + @classmethod + def from_path(cls, urlpath_or_buf) -> "ArchiveIterable": + return cls(cls._iter_from_path, urlpath_or_buf) + + +class FilesIterable(_IterableFromGenerator): + """An iterable of paths from a list of directories or files""" + + @classmethod + def _iter_from_paths(cls, urlpaths: Union[str, List[str]]) -> Generator[str, None, None]: + if not isinstance(urlpaths, list): + urlpaths = [urlpaths] + for urlpath in urlpaths: + if os.path.isfile(urlpath): + yield urlpath + else: + for dirpath, dirnames, filenames in os.walk(urlpath): + # in-place modification to prune the search + dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) + if os.path.basename(dirpath).startswith((".", "__")): + # skipping hidden directories + continue + for filename in sorted(filenames): + if filename.startswith((".", "__")): + # skipping hidden files + continue + yield os.path.join(dirpath, filename) + + @classmethod + def from_paths(cls, urlpaths) -> "FilesIterable": + return cls(cls._iter_from_paths, urlpaths) + + +class DownloadManager: + is_streaming = False + + def __init__( + self, + dataset_name: Optional[str] = None, + data_dir: Optional[str] = None, + download_config: Optional[DownloadConfig] = None, + base_path: Optional[str] = None, + record_checksums=True, + ): + """Download manager constructor. + + Args: + data_dir: + can be used to specify a manual directory to get the files from. + dataset_name (`str`): + name of dataset this instance will be used for. If + provided, downloads will contain which datasets they were used for. + download_config (`DownloadConfig`): + to specify the cache directory and other + download options + base_path (`str`): + base path that is used when relative paths are used to + download files. This can be a remote url. + record_checksums (`bool`, defaults to `True`): + Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder. + """ + self._dataset_name = dataset_name + self._data_dir = data_dir + self._base_path = base_path or os.path.abspath(".") + # To record what is being used: {url: {num_bytes: int, checksum: str}} + self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {} + self.record_checksums = record_checksums + self.download_config = download_config or DownloadConfig() + self.downloaded_paths = {} + self.extracted_paths = {} + + @property + def manual_dir(self): + return self._data_dir + + @property + def downloaded_size(self): + """Returns the total size of downloaded files.""" + return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values()) + + @staticmethod + def ship_files_with_pipeline(downloaded_path_or_paths, pipeline): + """Ship the files using Beam FileSystems to the pipeline temp dir. + + Args: + downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`): + Nested structure containing the + downloaded path(s). + pipeline ([`utils.beam_utils.BeamPipeline`]): + Apache Beam Pipeline. 
+ + Returns: + `str` or `list[str]` or `dict[str, str]` + """ + from ..utils.beam_utils import upload_local_to_remote + + remote_dir = pipeline._options.get_all_options().get("temp_location") + if remote_dir is None: + raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files") + + def upload(local_file_path): + remote_file_path = posixpath.join( + remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path) + ) + logger.info( + f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}." + ) + upload_local_to_remote(local_file_path, remote_file_path) + return remote_file_path + + uploaded_path_or_paths = map_nested( + lambda local_file_path: upload(local_file_path), + downloaded_path_or_paths, + ) + return uploaded_path_or_paths + + def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure): + """Record size/checksum of downloaded files.""" + delay = 5 + for url, path in hf_tqdm( + list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())), + delay=delay, + desc="Computing checksums", + ): + # call str to support PathLike objects + self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict( + path, record_checksum=self.record_checksums + ) + + @deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.") + def download_custom(self, url_or_urls, custom_download): + """ + Download given urls(s) by calling `custom_download`. + + Args: + url_or_urls (`str` or `list` or `dict`): + URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`. + custom_download (`Callable[src_url, dst_path]`): + The source URL and destination path. For example + `tf.io.gfile.copy`, that lets you download from Google storage. + + Returns: + downloaded_path(s): `str`, The downloaded paths matching the given input + `url_or_urls`. + + Example: + + ```py + >>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket) + ``` + """ + cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH + max_retries = self.download_config.max_retries + + def url_to_downloaded_path(url): + return os.path.join(cache_dir, hash_url_to_filename(url)) + + downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls) + url_or_urls = NestedDataStructure(url_or_urls) + downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) + for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()): + try: + get_from_cache( + url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries + ) + cached = True + except FileNotFoundError: + cached = False + if not cached or self.download_config.force_download: + custom_download(url, path) + get_from_cache( + url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries + ) + self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) + return downloaded_path_or_paths.data + + def download(self, url_or_urls): + """Download given URL(s). + + By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior. + + Args: + url_or_urls (`str` or `list` or `dict`): + URL or `list` or `dict` of URLs to download. Each URL is a `str`. + + Returns: + `str` or `list` or `dict`: + The downloaded paths matching the given input `url_or_urls`. 
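+
+        Nested inputs keep their structure (editor's illustration; the URLs below are placeholders):
+
+        ```py
+        >>> downloaded_files = dl_manager.download({"train": "https://example.com/train.csv", "test": "https://example.com/test.csv"})
+        >>> sorted(downloaded_files)
+        ['test', 'train']
+        ```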
+ + Example: + + ```py + >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') + ``` + """ + download_config = self.download_config.copy() + download_config.extract_compressed_file = False + if download_config.download_desc is None: + download_config.download_desc = "Downloading data" + + download_func = partial(self._download, download_config=download_config) + + start_time = datetime.now() + with stack_multiprocessing_download_progress_bars(): + downloaded_path_or_paths = map_nested( + download_func, + url_or_urls, + map_tuple=True, + num_proc=download_config.num_proc, + desc="Downloading data files", + ) + duration = datetime.now() - start_time + logger.info(f"Downloading took {duration.total_seconds() // 60} min") + url_or_urls = NestedDataStructure(url_or_urls) + downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) + self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()))) + + start_time = datetime.now() + self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) + duration = datetime.now() - start_time + logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min") + + return downloaded_path_or_paths.data + + def _download(self, url_or_filename: str, download_config: DownloadConfig) -> str: + url_or_filename = str(url_or_filename) + if is_relative_path(url_or_filename): + # append the relative path to the base_path + url_or_filename = url_or_path_join(self._base_path, url_or_filename) + out = cached_path(url_or_filename, download_config=download_config) + out = tracked_str(out) + out.set_origin(url_or_filename) + return out + + def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): + """Iterate over files within an archive. + + Args: + path_or_buf (`str` or `io.BufferedReader`): + Archive path or archive binary file object. + + Yields: + `tuple[str, io.BufferedReader]`: + 2-tuple (path_within_archive, file_object). + File object is opened in binary mode. + + Example: + + ```py + >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') + >>> files = dl_manager.iter_archive(archive) + ``` + """ + + if hasattr(path_or_buf, "read"): + return ArchiveIterable.from_buf(path_or_buf) + else: + return ArchiveIterable.from_path(path_or_buf) + + def iter_files(self, paths: Union[str, List[str]]): + """Iterate over file paths. + + Args: + paths (`str` or `list` of `str`): + Root paths. + + Yields: + `str`: File path. + + Example: + + ```py + >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') + >>> files = dl_manager.iter_files(files) + ``` + """ + return FilesIterable.from_paths(paths) + + def extract(self, path_or_paths, num_proc="deprecated"): + """Extract given path(s). + + Args: + path_or_paths (path or `list` or `dict`): + Path of file to extract. Each path is a `str`. + num_proc (`int`): + Use multi-processing if `num_proc` > 1 and the length of + `path_or_paths` is larger than `num_proc`. + + + + Pass `DownloadConfig(num_proc=)` to the initializer instead. + + + + Returns: + extracted_path(s): `str`, The extracted paths matching the given input + path_or_paths. 
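+
+        As with `download`, nested inputs keep their structure (editor's illustration;
+        `downloaded_files` comes from a previous `download` call):
+
+        ```py
+        >>> extracted_files = dl_manager.extract({"train": downloaded_files["train"]})
+        ```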
+ + Example: + + ```py + >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') + >>> extracted_files = dl_manager.extract(downloaded_files) + ``` + """ + if num_proc != "deprecated": + warnings.warn( + "'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=)` to the initializer instead.", + FutureWarning, + ) + download_config = self.download_config.copy() + download_config.extract_compressed_file = True + extract_func = partial(self._download, download_config=download_config) + extracted_paths = map_nested( + extract_func, + path_or_paths, + num_proc=download_config.num_proc, + desc="Extracting data files", + ) + path_or_paths = NestedDataStructure(path_or_paths) + extracted_paths = NestedDataStructure(extracted_paths) + self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten()))) + return extracted_paths.data + + def download_and_extract(self, url_or_urls): + """Download and extract given `url_or_urls`. + + Is roughly equivalent to: + + ``` + extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls)) + ``` + + Args: + url_or_urls (`str` or `list` or `dict`): + URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`. + + Returns: + extracted_path(s): `str`, extracted paths of given URL(s). + """ + return self.extract(self.download(url_or_urls)) + + def get_recorded_sizes_checksums(self): + return self._recorded_sizes_checksums.copy() + + def delete_extracted_files(self): + paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values()) + for key, path in list(self.extracted_paths.items()): + if path in paths_to_delete and os.path.isfile(path): + os.remove(path) + del self.extracted_paths[key] + + def manage_extracted_files(self): + if self.download_config.delete_extracted: + self.delete_extracted_files() diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/mock_download_manager.py b/env-llmeval/lib/python3.10/site-packages/datasets/download/mock_download_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..7c71103a536b2a725ebb1d1dfe239b80baedc740 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/download/mock_download_manager.py @@ -0,0 +1,244 @@ +# Copyright 2020 The TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Lint as: python3 +"""Mock download manager interface.""" + +import os +import re +import urllib.parse +from pathlib import Path +from typing import Callable, List, Optional, Union +from zipfile import ZipFile + +from ..utils.file_utils import cached_path, hf_github_url +from ..utils.logging import get_logger +from ..utils.version import Version + + +logger = get_logger(__name__) + + +class MockDownloadManager: + dummy_file_name = "dummy_data" + datasets_scripts_dir = "datasets" + is_streaming = False + + def __init__( + self, + dataset_name: str, + config: str, + version: Union[Version, str], + cache_dir: Optional[str] = None, + use_local_dummy_data: bool = False, + load_existing_dummy_data: bool = True, + download_callbacks: Optional[List[Callable]] = None, + ): + self.downloaded_size = 0 + self.dataset_name = dataset_name + self.cache_dir = cache_dir + self.use_local_dummy_data = use_local_dummy_data + self.config = config + # download_callbacks take a single url as input + self.download_callbacks: List[Callable] = download_callbacks or [] + # if False, it doesn't load existing files and it returns the paths of the dummy files relative + # to the dummy_data zip file root + self.load_existing_dummy_data = load_existing_dummy_data + + # TODO(PVP, QL) might need to make this more general + self.version_name = str(version) + # to be downloaded + self._dummy_file = None + self._bucket_url = None + + @property + def dummy_file(self): + if self._dummy_file is None: + self._dummy_file = self.download_dummy_data() + return self._dummy_file + + @property + def dummy_data_folder(self): + if self.config is not None: + # structure is dummy / config_name / version_name + return os.path.join("dummy", self.config.name, self.version_name) + # structure is dummy / version_name + return os.path.join("dummy", self.version_name) + + @property + def dummy_zip_file(self): + return os.path.join(self.dummy_data_folder, "dummy_data.zip") + + def download_dummy_data(self): + path_to_dummy_data_dir = ( + self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data + ) + + local_path = cached_path( + path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True + ) + + return os.path.join(local_path, self.dummy_file_name) + + @property + def local_path_to_dummy_data(self): + return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file) + + @property + def github_path_to_dummy_data(self): + if self._bucket_url is None: + self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/")) + return self._bucket_url + + @property + def manual_dir(self): + # return full path if its a dir + if os.path.isdir(self.dummy_file): + return self.dummy_file + # else cut off path to file -> example `xsum`. 
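+        # i.e. return the parent directory of the dummy file, joined with forward slashes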
+ return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1]) + + # this function has to be in the manager under this name so that testing works + def download_and_extract(self, data_url, *args): + if self.load_existing_dummy_data: + # dummy data is downloaded and tested + dummy_file = self.dummy_file + else: + # dummy data cannot be downloaded and only the path to dummy file is returned + dummy_file = self.dummy_file_name + + # special case when data_url is a dict + if isinstance(data_url, dict): + return self.create_dummy_data_dict(dummy_file, data_url) + elif isinstance(data_url, (list, tuple)): + return self.create_dummy_data_list(dummy_file, data_url) + else: + return self.create_dummy_data_single(dummy_file, data_url) + + # this function has to be in the manager under this name so that testing works + def download(self, data_url, *args): + return self.download_and_extract(data_url) + + # this function has to be in the manager under this name so that testing works + def download_custom(self, data_url, custom_download): + return self.download_and_extract(data_url) + + # this function has to be in the manager under this name so that testing works + def extract(self, path, *args, **kwargs): + return path + + # this function has to be in the manager under this name so that testing works + def get_recorded_sizes_checksums(self): + return {} + + def create_dummy_data_dict(self, path_to_dummy_data, data_url): + dummy_data_dict = {} + for key, single_urls in data_url.items(): + for download_callback in self.download_callbacks: + if isinstance(single_urls, list): + for single_url in single_urls: + download_callback(single_url) + else: + single_url = single_urls + download_callback(single_url) + # we force the name of each key to be the last file / folder name of the url path + # if the url has arguments, we need to encode them with urllib.parse.quote_plus + if isinstance(single_urls, list): + value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls] + else: + single_url = single_urls + value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name)) + dummy_data_dict[key] = value + + # make sure that values are unique + if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( + dummy_data_dict.values() + ): + # append key to value to make its name unique + dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()} + + return dummy_data_dict + + def create_dummy_data_list(self, path_to_dummy_data, data_url): + dummy_data_list = [] + # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one + is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url) + is_pubmed_records = all( + url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url + ) + if data_url and (is_tf_records or is_pubmed_records): + data_url = [data_url[0]] * len(data_url) + for single_url in data_url: + for download_callback in self.download_callbacks: + download_callback(single_url) + # we force the name of each key to be the last file / folder name of the url path + # if the url has arguments, we need to encode them with urllib.parse.quote_plus + value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1])) + dummy_data_list.append(value) + return dummy_data_list + + def create_dummy_data_single(self, path_to_dummy_data, data_url): + for 
download_callback in self.download_callbacks: + download_callback(data_url) + # we force the name of each key to be the last file / folder name of the url path + # if the url has arguments, we need to encode them with urllib.parse.quote_plus + value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1])) + if os.path.exists(value) or not self.load_existing_dummy_data: + return value + else: + # Backward compatibility, maybe deprecate at one point. + # For many datasets with single url calls to dl_manager.download_and_extract, + # the dummy_data.zip file is actually the zipped downloaded file + # while now we expected the dummy_data.zip file to be a directory containing + # the downloaded file. + return path_to_dummy_data + + def delete_extracted_files(self): + pass + + def manage_extracted_files(self): + pass + + def iter_archive(self, path): + def _iter_archive_members(path): + # this preserves the order of the members inside the ZIP archive + dummy_parent_path = Path(self.dummy_file).parent + relative_path = path.relative_to(dummy_parent_path) + with ZipFile(self.local_path_to_dummy_data) as zip_file: + members = zip_file.namelist() + for member in members: + if member.startswith(relative_path.as_posix()): + yield dummy_parent_path.joinpath(member) + + path = Path(path) + file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*") + for file_path in file_paths: + if file_path.is_file() and not file_path.name.startswith((".", "__")): + yield file_path.relative_to(path).as_posix(), file_path.open("rb") + + def iter_files(self, paths): + if not isinstance(paths, list): + paths = [paths] + for path in paths: + if os.path.isfile(path): + yield path + else: + for dirpath, dirnames, filenames in os.walk(path): + if os.path.basename(dirpath).startswith((".", "__")): + continue + dirnames.sort() + for filename in sorted(filenames): + if filename.startswith((".", "__")): + continue + yield os.path.join(dirpath, filename) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py b/env-llmeval/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..0b347216a9cabc68d77e7f97a935003cfe763ccb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py @@ -0,0 +1,1133 @@ +import glob +import io +import os +import posixpath +import re +import tarfile +import time +import xml.dom.minidom +import zipfile +from asyncio import TimeoutError +from io import BytesIO +from itertools import chain +from pathlib import Path, PurePosixPath +from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union +from xml.etree import ElementTree as ET + +import fsspec +from aiohttp.client_exceptions import ClientError +from huggingface_hub.utils import EntryNotFoundError +from packaging import version + +from .. 
import config
+from ..filesystems import COMPRESSION_FILESYSTEMS
+from ..utils.file_utils import (
+    get_authentication_headers_for_url,
+    get_datasets_user_agent,
+    http_head,
+    is_local_path,
+    is_relative_path,
+    url_or_path_join,
+)
+from ..utils.logging import get_logger
+from ..utils.py_utils import map_nested
+from .download_config import DownloadConfig
+
+
+logger = get_logger(__name__)
+
+BASE_KNOWN_EXTENSIONS = [
+    "txt",
+    "csv",
+    "json",
+    "jsonl",
+    "tsv",
+    "conll",
+    "conllu",
+    "orig",
+    "parquet",
+    "pkl",
+    "pickle",
+    "rel",
+    "xml",
+]
+COMPRESSION_EXTENSION_TO_PROTOCOL = {
+    # single file compression
+    **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS},
+    # archive compression
+    "zip": "zip",
+}
+SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}
+SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/")
+
+MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
+    bytes.fromhex("504B0304"): "zip",
+    bytes.fromhex("504B0506"): "zip",  # empty archive
+    bytes.fromhex("504B0708"): "zip",  # spanned archive
+    bytes.fromhex("425A68"): "bz2",
+    bytes.fromhex("1F8B"): "gzip",
+    bytes.fromhex("FD377A585A00"): "xz",
+    bytes.fromhex("04224D18"): "lz4",
+    bytes.fromhex("28B52FFD"): "zstd",
+}
+MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
+    b"Rar!": "rar",
+}
+MAGIC_NUMBER_MAX_LENGTH = max(
+    len(magic_number)
+    for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
+)
+
+
+class NonStreamableDatasetError(Exception):
+    pass
+
+
+def xjoin(a, *p):
+    """
+    This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
+
+    A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+    This is used to access files inside a zip file over http for example.
+
+    Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+    Then you can just chain the url this way:
+
+        zip://folder1/file.txt::https://host.com/archive.zip
+
+    The xjoin function allows you to apply the join on the first path of the chain.
+
+    Example::
+
+        >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
+        zip://folder1/file.txt::https://host.com/archive.zip
+    """
+    a, *b = str(a).split("::")
+    if is_local_path(a):
+        return os.path.join(a, *p)
+    else:
+        a = posixpath.join(a, *p)
+        return "::".join([a] + b)
+
+
+def xdirname(a):
+    """
+    This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls.
+
+    A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+    This is used to access files inside a zip file over http for example.
+
+    Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+    Then you can just chain the url this way:
+
+        zip://folder1/file.txt::https://host.com/archive.zip
+
+    The xdirname function allows you to apply the dirname on the first path of the chain.
+
+    Example::
+
+        >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip")
+        zip://folder1::https://host.com/archive.zip
+    """
+    a, *b = str(a).split("::")
+    if is_local_path(a):
+        a = os.path.dirname(Path(a).as_posix())
+    else:
+        a = posixpath.dirname(a)
+    # if we end up at the root of the protocol, we get for example a = 'http:'
+    # so we have to fix it by adding the '//' that was removed:
+    if a.endswith(":"):
+        a += "//"
+    return "::".join([a] + b)
+
+
+def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None):
+    """Extend `os.path.exists` function to support both local and remote files.
+
+    Args:
+        urlpath (`str`): URL path.
+        download_config : mainly use token or storage_options to support different platforms and auth types.
+
+    Returns:
+        `bool`
+    """
+
+    main_hop, *rest_hops = _as_str(urlpath).split("::")
+    if is_local_path(main_hop):
+        return os.path.exists(main_hop)
+    else:
+        urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
+        main_hop, *rest_hops = urlpath.split("::")
+        fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
+        return fs.exists(main_hop)
+
+
+def xbasename(a):
+    """
+    This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls.
+
+    A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+    This is used to access files inside a zip file over http for example.
+
+    Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+ Then you can just chain the url this way: + + zip://folder1/file.txt::https://host.com/archive.zip + + The xbasename function allows you to apply the basename on the first path of the chain. + + Example:: + + >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip") + file.txt + """ + a, *b = str(a).split("::") + if is_local_path(a): + return os.path.basename(Path(a).as_posix()) + else: + return posixpath.basename(a) + + +def xsplit(a): + """ + This function extends os.path.split to support the "::" hop separator. It supports both paths and urls. + + A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". + This is used to access files inside a zip file over http for example. + + Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. + Then you can just chain the url this way: + + zip://folder1/file.txt::https://host.com/archive.zip + + The xsplit function allows you to apply the xsplit on the first path of the chain. + + Example:: + + >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip") + ('zip://folder1::https://host.com/archive.zip', 'file.txt') + """ + a, *b = str(a).split("::") + if is_local_path(a): + return os.path.split(Path(a).as_posix()) + else: + a, tail = posixpath.split(a) + return "::".join([a + "//" if a.endswith(":") else a] + b), tail + + +def xsplitext(a): + """ + This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls. + + A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". + This is used to access files inside a zip file over http for example. + + Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. + Then you can just chain the url this way: + + zip://folder1/file.txt::https://host.com/archive.zip + + The xsplitext function allows you to apply the splitext on the first path of the chain. + + Example:: + + >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip") + ('zip://folder1/file::https://host.com/archive.zip', '.txt') + """ + a, *b = str(a).split("::") + if is_local_path(a): + return os.path.splitext(Path(a).as_posix()) + else: + a, ext = posixpath.splitext(a) + return "::".join([a] + b), ext + + +def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool: + """Extend `os.path.isfile` function to support remote files. + + Args: + path (`str`): URL path. + download_config : mainly use token or storage_options to support different platforms and auth types. + + Returns: + `bool` + """ + main_hop, *rest_hops = str(path).split("::") + if is_local_path(main_hop): + return os.path.isfile(path) + else: + path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) + main_hop, *rest_hops = path.split("::") + fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) + return fs.isfile(main_hop) + + +def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int: + """Extend `os.path.getsize` function to support remote files. + + Args: + path (`str`): URL path. + download_config : mainly use token or storage_options to support different platforms and auth types. 
+ + Returns: + `int`: optional + """ + main_hop, *rest_hops = str(path).split("::") + if is_local_path(main_hop): + return os.path.getsize(path) + else: + path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) + main_hop, *rest_hops = path.split("::") + fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) + try: + size = fs.size(main_hop) + except EntryNotFoundError: + raise FileNotFoundError(f"No such file: {path}") + if size is None: + # use xopen instead of fs.open to make data fetching more robust + with xopen(path, download_config=download_config) as f: + size = len(f.read()) + return size + + +def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool: + """Extend `os.path.isdir` function to support remote files. + + Args: + path (`str`): URL path. + download_config : mainly use token or storage_options to support different platforms and auth types. + + Returns: + `bool` + """ + main_hop, *rest_hops = str(path).split("::") + if is_local_path(main_hop): + return os.path.isdir(path) + else: + path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) + main_hop, *rest_hops = path.split("::") + fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) + inner_path = main_hop.split("://")[-1] + if not inner_path.strip("/"): + return True + return fs.isdir(inner_path) + + +def xrelpath(path, start=None): + """Extend `os.path.relpath` function to support remote files. + + Args: + path (`str`): URL path. + start (`str`): Start URL directory path. + + Returns: + `str` + """ + main_hop, *rest_hops = str(path).split("::") + if is_local_path(main_hop): + return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop) + else: + return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop) + + +def _add_retries_to_file_obj_read_method(file_obj): + read = file_obj.read + max_retries = config.STREAMING_READ_MAX_RETRIES + + def read_with_retries(*args, **kwargs): + disconnect_err = None + for retry in range(1, max_retries + 1): + try: + out = read(*args, **kwargs) + break + except (ClientError, TimeoutError) as err: + disconnect_err = err + logger.warning( + f"Got disconnected from remote data host. 
Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]" + ) + time.sleep(config.STREAMING_READ_RETRY_INTERVAL) + else: + raise ConnectionError("Server Disconnected") from disconnect_err + return out + + file_obj.read = read_with_retries + + +def _get_path_extension(path: str) -> str: + # Get extension: https://foo.bar/train.json.gz -> gz + extension = path.split(".")[-1] + # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz + # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt + for symb in "?-_": + extension = extension.split(symb)[0] + return extension + + +def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: + """read the magic number from a file-like object and return the compression protocol""" + # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) + try: + f.seek(0) + except (AttributeError, io.UnsupportedOperation): + return None + magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) + f.seek(0) + for i in range(MAGIC_NUMBER_MAX_LENGTH): + compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) + if compression is not None: + return compression + compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) + if compression is not None: + raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") + + +def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]: + # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz + urlpath = str(urlpath) + path = urlpath.split("::")[0] + extension = _get_path_extension(path) + if ( + extension in BASE_KNOWN_EXTENSIONS + or extension in ["tgz", "tar"] + or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) + ): + return None + elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL: + return COMPRESSION_EXTENSION_TO_PROTOCOL[extension] + urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) + try: + with fsspec.open(urlpath, **(storage_options or {})) as f: + return _get_extraction_protocol_with_magic_number(f) + except FileNotFoundError: + if urlpath.startswith(config.HF_ENDPOINT): + raise FileNotFoundError( + urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." + ) from None + else: + raise + + +def _prepare_path_and_storage_options( + urlpath: str, download_config: Optional[DownloadConfig] = None +) -> Tuple[str, Dict[str, Dict[str, Any]]]: + prepared_urlpath = [] + prepared_storage_options = {} + for hop in urlpath.split("::"): + hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config) + prepared_urlpath.append(hop) + prepared_storage_options.update(storage_options) + return "::".join(prepared_urlpath), storage_options + + +def _prepare_single_hop_path_and_storage_options( + urlpath: str, download_config: Optional[DownloadConfig] = None +) -> Tuple[str, Dict[str, Dict[str, Any]]]: + """ + Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head + + In particular it resolves google drive URLs + It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths. 
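+
+    For example (editor's illustration; the repository name is a placeholder), a Hub URL such as
+    https://huggingface.co/datasets/my_dataset/resolve/main/data/train.csv is rewritten to
+    hf://datasets/my_dataset@main/data/train.csv before the storage options are built.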
+ + Storage options are formatted in the form {protocol: storage_options_for_protocol} + """ + token = None if download_config is None else download_config.token + if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath: + urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) + protocol = urlpath.split("://")[0] if "://" in urlpath else "file" + if download_config is not None and protocol in download_config.storage_options: + storage_options = download_config.storage_options[protocol] + elif download_config is not None and protocol not in download_config.storage_options: + storage_options = { + option_name: option_value + for option_name, option_value in download_config.storage_options.items() + if option_name not in fsspec.available_protocols() + } + else: + storage_options = {} + if storage_options: + storage_options = {protocol: storage_options} + if protocol in ["http", "https"]: + storage_options[protocol] = { + "headers": { + **get_authentication_headers_for_url(urlpath, token=token), + "user-agent": get_datasets_user_agent(), + }, + "client_kwargs": {"trust_env": True}, # Enable reading proxy env variables. + **(storage_options.get(protocol, {})), + } + if "drive.google.com" in urlpath: + response = http_head(urlpath) + cookies = None + for k, v in response.cookies.items(): + if k.startswith("download_warning"): + urlpath += "&confirm=" + v + cookies = response.cookies + storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})} + # Fix Google Drive URL to avoid Virus scan warning + if "drive.google.com" in urlpath and "confirm=" not in urlpath: + urlpath += "&confirm=t" + if urlpath.startswith("https://raw.githubusercontent.com/"): + # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389 + storage_options[protocol]["headers"]["Accept-Encoding"] = "identity" + elif protocol == "hf": + storage_options[protocol] = { + "token": token, + "endpoint": config.HF_ENDPOINT, + **storage_options.get(protocol, {}), + } + # streaming with block_size=0 is only implemented in 0.21 (see https://github.com/huggingface/huggingface_hub/pull/1967) + if config.HF_HUB_VERSION < version.parse("0.21.0"): + storage_options[protocol]["block_size"] = "default" + return urlpath, storage_options + + +def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs): + """Extend `open` function to support remote files using `fsspec`. + + It also has a retry mechanism in case connection fails. + The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co + + Args: + file (`str`): Path name of the file to be opened. + mode (`str`, *optional*, default "r"): Mode in which the file is opened. + *args: Arguments to be passed to `fsspec.open`. + download_config : mainly use token or storage_options to support different platforms and auth types. + **kwargs: Keyword arguments to be passed to `fsspec.open`. 
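+
+    A chained URL works the same way as a plain path (editor's illustration; the archive URL is a placeholder):
+
+    ```py
+    >>> with xopen("zip://train.csv::https://example.com/archive.zip", "rb") as f:
+    ...     header = f.readline()
+    ```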
+ + Returns: + file object + """ + # This works as well for `xopen(str(Path(...)))` + file_str = _as_str(file) + main_hop, *rest_hops = file_str.split("::") + if is_local_path(main_hop): + # ignore fsspec-specific kwargs + kwargs.pop("block_size", None) + return open(main_hop, mode, *args, **kwargs) + # add headers and cookies for authentication on the HF Hub and for Google Drive + file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config) + kwargs = {**kwargs, **(storage_options or {})} + try: + file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open() + except ValueError as e: + if str(e) == "Cannot seek streaming HTTP file": + raise NonStreamableDatasetError( + "Streaming is not possible for this dataset because data host server doesn't support HTTP range " + "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)" + ) from e + else: + raise + except FileNotFoundError: + if file.startswith(config.HF_ENDPOINT): + raise FileNotFoundError( + file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." + ) from None + else: + raise + _add_retries_to_file_obj_read_method(file_obj) + return file_obj + + +def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]: + """Extend `os.listdir` function to support remote files. + + Args: + path (`str`): URL path. + download_config : mainly use token or storage_options to support different platforms and auth types. + + Returns: + `list` of `str` + """ + main_hop, *rest_hops = _as_str(path).split("::") + if is_local_path(main_hop): + return os.listdir(path) + else: + # globbing inside a zip in a private repo requires authentication + path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) + main_hop, *rest_hops = path.split("::") + fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) + inner_path = main_hop.split("://")[-1] + if inner_path.strip("/") and not fs.isdir(inner_path): + raise FileNotFoundError(f"Directory doesn't exist: {path}") + paths = fs.listdir(inner_path, detail=False) + return [os.path.basename(path.rstrip("/")) for path in paths] + + +def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None): + """Extend `glob.glob` function to support remote files. + + Args: + urlpath (`str`): URL path with shell-style wildcard patterns. + recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more + directories or subdirectories. + download_config : mainly use token or storage_options to support different platforms and auth types. + + Returns: + `list` of `str` + """ + main_hop, *rest_hops = _as_str(urlpath).split("::") + if is_local_path(main_hop): + return glob.glob(main_hop, recursive=recursive) + else: + # globbing inside a zip in a private repo requires authentication + urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) + main_hop, *rest_hops = urlpath.split("::") + fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) + # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching + # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. + # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. + # - If there is "**" in the pattern, `fs.glob` must be called anyway. 
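+        # the glob pattern is applied to the path inside the protocol,
+        # e.g. "zip://folder/*.csv::https://host.com/archive.zip" -> main_hop "zip://folder/*.csv" -> inner_path "folder/*.csv"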
+ inner_path = main_hop.split("://")[1] + globbed_paths = fs.glob(inner_path) + protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] + return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths] + + +def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs): + """Extend `os.walk` function to support remote files. + + Args: + urlpath (`str`): URL root path. + download_config : mainly use token or storage_options to support different platforms and auth types. + **kwargs: Additional keyword arguments forwarded to the underlying filesystem. + + + Yields: + `tuple`: 3-tuple (dirpath, dirnames, filenames). + """ + main_hop, *rest_hops = _as_str(urlpath).split("::") + if is_local_path(main_hop): + yield from os.walk(main_hop, **kwargs) + else: + # walking inside a zip in a private repo requires authentication + urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) + main_hop, *rest_hops = urlpath.split("::") + fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) + inner_path = main_hop.split("://")[-1] + if inner_path.strip("/") and not fs.isdir(inner_path): + return [] + protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] + for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs): + yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames + + +class xPath(type(Path())): + """Extension of `pathlib.Path` to support both local paths and remote URLs.""" + + def __str__(self): + path_str = super().__str__() + main_hop, *rest_hops = path_str.split("::") + if is_local_path(main_hop): + return main_hop + path_as_posix = path_str.replace("\\", "/") + path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix) + path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol + return path_as_posix + + def exists(self, download_config: Optional[DownloadConfig] = None): + """Extend `pathlib.Path.exists` method to support both local and remote files. + + Args: + download_config : mainly use token or storage_options to support different platforms and auth types. + + Returns: + `bool` + """ + return xexists(str(self), download_config=download_config) + + def glob(self, pattern, download_config: Optional[DownloadConfig] = None): + """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. + + Args: + pattern (`str`): Pattern that resulting paths must match. + download_config : mainly use token or storage_options to support different platforms and auth types. + + Yields: + [`xPath`] + """ + posix_path = self.as_posix() + main_hop, *rest_hops = posix_path.split("::") + if is_local_path(main_hop): + yield from Path(main_hop).glob(pattern) + else: + # globbing inside a zip in a private repo requires authentication + if rest_hops: + urlpath = rest_hops[0] + urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) + storage_options = {urlpath.split("://")[0]: storage_options} + posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]]) + else: + storage_options = None + fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options) + # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching + # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. 
+ # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. + # - If there is "**" in the pattern, `fs.glob` must be called anyway. + globbed_paths = fs.glob(xjoin(main_hop, pattern)) + for globbed_path in globbed_paths: + yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) + + def rglob(self, pattern, **kwargs): + """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. + + Args: + pattern (`str`): Pattern that resulting paths must match. + + Yields: + [`xPath`] + """ + return self.glob("**/" + pattern, **kwargs) + + @property + def parent(self) -> "xPath": + """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. + + Returns: + [`xPath`] + """ + return type(self)(xdirname(self.as_posix())) + + @property + def name(self) -> str: + """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. + + Returns: + `str` + """ + return PurePosixPath(self.as_posix().split("::")[0]).name + + @property + def stem(self) -> str: + """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. + + Returns: + `str` + """ + return PurePosixPath(self.as_posix().split("::")[0]).stem + + @property + def suffix(self) -> str: + """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. + + Returns: + `str` + """ + return PurePosixPath(self.as_posix().split("::")[0]).suffix + + def open(self, *args, **kwargs): + """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`. + + Args: + **args: Arguments passed to :func:`fsspec.open`. + **kwargs: Keyword arguments passed to :func:`fsspec.open`. + + Returns: + `io.FileIO`: File-like object. + """ + return xopen(str(self), *args, **kwargs) + + def joinpath(self, *p: Tuple[str, ...]) -> "xPath": + """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`. + + Args: + *p (`tuple` of `str`): Other path components. 
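+
+        For example (editor's illustration):
+
+        ```py
+        >>> str(xPath("zip://folder1::https://host.com/archive.zip").joinpath("file.txt"))
+        'zip://folder1/file.txt::https://host.com/archive.zip'
+        ```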
+ + Returns: + [`xPath`] + """ + return type(self)(xjoin(self.as_posix(), *p)) + + def __truediv__(self, p: str) -> "xPath": + return self.joinpath(p) + + def with_suffix(self, suffix): + main_hop, *rest_hops = str(self).split("::") + if is_local_path(main_hop): + return type(self)(str(super().with_suffix(suffix))) + return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) + + +def _as_str(path: Union[str, Path, xPath]): + return str(path) if isinstance(path, xPath) else str(xPath(str(path))) + + +def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): + import gzip + + if hasattr(filepath_or_buffer, "read"): + return gzip.open(filepath_or_buffer, *args, **kwargs) + else: + filepath_or_buffer = str(filepath_or_buffer) + return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) + + +def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): + import numpy as np + + if hasattr(filepath_or_buffer, "read"): + return np.load(filepath_or_buffer, *args, **kwargs) + else: + filepath_or_buffer = str(filepath_or_buffer) + return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) + + +def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): + import pandas as pd + + if hasattr(filepath_or_buffer, "read"): + return pd.read_csv(filepath_or_buffer, **kwargs) + else: + filepath_or_buffer = str(filepath_or_buffer) + if kwargs.get("compression", "infer") == "infer": + kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config) + return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) + + +def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): + import pandas as pd + + if hasattr(filepath_or_buffer, "read"): + try: + return pd.read_excel(filepath_or_buffer, **kwargs) + except ValueError: # Cannot seek streaming HTTP file + return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) + else: + filepath_or_buffer = str(filepath_or_buffer) + try: + return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) + except ValueError: # Cannot seek streaming HTTP file + return pd.read_excel( + BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs + ) + + +def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): + import pyarrow.parquet as pq + + if hasattr(filepath_or_buffer, "read"): + return pq.read_table(filepath_or_buffer, **kwargs) + else: + filepath_or_buffer = str(filepath_or_buffer) + return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs) + + +def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): + import scipy.io as sio + + if hasattr(filepath_or_buffer, "read"): + return sio.loadmat(filepath_or_buffer, **kwargs) + else: + return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) + + +def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None): + """Extend `xml.etree.ElementTree.parse` function to support remote files. + + Args: + source: File path or file object. 
+ parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance. + download_config : mainly use token or storage_options to support different platforms and auth types. + + Returns: + `xml.etree.ElementTree.Element`: Root element of the given source document. + """ + if hasattr(source, "read"): + return ET.parse(source, parser=parser) + else: + with xopen(source, "rb", download_config=download_config) as f: + return ET.parse(f, parser=parser) + + +def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs): + """Extend `xml.dom.minidom.parse` function to support remote files. + + Args: + filename_or_file (`str` or file): File path or file object. + download_config : mainly use token or storage_options to support different platforms and auth types. + **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`. + + Returns: + :obj:`xml.dom.minidom.Document`: Parsed document. + """ + if hasattr(filename_or_file, "read"): + return xml.dom.minidom.parse(filename_or_file, **kwargs) + else: + with xopen(filename_or_file, "rb", download_config=download_config) as f: + return xml.dom.minidom.parse(f, **kwargs) + + +class _IterableFromGenerator(Iterable): + """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" + + def __init__(self, generator: Callable, *args, **kwargs): + self.generator = generator + self.args = args + self.kwargs = kwargs + + def __iter__(self): + yield from self.generator(*self.args, **self.kwargs) + + +class ArchiveIterable(_IterableFromGenerator): + """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" + + @staticmethod + def _iter_tar(f): + stream = tarfile.open(fileobj=f, mode="r|*") + for tarinfo in stream: + file_path = tarinfo.name + if not tarinfo.isreg(): + continue + if file_path is None: + continue + if os.path.basename(file_path).startswith((".", "__")): + # skipping hidden files + continue + file_obj = stream.extractfile(tarinfo) + yield file_path, file_obj + stream.members = [] + del stream + + @staticmethod + def _iter_zip(f): + zipf = zipfile.ZipFile(f) + for member in zipf.infolist(): + file_path = member.filename + if member.is_dir(): + continue + if file_path is None: + continue + if os.path.basename(file_path).startswith((".", "__")): + # skipping hidden files + continue + file_obj = zipf.open(member) + yield file_path, file_obj + + @classmethod + def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: + compression = _get_extraction_protocol_with_magic_number(f) + if compression == "zip": + yield from cls._iter_zip(f) + else: + yield from cls._iter_tar(f) + + @classmethod + def _iter_from_urlpath( + cls, urlpath: str, download_config: Optional[DownloadConfig] = None + ) -> Generator[Tuple, None, None]: + compression = _get_extraction_protocol(urlpath, download_config=download_config) + # Set block_size=0 to get faster streaming + # (e.g. 
for hf:// and https:// it uses streaming Requests file-like instances) + with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f: + if compression == "zip": + yield from cls._iter_zip(f) + else: + yield from cls._iter_tar(f) + + @classmethod + def from_buf(cls, fileobj) -> "ArchiveIterable": + return cls(cls._iter_from_fileobj, fileobj) + + @classmethod + def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable": + return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config) + + +class FilesIterable(_IterableFromGenerator): + """An iterable of paths from a list of directories or files""" + + @classmethod + def _iter_from_urlpaths( + cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None + ) -> Generator[str, None, None]: + if not isinstance(urlpaths, list): + urlpaths = [urlpaths] + for urlpath in urlpaths: + if xisfile(urlpath, download_config=download_config): + yield urlpath + elif xisdir(urlpath, download_config=download_config): + for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config): + # in-place modification to prune the search + dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) + if xbasename(dirpath).startswith((".", "__")): + # skipping hidden directories + continue + for filename in sorted(filenames): + if filename.startswith((".", "__")): + # skipping hidden files + continue + yield xjoin(dirpath, filename) + else: + raise FileNotFoundError(urlpath) + + @classmethod + def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable": + return cls(cls._iter_from_urlpaths, urlpaths, download_config) + + +class StreamingDownloadManager: + """ + Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives. + Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract + data, but they rather return the path or url that could be opened using the `xopen` function which extends the + built-in `open` function to stream data from remote files. + """ + + is_streaming = True + + def __init__( + self, + dataset_name: Optional[str] = None, + data_dir: Optional[str] = None, + download_config: Optional[DownloadConfig] = None, + base_path: Optional[str] = None, + ): + self._dataset_name = dataset_name + self._data_dir = data_dir + self._base_path = base_path or os.path.abspath(".") + self.download_config = download_config or DownloadConfig() + + @property + def manual_dir(self): + return self._data_dir + + def download(self, url_or_urls): + """Normalize URL(s) of files to stream data from. + This is the lazy version of `DownloadManager.download` for streaming. + + Args: + url_or_urls (`str` or `list` or `dict`): + URL(s) of files to stream data from. Each url is a `str`. + + Returns: + url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls. 
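+
+        Nothing is fetched at this point; nested inputs simply keep their structure
+        (editor's illustration, the URL is a placeholder):
+
+        ```py
+        >>> urls = dl_manager.download({"train": "https://example.com/train.jsonl"})
+        >>> urls["train"]
+        'https://example.com/train.jsonl'
+        ```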
+ + Example: + + ```py + >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') + ``` + """ + url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True) + return url_or_urls + + def _download(self, urlpath: str) -> str: + urlpath = str(urlpath) + if is_relative_path(urlpath): + # append the relative path to the base_path + urlpath = url_or_path_join(self._base_path, urlpath) + return urlpath + + def extract(self, url_or_urls): + """Add extraction protocol for given url(s) for streaming. + + This is the lazy version of `DownloadManager.extract` for streaming. + + Args: + url_or_urls (`str` or `list` or `dict`): + URL(s) of files to stream data from. Each url is a `str`. + + Returns: + url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. + + Example: + + ```py + >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') + >>> extracted_files = dl_manager.extract(downloaded_files) + ``` + """ + urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True) + return urlpaths + + def _extract(self, urlpath: str) -> str: + urlpath = str(urlpath) + protocol = _get_extraction_protocol(urlpath, download_config=self.download_config) + # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz + path = urlpath.split("::")[0] + extension = _get_path_extension(path) + if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")): + raise NotImplementedError( + f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. " + f"Please use `dl_manager.iter_archive` instead.\n\n" + f"Example usage:\n\n" + f"\turl = dl_manager.download(url)\n" + f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n" + f"\tfor filename, file in tar_archive_iterator:\n" + f"\t\t..." + ) + if protocol is None: + # no extraction + return urlpath + elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: + # there is one single file which is the uncompressed file + inner_file = os.path.basename(urlpath.split("::")[0]) + inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file + return f"{protocol}://{inner_file}::{urlpath}" + else: + return f"{protocol}://::{urlpath}" + + def download_and_extract(self, url_or_urls): + """Prepare given `url_or_urls` for streaming (add extraction protocol). + + This is the lazy version of `DownloadManager.download_and_extract` for streaming. + + Is equivalent to: + + ``` + urls = dl_manager.extract(dl_manager.download(url_or_urls)) + ``` + + Args: + url_or_urls (`str` or `list` or `dict`): + URL(s) to stream from data from. Each url is a `str`. + + Returns: + url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. + """ + return self.extract(self.download(url_or_urls)) + + def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]: + """Iterate over files within an archive. + + Args: + urlpath_or_buf (`str` or `io.BufferedReader`): + Archive path or archive binary file object. + + Yields: + `tuple[str, io.BufferedReader]`: + 2-tuple (path_within_archive, file_object). + File object is opened in binary mode. 
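+
+        Each yielded pair can be consumed like this (editor's illustration; `archive` is a
+        downloaded TAR path as in the example below):
+
+        ```py
+        >>> for path_within_archive, file_obj in dl_manager.iter_archive(archive):
+        ...     first_bytes = file_obj.read(4)
+        ```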
+ + Example: + + ```py + >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') + >>> files = dl_manager.iter_archive(archive) + ``` + """ + + if hasattr(urlpath_or_buf, "read"): + return ArchiveIterable.from_buf(urlpath_or_buf) + else: + return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config) + + def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]: + """Iterate over files. + + Args: + urlpaths (`str` or `list` of `str`): + Root paths. + + Yields: + str: File URL path. + + Example: + + ```py + >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') + >>> files = dl_manager.iter_files(files) + ``` + """ + return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba1edcff07086f6bc5d69d0f6222d5a1785e3cda --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__init__.py @@ -0,0 +1,131 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ruff: noqa + +from typing import Dict, List, Optional, Type + +from .. import config +from ..utils import logging +from .formatting import ( + ArrowFormatter, + CustomFormatter, + Formatter, + PandasFormatter, + PythonFormatter, + TensorFormatter, + format_table, + query_table, +) +from .np_formatter import NumpyFormatter + + +logger = logging.get_logger(__name__) + +_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} +_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} +_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} + + +def _register_formatter( + formatter_cls: type, + format_type: Optional[str], + aliases: Optional[List[str]] = None, +): + """ + Register a Formatter object using a name and optional aliases. + This function must be used on a Formatter class. + """ + aliases = aliases if aliases is not None else [] + if format_type in _FORMAT_TYPES: + logger.warning( + f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" + ) + _FORMAT_TYPES[format_type] = formatter_cls + for alias in set(aliases + [format_type]): + if alias in _FORMAT_TYPES_ALIASES: + logger.warning( + f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" + ) + _FORMAT_TYPES_ALIASES[alias] = format_type + + +def _register_unavailable_formatter( + unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None +): + """ + Register an unavailable Formatter object using a name and optional aliases. 
+ This function must be used on an Exception object that is raised when trying to get the unavailable formatter. + """ + aliases = aliases if aliases is not None else [] + for alias in set(aliases + [format_type]): + _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error + + +# Here we define all the available formatting functions that can be used by `Dataset.set_format` +_register_formatter(PythonFormatter, None, aliases=["python"]) +_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) +_register_formatter(NumpyFormatter, "numpy", aliases=["np"]) +_register_formatter(PandasFormatter, "pandas", aliases=["pd"]) +_register_formatter(CustomFormatter, "custom") + +if config.TORCH_AVAILABLE: + from .torch_formatter import TorchFormatter + + _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) +else: + _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") + _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) + +if config.TF_AVAILABLE: + from .tf_formatter import TFFormatter + + _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) +else: + _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") + _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) + +if config.JAX_AVAILABLE: + from .jax_formatter import JaxFormatter + + _register_formatter(JaxFormatter, "jax", aliases=[]) +else: + _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.") + _register_unavailable_formatter(_jax_error, "jax", aliases=[]) + + +def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: + """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change.""" + if format_type in _FORMAT_TYPES_ALIASES: + return _FORMAT_TYPES_ALIASES[format_type] + else: + return format_type + + +def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: + """ + Factory function to get a Formatter given its type name and keyword arguments. + A formatter is an object that extracts and formats data from pyarrow table. + It defines the formatting for rows, colums and batches. + If the formatter for a given type name doesn't exist or is not available, an error is raised. 
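    Taken together, the registry and this factory resolve an alias to its main type name before
    instantiating the corresponding formatter. A small sketch of the intended call pattern
    (aliases as registered above):

    ```py
    >>> from datasets.formatting import get_format_type_from_alias, get_formatter
    >>> get_format_type_from_alias("np")
    'numpy'
    >>> formatter = get_formatter("np")      # NumpyFormatter instance
    >>> formatter = get_formatter("torch")   # raises the error registered above if PyTorch is missing
    ```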
+ """ + format_type = get_format_type_from_alias(format_type) + if format_type in _FORMAT_TYPES: + return _FORMAT_TYPES[format_type](**format_kwargs) + if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: + raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] + else: + raise ValueError( + f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..536a512711c0b720133192d999cb0ff2d0efa807 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1d4c0a9e55baa161ba08f5c321497009573a4db Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa7899304f253b432a5c5ee189cd5dfdfd655ca0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc2a28ef450afa3f3910018ae65381169d7d4d75 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82ebffb5ac6f7e82f1fbe3fd492a6e65a2337778 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efb525738eb68c1746c89c0235e34fc60b8c6c31 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/formatting.py b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..94fa453877481dfaecc0f90965929f7bdb46a650 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/formatting.py @@ -0,0 +1,649 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections.abc import Mapping, MutableMapping +from functools import partial + +# Lint as: python3 +from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union + +import numpy as np +import pandas as pd +import pyarrow as pa +from packaging import version + +from .. import config +from ..features import Features +from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper +from ..table import Table +from ..utils.py_utils import no_op_if_value_is_null + + +T = TypeVar("T") + +RowFormat = TypeVar("RowFormat") +ColumnFormat = TypeVar("ColumnFormat") +BatchFormat = TypeVar("BatchFormat") + + +def _is_range_contiguous(key: range) -> bool: + return key.step == 1 and key.stop >= key.start + + +def _raise_bad_key_type(key: Any): + raise TypeError( + f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable." + ) + + +def _query_table_with_indices_mapping( + table: Table, key: Union[int, slice, range, str, Iterable], indices: Table +) -> pa.Table: + """ + Query a pyarrow Table to extract the subtable that correspond to the given key. + The :obj:`indices` parameter corresponds to the indices mapping in case we cant to take into + account a shuffling or an indices selection for example. + The indices table must contain one column named "indices" of type uint64. + """ + if isinstance(key, int): + key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py() + return _query_table(table, key) + if isinstance(key, slice): + key = range(*key.indices(indices.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return _query_table( + table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)] + ) + else: + pass # treat as an iterable + if isinstance(key, str): + table = table.select([key]) + return _query_table(table, indices.column(0).to_pylist()) + if isinstance(key, Iterable): + return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key]) + + _raise_bad_key_type(key) + + +def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table: + """ + Query a pyarrow Table to extract the subtable that correspond to the given key. 
+ """ + if isinstance(key, int): + return table.fast_slice(key % table.num_rows, 1) + if isinstance(key, slice): + key = range(*key.indices(table.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return table.fast_slice(key.start, key.stop - key.start) + else: + pass # treat as an iterable + if isinstance(key, str): + return table.table.drop([column for column in table.column_names if column != key]) + if isinstance(key, Iterable): + key = np.fromiter(key, np.int64) + if len(key) == 0: + return table.table.slice(0, 0) + # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773) + return table.fast_gather(key % table.num_rows) + + _raise_bad_key_type(key) + + +def _is_array_with_nulls(pa_array: pa.Array) -> bool: + return pa_array.null_count > 0 + + +class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]): + """ + Arrow extractor are used to extract data from pyarrow tables. + It makes it possible to extract rows, columns and batches. + These three extractions types have to be implemented. + """ + + def extract_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def extract_column(self, pa_table: pa.Table) -> ColumnFormat: + raise NotImplementedError + + def extract_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + + +def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]: + """Return the first element of a batch (dict) as a row (dict)""" + return {key: array[0] for key, array in py_dict.items()} + + +class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]): + def extract_row(self, pa_table: pa.Table) -> pa.Table: + return pa_table + + def extract_column(self, pa_table: pa.Table) -> pa.Array: + return pa_table.column(0) + + def extract_batch(self, pa_table: pa.Table) -> pa.Table: + return pa_table + + +class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]): + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(pa_table.to_pydict()) + + def extract_column(self, pa_table: pa.Table) -> list: + return pa_table.column(0).to_pylist() + + def extract_batch(self, pa_table: pa.Table) -> dict: + return pa_table.to_pydict() + + +class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]): + def __init__(self, **np_array_kwargs): + self.np_array_kwargs = np_array_kwargs + + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(self.extract_batch(pa_table)) + + def extract_column(self, pa_table: pa.Table) -> np.ndarray: + return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]]) + + def extract_batch(self, pa_table: pa.Table) -> dict: + return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names} + + def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray: + if isinstance(pa_array, pa.ChunkedArray): + if isinstance(pa_array.type, _ArrayXDExtensionType): + # don't call to_pylist() to preserve dtype of the fixed-size array + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = [ + row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) + ] + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and all( + not _is_array_with_nulls(chunk) for chunk in pa_array.chunks + ) + array: List = [ + row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) + ] + else: + if isinstance(pa_array.type, _ArrayXDExtensionType): + # 
don't call to_pylist() to preserve dtype of the fixed-size array + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only) + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist() + if len(array) > 0: + if any( + (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape)) + or (isinstance(x, float) and np.isnan(x)) + for x in array + ): + return np.array(array, copy=False, dtype=object) + return np.array(array, copy=False) + + +class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]): + def extract_row(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper) + + def extract_column(self, pa_table: pa.Table) -> pd.Series: + return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]] + + def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.to_pandas(types_mapper=pandas_types_mapper) + + +class PythonFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: dict) -> dict: + return self.features.decode_example(row) if self.features else row + + def decode_column(self, column: list, column_name: str) -> list: + return self.features.decode_column(column, column_name) if self.features else column + + def decode_batch(self, batch: dict) -> dict: + return self.features.decode_batch(batch) if self.features else batch + + +class PandasFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: pd.DataFrame) -> pd.DataFrame: + decode = ( + { + column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) + for column_name, feature in self.features.items() + if self.features._column_requires_decoding[column_name] + } + if self.features + else {} + ) + if decode: + row[list(decode.keys())] = row.transform(decode) + return row + + def decode_column(self, column: pd.Series, column_name: str) -> pd.Series: + decode = ( + no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) + if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] + else None + ) + if decode: + column = column.transform(decode) + return column + + def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame: + return self.decode_row(batch) + + +class LazyDict(MutableMapping): + """A dictionary backed by Arrow data. 
The values are formatted on-the-fly when accessing the dictionary.""" + + def __init__(self, pa_table: pa.Table, formatter: "Formatter"): + self.pa_table = pa_table + self.formatter = formatter + + self.data = {key: None for key in pa_table.column_names} + self.keys_to_format = set(self.data.keys()) + + def __len__(self): + return len(self.data) + + def __getitem__(self, key): + value = self.data[key] + if key in self.keys_to_format: + value = self.format(key) + self.data[key] = value + self.keys_to_format.remove(key) + return value + + def __setitem__(self, key, value): + if key in self.keys_to_format: + self.keys_to_format.remove(key) + self.data[key] = value + + def __delitem__(self, key) -> None: + if key in self.keys_to_format: + self.keys_to_format.remove(key) + del self.data[key] + + def __iter__(self): + return iter(self.data) + + def __contains__(self, key): + return key in self.data + + def __repr__(self): + self._format_all() + return repr(self.data) + + if config.PY_VERSION >= version.parse("3.9"): + # merging with the union ("|") operator is supported in Python 3.9+ + + def __or__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = inst.data | other.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = inst.data | other + return inst + return NotImplemented + + def __ror__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = other.data | inst.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = other | inst.data + return inst + return NotImplemented + + def __ior__(self, other): + if isinstance(other, LazyDict): + other = other.copy() + other._format_all() + self.keys_to_format -= other.data.keys() + self.data |= other.data + else: + self.keys_to_format -= other.keys() + self.data |= other + return self + + def __copy__(self): + # Identical to `UserDict.__copy__` + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"].copy() + inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy() + return inst + + def copy(self): + import copy + + return copy.copy(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + raise NotImplementedError + + def format(self, key): + raise NotImplementedError + + def _format_all(self): + for key in self.keys_to_format: + self.data[key] = self.format(key) + self.keys_to_format.clear() + + +class LazyRow(LazyDict): + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key]))[0] + + +class LazyBatch(LazyDict): + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key])) + + +class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]): + """ + A formatter is an object that extracts and formats data from pyarrow tables. + It defines the formatting for rows, columns and batches. 
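    As an illustration of the lazy containers above, a `LazyRow` only formats a column the first time its
    key is accessed and caches the result afterwards. A minimal sketch using the `PythonFormatter`
    defined below with `lazy=True`:

    ```py
    >>> import pyarrow as pa
    >>> from datasets.formatting import PythonFormatter
    >>> pa_table = pa.table({"a": [1, 2], "b": [10, 20]})
    >>> lazy_row = PythonFormatter(lazy=True).format_row(pa_table)   # LazyRow, nothing formatted yet
    >>> lazy_row["a"]   # column "a" is formatted on first access
    1
    ```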
+ """ + + simple_arrow_extractor = SimpleArrowExtractor + python_arrow_extractor = PythonArrowExtractor + numpy_arrow_extractor = NumpyArrowExtractor + pandas_arrow_extractor = PandasArrowExtractor + + def __init__(self, features: Optional[Features] = None): + self.features = features + self.python_features_decoder = PythonFeaturesDecoder(self.features) + self.pandas_features_decoder = PandasFeaturesDecoder(self.features) + + def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]: + if query_type == "row": + return self.format_row(pa_table) + elif query_type == "column": + return self.format_column(pa_table) + elif query_type == "batch": + return self.format_batch(pa_table) + + def format_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + raise NotImplementedError + + def format_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + + +class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]): + def recursive_tensorize(self, data_struct: dict): + raise NotImplementedError + + +class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]): + def format_row(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_row(pa_table) + + def format_column(self, pa_table: pa.Table) -> pa.Array: + return self.simple_arrow_extractor().extract_column(pa_table) + + def format_batch(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_batch(pa_table) + + +class PythonFormatter(Formatter[Mapping, list, Mapping]): + def __init__(self, features=None, lazy=False): + super().__init__(features) + self.lazy = lazy + + def format_row(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyRow(pa_table, self) + row = self.python_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> list: + column = self.python_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyBatch(pa_table, self) + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return batch + + +class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]): + def format_row(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_row(pa_table) + row = self.pandas_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> pd.Series: + column = self.pandas_arrow_extractor().extract_column(pa_table) + column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_batch(pa_table) + row = self.pandas_features_decoder.decode_batch(row) + return row + + +class CustomFormatter(Formatter[dict, ColumnFormat, dict]): + """ + A user-defined custom formatter function defined by a ``transform``. + The transform must take as input a batch of data extracted for an arrow table using the python extractor, + and return a batch. + If the output batch is not a dict, then output_all_columns won't work. 
+ If the ouput batch has several fields, then querying a single column won't work since we don't know which field + to return. + """ + + def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs): + super().__init__(features=features) + self.transform = transform + + def format_row(self, pa_table: pa.Table) -> dict: + formatted_batch = self.format_batch(pa_table) + try: + return _unnest(formatted_batch) + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}" + ) from exc + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + formatted_batch = self.format_batch(pa_table) + if hasattr(formatted_batch, "keys"): + if len(formatted_batch.keys()) > 1: + raise TypeError( + "Tried to query a column but the custom formatting function returns too many columns. " + f"Only one column was expected but got columns {list(formatted_batch.keys())}." + ) + else: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" + ) + try: + return formatted_batch[pa_table.column_names[0]] + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" + ) from exc + + def format_batch(self, pa_table: pa.Table) -> dict: + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return self.transform(batch) + + +def _check_valid_column_key(key: str, columns: List[str]) -> None: + if key not in columns: + raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}") + + +def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: + if isinstance(key, int): + if (key < 0 and key + size < 0) or (key >= size): + raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") + return + elif isinstance(key, slice): + pass + elif isinstance(key, range): + if len(key) > 0: + _check_valid_index_key(max(key), size=size) + _check_valid_index_key(min(key), size=size) + elif isinstance(key, Iterable): + if len(key) > 0: + _check_valid_index_key(int(max(key)), size=size) + _check_valid_index_key(int(min(key)), size=size) + else: + _raise_bad_key_type(key) + + +def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: + if isinstance(key, int): + return "row" + elif isinstance(key, str): + return "column" + elif isinstance(key, (slice, range, Iterable)): + return "batch" + _raise_bad_key_type(key) + + +def query_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + indices: Optional[Table] = None, +) -> pa.Table: + """ + Query a Table to extract the subtable that correspond to the given key. + + Args: + table (``datasets.table.Table``): The input Table to query from + key (``Union[int, slice, range, str, Iterable]``): The key can be of different types: + - an integer i: the subtable containing only the i-th row + - a slice [i:j:k]: the subtable containing the rows that correspond to this slice + - a range(i, j, k): the subtable containing the rows that correspond to this range + - a string c: the subtable containing all the rows but only the column c + - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable + indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows. 
+ The indices table must contain one column named "indices" of type uint64. + This is used in case of shuffling or rows selection. + + + Returns: + ``pyarrow.Table``: the result of the query on the input table + """ + # Check if key is valid + if not isinstance(key, (int, slice, range, str, Iterable)): + _raise_bad_key_type(key) + if isinstance(key, str): + _check_valid_column_key(key, table.column_names) + else: + size = indices.num_rows if indices is not None else table.num_rows + _check_valid_index_key(key, size) + # Query the main table + if indices is None: + pa_subtable = _query_table(table, key) + else: + pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices) + return pa_subtable + + +def format_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + formatter: Formatter, + format_columns: Optional[list] = None, + output_all_columns=False, +): + """ + Format a Table depending on the key that was used and a Formatter object. + + Args: + table (``datasets.table.Table``): The input Table to format + key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats + the table as either a row, a column or a batch. + formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as + PythonFormatter, NumpyFormatter, etc. + format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the + given formatter. Other columns are discarded (unless ``output_all_columns`` is True) + output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns + that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used. + + + Returns: + A row, column or batch formatted object defined by the Formatter: + - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column. + - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column. + - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column. + - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column. + - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column. 
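    A compact sketch of how `query_table` and `format_table` compose, using an in-memory table and the
    `PythonFormatter` with default arguments (no `format_columns`, no indices mapping):

    ```py
    >>> import pyarrow as pa
    >>> from datasets.formatting import PythonFormatter, format_table, query_table
    >>> from datasets.table import InMemoryTable
    >>> table = InMemoryTable.from_pydict({"text": ["a", "b", "c"], "label": [0, 1, 2]})
    >>> pa_subtable = query_table(table, 0)                  # pyarrow.Table with one row
    >>> format_table(pa_subtable, 0, PythonFormatter())
    {'text': 'a', 'label': 0}
    >>> format_table(query_table(table, "text"), "text", PythonFormatter())
    ['a', 'b', 'c']
    ```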
+ """ + if isinstance(table, Table): + pa_table = table.table + else: + pa_table = table + query_type = key_to_query_type(key) + python_formatter = PythonFormatter(features=formatter.features) + if format_columns is None: + return formatter(pa_table, query_type=query_type) + elif query_type == "column": + if key in format_columns: + return formatter(pa_table, query_type) + else: + return python_formatter(pa_table, query_type=query_type) + else: + pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns) + formatted_output = formatter(pa_table_to_format, query_type=query_type) + if output_all_columns: + if isinstance(formatted_output, MutableMapping): + pa_table_with_remaining_columns = pa_table.drop( + col for col in pa_table.column_names if col in format_columns + ) + remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) + formatted_output.update(remaining_columns_dict) + else: + raise TypeError( + f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}" + ) + return formatted_output diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..8035341c5cd2794345163b388945b3a092708916 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py @@ -0,0 +1,160 @@ +# Copyright 2021 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING, Dict, Optional + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.logging import get_logger +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import jax + import jaxlib + +logger = get_logger() + +DEVICE_MAPPING: Optional[dict] = None + + +class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]): + def __init__(self, features=None, device=None, **jnp_array_kwargs): + super().__init__(features=features) + import jax + from jaxlib.xla_client import Device + + if isinstance(device, Device): + raise ValueError( + f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` " + "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " + "the device with `str()` to get its string identifier that will be internally mapped " + "to the actual `jaxlib.xla_extension.Device`." 
+ ) + self.device = device if isinstance(device, str) else str(jax.devices()[0]) + # using global variable since `jaxlib.xla_extension.Device` is not serializable neither + # with `pickle` nor with `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + if self.device not in list(DEVICE_MAPPING.keys()): + logger.warning( + f"Device with string identifier {self.device} not listed among the available " + f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default " + f"device: {str(jax.devices()[0])}." + ) + self.device = str(jax.devices()[0]) + self.jnp_array_kwargs = jnp_array_kwargs + + @staticmethod + def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]: + import jax + + return {str(device): device for device in jax.devices()} + + def _consolidate(self, column): + import jax + import jax.numpy as jnp + + if isinstance(column, list) and column: + if all( + isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return jnp.stack(column, axis=0) + return column + + def _tensorize(self, value): + import jax + import jax.numpy as jnp + + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + # the default int precision depends on the jax config + # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision + if jax.config.jax_enable_x64: + default_dtype = {"dtype": jnp.int64} + else: + default_dtype = {"dtype": jnp.int32} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": jnp.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + # using global variable since `jaxlib.xla_extension.Device` is not serializable neither + # with `pickle` nor with `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + + with jax.default_device(DEVICE_MAPPING[self.device]): + # calling jnp.array on a np.ndarray does copy the data + # see https://github.com/google/jax/issues/4486 + return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + import jax + + # support for torch, tf, jax etc. 
+ if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "jax.Array": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/np_formatter.py b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/np_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..95bcff2b51728fdd9647dad382639724df163ce2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/np_formatter.py @@ -0,0 +1,106 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from collections.abc import Mapping + +import numpy as np +import pyarrow as pa + +from .. 
import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]): + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) + self.np_array_kwargs = np_array_kwargs + + def _consolidate(self, column): + if isinstance(column, list): + if column and all( + isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return np.stack(column) + else: + # don't use np.array(column, dtype=object) + # since it fails in certain cases + # see https://stackoverflow.com/q/51005699 + out = np.empty(len(column), dtype=object) + out[:] = column + return out + return column + + def _tensorize(self, value): + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value + elif isinstance(value, np.number): + return value + + default_dtype = {} + + if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": np.int64} + elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": np.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + return np.asarray(value, **self.np_array_kwargs) + + return np.asarray(value, **{**default_dtype, **self.np_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + # support for torch, tf, jax etc. + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + if isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> np.ndarray: + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py new file mode 100644 index 
0000000000000000000000000000000000000000..adb15cda3815d77fa0272562e83fda029d1babee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py @@ -0,0 +1,115 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import tensorflow as tf + + +class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]): + def __init__(self, features=None, **tf_tensor_kwargs): + super().__init__(features=features) + self.tf_tensor_kwargs = tf_tensor_kwargs + import tensorflow as tf # noqa: F401 - import tf at initialization + + def _consolidate(self, column): + import tensorflow as tf + + if isinstance(column, list) and column: + if all( + isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return tf.stack(column) + elif all( + isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype + for x in column + ): + # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated + return tf.ragged.stack(column) + + return column + + def _tensorize(self, value): + import tensorflow as tf + + if value is None: + return value + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": tf.int64} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": tf.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import tensorflow as tf + + # support for torch, tf, jax etc. 
+ if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # tf tensors cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "tf.Tensor": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..cf287e67eb806cbc2be0f0b4febdcd334854ec05 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py @@ -0,0 +1,111 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. 
import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import torch + + +class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]): + def __init__(self, features=None, **torch_tensor_kwargs): + super().__init__(features=features) + self.torch_tensor_kwargs = torch_tensor_kwargs + import torch # noqa import torch at initialization + + def _consolidate(self, column): + import torch + + if isinstance(column, list) and column: + if all( + isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype + for x in column + ): + return torch.stack(column) + return column + + def _tensorize(self, value): + import torch + + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": torch.int64} + + # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility. + # np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss. + if value.dtype in [np.uint16, np.uint32]: + value = value.astype(np.int64) + + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": torch.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import torch + + # support for torch, tf, jax etc. 
+ if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "torch.Tensor": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45cf8125022101cfb540c3f38d11049f665bf9ee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..693c55e3b0f461314a4e5d4f0f48d382d22928c4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b16da7d5fe3719edc23d581a9f6b599e250a3b3f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c350e7f32fda869c9af9086c2d24a5821661520 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c61e8cea6cd61145fe1a7fdde538ee65fd5619e3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4798d25e4c461b4db9db6a205ceaaff9326da4b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a08b1b63ebb6ba630db86ba27b4327382047bec2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..1efa721b159668a72d29f5afa38c36bcaff084ea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py @@ -0,0 +1,31 @@ +from dataclasses import dataclass +from typing import Callable, Optional + +import datasets + + +@dataclass +class GeneratorConfig(datasets.BuilderConfig): + generator: Optional[Callable] = None + gen_kwargs: Optional[dict] = None + features: Optional[datasets.Features] = None + + def __post_init__(self): + assert self.generator is not None, "generator must be specified" + + if self.gen_kwargs is None: + self.gen_kwargs = {} + + +class 
Generator(datasets.GeneratorBasedBuilder): + BUILDER_CONFIG_CLASS = GeneratorConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)] + + def _generate_examples(self, **gen_kwargs): + for idx, ex in enumerate(self.config.generator(**gen_kwargs)): + yield idx, ex diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75a96169662a71ff496460e748cfe061e1036577 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5908b876a41900353fac2594e8fd36cac5e686a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py new file mode 100644 index 0000000000000000000000000000000000000000..fee5f7c4c6123985beb2026ba4a01f80d7625205 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py @@ -0,0 +1,349 @@ +import os +import posixpath +import uuid +from dataclasses import dataclass +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union + +import numpy as np +import pyarrow as pa + +import datasets +from datasets.arrow_writer import ArrowWriter, ParquetWriter +from datasets.config import MAX_SHARD_SIZE +from datasets.filesystems import ( + is_remote_filesystem, + rename, +) +from datasets.iterable_dataset import _BaseExamplesIterable +from datasets.utils.py_utils import convert_file_size_to_int + + +logger = datasets.utils.logging.get_logger(__name__) + +if TYPE_CHECKING: + import pyspark + + +@dataclass +class SparkConfig(datasets.BuilderConfig): + """BuilderConfig for Spark.""" + + features: Optional[datasets.Features] = None + + +def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]): + df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}") + for partition_id in new_partition_order[1:]: + partition_df = df.select("*").where(f"part_id = {partition_id}") + df_combined = df_combined.union(partition_df) + return df_combined + + +def _generate_iterable_examples( + df: "pyspark.sql.DataFrame", + partition_order: List[int], +): + import pyspark + + def generate_fn(): + df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id")) + partition_df = _reorder_dataframe_by_partition(df_with_partition_id, 
partition_order) + row_id = 0 + # pipeline next partition in parallel to hide latency + rows = partition_df.toLocalIterator(prefetchPartitions=True) + curr_partition = -1 + for row in rows: + row_as_dict = row.asDict() + part_id = row_as_dict["part_id"] + row_as_dict.pop("part_id") + if curr_partition != part_id: + curr_partition = part_id + row_id = 0 + yield f"{part_id}_{row_id}", row_as_dict + row_id += 1 + + return generate_fn + + +class SparkExamplesIterable(_BaseExamplesIterable): + def __init__( + self, + df: "pyspark.sql.DataFrame", + partition_order=None, + ): + self.df = df + self.partition_order = partition_order or range(self.df.rdd.getNumPartitions()) + self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order) + + def __iter__(self): + yield from self.generate_examples_fn() + + def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable": + partition_order = list(range(self.df.rdd.getNumPartitions())) + generator.shuffle(partition_order) + return SparkExamplesIterable(self.df, partition_order=partition_order) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable": + partition_order = self.split_shard_indices_by_worker(worker_id, num_workers) + return SparkExamplesIterable(self.df, partition_order=partition_order) + + @property + def n_shards(self) -> int: + return len(self.partition_order) + + +class Spark(datasets.DatasetBuilder): + BUILDER_CONFIG_CLASS = SparkConfig + + def __init__( + self, + df: "pyspark.sql.DataFrame", + cache_dir: str = None, + working_dir: str = None, + **config_kwargs, + ): + import pyspark + + self._spark = pyspark.sql.SparkSession.builder.getOrCreate() + self.df = df + self._working_dir = working_dir + + super().__init__( + cache_dir=cache_dir, + config_name=str(self.df.semanticHash()), + **config_kwargs, + ) + + def _validate_cache_dir(self): + # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling + # error due to pickling the SparkContext. + cache_dir = self._cache_dir + + # Returns the path of the created file. + def create_cache_and_write_probe(context): + # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories + # already exist. + os.makedirs(cache_dir, exist_ok=True) + probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex) + # Opening the file in append mode will create a new file unless it already exists, in which case it will not + # change the file contents. + open(probe_file, "a") + return [probe_file] + + if self._spark.conf.get("spark.master", "").startswith("local"): + return + + # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS + # accessible to the driver. + # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
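The check that follows only fires a real probe when the Spark master is not local: a worker writes a file into `cache_dir`, and the driver must then be able to see that file, which only holds if `cache_dir` lives on storage shared by the whole cluster. A minimal usage sketch from the caller's side (the DataFrame contents and the shared path are illustrative assumptions, not anything this module prescribes):

```python
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "hello"), (2, "world")], ["id", "text"])

# On a multi-node cluster, point cache_dir at storage visible to the driver
# and every executor (e.g. an NFS mount or DBFS); "/mnt/shared/hf_cache" is
# just a placeholder path.
ds = Dataset.from_spark(df, cache_dir="/mnt/shared/hf_cache")
```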
+ if self._cache_dir: + probe = ( + self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() + ) + if os.path.isfile(probe[0]): + return + + raise ValueError( + "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" + ) + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] + + def _repartition_df_if_needed(self, max_shard_size): + import pyspark + + def get_arrow_batch_size(it): + for batch in it: + yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]}) + + df_num_rows = self.df.count() + sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 + # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. + approx_bytes_per_row = ( + self.df.limit(sample_num_rows) + .repartition(1) + .mapInArrow(get_arrow_batch_size, "batch_bytes: long") + .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes")) + .collect()[0] + .sample_bytes + / sample_num_rows + ) + approx_total_size = approx_bytes_per_row * df_num_rows + if approx_total_size > max_shard_size: + # Make sure there is at least one row per partition. + new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) + self.df = self.df.repartition(new_num_partitions) + + def _prepare_split_single( + self, + fpath: str, + file_format: str, + max_shard_size: int, + ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: + import pyspark + + writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter + working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath + embed_local_files = file_format == "parquet" + + # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to + # pickling the SparkContext. + features = self.config.features + writer_batch_size = self._writer_batch_size + storage_options = self._fs.storage_options + + def write_arrow(it): + # Within the same SparkContext, no two task attempts will share the same attempt ID. + task_id = pyspark.TaskContext().taskAttemptId() + first_batch = next(it, None) + if first_batch is None: + # Some partitions might not receive any data. 
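As a side note on `_repartition_df_if_needed` above: the per-row size is estimated from an Arrow-encoded sample of at most 100 rows, and the DataFrame is then repartitioned so that each partition should yield roughly one shard no larger than `max_shard_size`. A worked example of that arithmetic with made-up numbers:

```python
approx_bytes_per_row = 2_000            # averaged over the <=100-row Arrow sample
df_num_rows = 5_000_000
max_shard_size = 500 * 1024 * 1024      # 500 MiB

approx_total_size = approx_bytes_per_row * df_num_rows                 # 10 GB
new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
print(new_num_partitions)  # 19 -> ~19 partitions of ~526 MB, close to the 500 MiB target
```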
+ return pa.RecordBatch.from_arrays( + [[task_id], [0], [0]], + names=["task_id", "num_examples", "num_bytes"], + ) + shard_id = 0 + writer = writer_class( + features=features, + path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + writer_batch_size=writer_batch_size, + storage_options=storage_options, + embed_local_files=embed_local_files, + ) + table = pa.Table.from_batches([first_batch]) + writer.write_table(table) + for batch in it: + if max_shard_size is not None and writer._num_bytes >= max_shard_size: + num_examples, num_bytes = writer.finalize() + writer.close() + yield pa.RecordBatch.from_arrays( + [[task_id], [num_examples], [num_bytes]], + names=["task_id", "num_examples", "num_bytes"], + ) + shard_id += 1 + writer = writer_class( + features=writer._features, + path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + writer_batch_size=writer_batch_size, + storage_options=storage_options, + embed_local_files=embed_local_files, + ) + table = pa.Table.from_batches([batch]) + writer.write_table(table) + + if writer._num_bytes > 0: + num_examples, num_bytes = writer.finalize() + writer.close() + yield pa.RecordBatch.from_arrays( + [[task_id], [num_examples], [num_bytes]], + names=["task_id", "num_examples", "num_bytes"], + ) + + if working_fpath != fpath: + for file in os.listdir(os.path.dirname(working_fpath)): + dest = os.path.join(os.path.dirname(fpath), os.path.basename(file)) + shutil.move(file, dest) + + stats = ( + self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long") + .groupBy("task_id") + .agg( + pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), + pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), + pyspark.sql.functions.count("num_bytes").alias("num_shards"), + pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), + ) + .collect() + ) + for row in stats: + yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) + + def _prepare_split( + self, + split_generator: "datasets.SplitGenerator", + file_format: str = "arrow", + max_shard_size: Optional[Union[str, int]] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + self._validate_cache_dir() + + max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE) + self._repartition_df_if_needed(max_shard_size) + is_local = not is_remote_filesystem(self._fs) + path_join = os.path.join if is_local else posixpath.join + + SUFFIX = "-TTTTT-SSSSS-of-NNNNN" + fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" + fpath = path_join(self._output_dir, fname) + + total_num_examples = 0 + total_num_bytes = 0 + total_shards = 0 + task_id_and_num_shards = [] + all_shard_lengths = [] + + for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size): + ( + num_examples, + num_bytes, + num_shards, + shard_lengths, + ) = content + if num_bytes > 0: + total_num_examples += num_examples + total_num_bytes += num_bytes + total_shards += num_shards + task_id_and_num_shards.append((task_id, num_shards)) + all_shard_lengths.extend(shard_lengths) + + split_generator.split_info.num_examples = total_num_examples + split_generator.split_info.num_bytes = total_num_bytes + + # should rename everything at the end + logger.debug(f"Renaming {total_shards} shards.") + if total_shards > 1: + split_generator.split_info.shard_lengths = all_shard_lengths + + # Define fs outside of _rename_shard so that 
we don't reference self in the function, which will result in a + # pickling error due to pickling the SparkContext. + fs = self._fs + + # use the -SSSSS-of-NNNNN pattern + def _rename_shard( + task_id: int, + shard_id: int, + global_shard_id: int, + ): + rename( + fs, + fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), + ) + + args = [] + global_shard_id = 0 + for i in range(len(task_id_and_num_shards)): + task_id, num_shards = task_id_and_num_shards[i] + for shard_id in range(num_shards): + args.append([task_id, shard_id, global_shard_id]) + global_shard_id += 1 + self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() + else: + # don't use any pattern + shard_id = 0 + task_id = task_id_and_num_shards[0][0] + self._rename( + fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + fpath.replace(SUFFIX, ""), + ) + + def _get_examples_iterable_for_split( + self, + split_generator: "datasets.SplitGenerator", + ) -> SparkExamplesIterable: + return SparkExamplesIterable(self.df) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e1016734331ddf964fc5fcb078731083c4f2236 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a313f1f57ee57e391d68bcbca54af3dd1fb6287b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..b0791ba88594fb8e76c957a11cca9936cf321bb4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py @@ -0,0 +1,118 @@ +import sys +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union + +import pandas as pd +import pyarrow as pa + +import datasets +import datasets.config +from datasets.features.features import require_storage_cast +from datasets.table import table_cast + + +if TYPE_CHECKING: + import sqlite3 + + import sqlalchemy + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class SqlConfig(datasets.BuilderConfig): + """BuilderConfig for SQL.""" + + sql: Union[str, "sqlalchemy.sql.Selectable"] = None + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None + index_col: Optional[Union[str, List[str]]] = 
None + coerce_float: bool = True + params: Optional[Union[List, Tuple, Dict]] = None + parse_dates: Optional[Union[List, Dict]] = None + columns: Optional[List[str]] = None + chunksize: Optional[int] = 10_000 + features: Optional[datasets.Features] = None + + def __post_init__(self): + if self.sql is None: + raise ValueError("sql must be specified") + if self.con is None: + raise ValueError("con must be specified") + + def create_config_id( + self, + config_kwargs: dict, + custom_features: Optional[datasets.Features] = None, + ) -> str: + config_kwargs = config_kwargs.copy() + # We need to stringify the Selectable object to make its hash deterministic + + # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html + sql = config_kwargs["sql"] + if not isinstance(sql, str): + if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules: + import sqlalchemy + + if isinstance(sql, sqlalchemy.sql.Selectable): + engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://") + sql_str = str(sql.compile(dialect=engine.dialect)) + config_kwargs["sql"] = sql_str + else: + raise TypeError( + f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" + ) + else: + raise TypeError( + f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" + ) + con = config_kwargs["con"] + if not isinstance(con, str): + config_kwargs["con"] = id(con) + logger.info( + f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead." + ) + + return super().create_config_id(config_kwargs, custom_features=custom_features) + + @property + def pd_read_sql_kwargs(self): + pd_read_sql_kwargs = { + "index_col": self.index_col, + "columns": self.columns, + "params": self.params, + "coerce_float": self.coerce_float, + "parse_dates": self.parse_dates, + } + return pd_read_sql_kwargs + + +class Sql(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = SqlConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all(not require_storage_cast(feature) for feature in self.config.features.values()): + # cheaper cast + pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) + else: + # more expensive cast; allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, schema) + return pa_table + + def _generate_tables(self): + chunksize = self.config.chunksize + sql_reader = pd.read_sql( + self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs + ) + sql_reader = [sql_reader] if chunksize is None else sql_reader + for chunk_idx, df in enumerate(sql_reader): + pa_table = pa.Table.from_pandas(df) + yield chunk_idx, self._cast_table(pa_table) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c79daa3e57f5494f40ae94b3d78f348dfc3e1fb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7be438b78f335e9ba945e0ada443712e1567cd83 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19265ee55acc792ee2dd1dc9383a75e82780639d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py new file mode 100644 index 0000000000000000000000000000000000000000..cd8c054842e090dc09bdf3d2fee59241a1a928c5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py @@ -0,0 +1,285 @@ +# +# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved. +# This file coems from the WebDataset library. +# See the LICENSE file for licensing terms (BSD-style). +# + +""" +Binary tensor encodings for PyTorch and NumPy. + +This defines efficient binary encodings for tensors. The format is 8 byte +aligned and can be used directly for computations when transmitted, say, +via RDMA. The format is supported by WebDataset with the `.ten` filename +extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used +for fast tensor storage with LMDB and in disk files (which can be memory +mapped) + +Data is encoded as a series of chunks: + +- magic number (int64) +- length in bytes (int64) +- bytes (multiple of 64 bytes long) + +Arrays are a header chunk followed by a data chunk. +Header chunks have the following structure: + +- dtype (int64) +- 8 byte array name +- ndim (int64) +- dim[0] +- dim[1] +- ... 
+""" + +import struct +import sys + +import numpy as np + + +def bytelen(a): + """Determine the length of a in bytes.""" + if hasattr(a, "nbytes"): + return a.nbytes + elif isinstance(a, (bytearray, bytes)): + return len(a) + else: + raise ValueError(a, "cannot determine nbytes") + + +def bytedata(a): + """Return a the raw data corresponding to a.""" + if isinstance(a, (bytearray, bytes, memoryview)): + return a + elif hasattr(a, "data"): + return a.data + else: + raise ValueError(a, "cannot return bytedata") + + +# tables for converting between long/short NumPy dtypes + +long_to_short = """ +float16 f2 +float32 f4 +float64 f8 +int8 i1 +int16 i2 +int32 i4 +int64 i8 +uint8 u1 +uint16 u2 +unit32 u4 +uint64 u8 +""".strip() +long_to_short = [x.split() for x in long_to_short.split("\n")] +long_to_short = {x[0]: x[1] for x in long_to_short} +short_to_long = {v: k for k, v in long_to_short.items()} + + +def check_acceptable_input_type(data, allow64): + """Check that the data has an acceptable type for tensor encoding. + + :param data: array + :param allow64: allow 64 bit types + """ + for a in data: + if a.dtype.name not in long_to_short: + raise ValueError("unsupported dataypte") + if not allow64 and a.dtype.name not in ["float64", "int64", "uint64"]: + raise ValueError("64 bit datatypes not allowed unless explicitly enabled") + + +def str64(s): + """Convert a string to an int64.""" + s = s + "\0" * (8 - len(s)) + s = s.encode("ascii") + return struct.unpack("@q", s)[0] + + +def unstr64(i): + """Convert an int64 to a string.""" + b = struct.pack("@q", i) + return b.decode("ascii").strip("\0") + + +def check_infos(data, infos, required_infos=None): + """Verify the info strings.""" + if required_infos is False or required_infos is None: + return data + if required_infos is True: + return data, infos + if not isinstance(required_infos, (tuple, list)): + raise ValueError("required_infos must be tuple or list") + for required, actual in zip(required_infos, infos): + raise ValueError(f"actual info {actual} doesn't match required info {required}") + return data + + +def encode_header(a, info=""): + """Encode an array header as a byte array.""" + if a.ndim >= 10: + raise ValueError("too many dimensions") + if a.nbytes != np.prod(a.shape) * a.itemsize: + raise ValueError("mismatch between size and shape") + if a.dtype.name not in long_to_short: + raise ValueError("unsupported array type") + header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape) + return bytedata(np.array(header, dtype="i8")) + + +def decode_header(h): + """Decode a byte array into an array header.""" + h = np.frombuffer(h, dtype="i8") + if unstr64(h[0]) not in short_to_long: + raise ValueError("unsupported array type") + dtype = np.dtype(short_to_long[unstr64(h[0])]) + info = unstr64(h[1]) + rank = int(h[2]) + shape = tuple(h[3 : 3 + rank]) + return shape, dtype, info + + +def encode_list(l, infos=None): # noqa: E741 + """Given a list of arrays, encode them into a list of byte arrays.""" + if infos is None: + infos = [""] + else: + if len(l) != len(infos): + raise ValueError(f"length of list {l} must muatch length of infos {infos}") + result = [] + for i, a in enumerate(l): + header = encode_header(a, infos[i % len(infos)]) + result += [header, bytedata(a)] + return result + + +def decode_list(l, infos=False): # noqa: E741 + """Given a list of byte arrays, decode them into arrays.""" + result = [] + infos0 = [] + for header, data in zip(l[::2], l[1::2]): + shape, dtype, info = decode_header(header) + a = 
np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape) + result += [a] + infos0 += [info] + return check_infos(result, infos0, infos) + + +magic_str = "~TenBin~" +magic = str64(magic_str) +magic_bytes = unstr64(magic).encode("ascii") + + +def roundup(n, k=64): + """Round up to the next multiple of 64.""" + return k * ((n + k - 1) // k) + + +def encode_chunks(l): # noqa: E741 + """Encode a list of chunks into a single byte array, with lengths and magics..""" + size = sum(16 + roundup(b.nbytes) for b in l) + result = bytearray(size) + offset = 0 + for b in l: + result[offset : offset + 8] = magic_bytes + offset += 8 + result[offset : offset + 8] = struct.pack("@q", b.nbytes) + offset += 8 + result[offset : offset + bytelen(b)] = b + offset += roundup(bytelen(b)) + return result + + +def decode_chunks(buf): + """Decode a byte array into a list of chunks.""" + result = [] + offset = 0 + total = bytelen(buf) + while offset < total: + if magic_bytes != buf[offset : offset + 8]: + raise ValueError("magic bytes mismatch") + offset += 8 + nbytes = struct.unpack("@q", buf[offset : offset + 8])[0] + offset += 8 + b = buf[offset : offset + nbytes] + offset += roundup(nbytes) + result.append(b) + return result + + +def encode_buffer(l, infos=None): # noqa: E741 + """Encode a list of arrays into a single byte array.""" + if not isinstance(l, list): + raise ValueError("requires list") + return encode_chunks(encode_list(l, infos=infos)) + + +def decode_buffer(buf, infos=False): + """Decode a byte array into a list of arrays.""" + return decode_list(decode_chunks(buf), infos=infos) + + +def write_chunk(stream, buf): + """Write a byte chunk to the stream with magics, length, and padding.""" + nbytes = bytelen(buf) + stream.write(magic_bytes) + stream.write(struct.pack("@q", nbytes)) + stream.write(bytedata(buf)) + padding = roundup(nbytes) - nbytes + if padding > 0: + stream.write(b"\0" * padding) + + +def read_chunk(stream): + """Read a byte chunk from a stream with magics, length, and padding.""" + magic = stream.read(8) + if magic == b"": + return None + if magic != magic_bytes: + raise ValueError("magic number does not match") + nbytes = stream.read(8) + nbytes = struct.unpack("@q", nbytes)[0] + if nbytes < 0: + raise ValueError("negative nbytes") + data = stream.read(nbytes) + padding = roundup(nbytes) - nbytes + if padding > 0: + stream.read(padding) + return data + + +def write(stream, l, infos=None): # noqa: E741 + """Write a list of arrays to a stream, with magics, length, and padding.""" + for chunk in encode_list(l, infos=infos): + write_chunk(stream, chunk) + + +def read(stream, n=sys.maxsize, infos=False): + """Read a list of arrays from a stream, with magics, length, and padding.""" + chunks = [] + for _ in range(n): + header = read_chunk(stream) + if header is None: + break + data = read_chunk(stream) + if data is None: + raise ValueError("premature EOF") + chunks += [header, data] + return decode_list(chunks, infos=infos) + + +def save(fname, *args, infos=None, nocheck=False): + """Save a list of arrays to a file, with magics, length, and padding.""" + if not nocheck and not fname.endswith(".ten"): + raise ValueError("file name should end in .ten") + with open(fname, "wb") as stream: + write(stream, args, infos=infos) + + +def load(fname, infos=False, nocheck=False): + """Read a list of arrays from a file, with magics, length, and padding.""" + if not nocheck and not fname.endswith(".ten"): + raise ValueError("file name should end in .ten") + with open(fname, "rb") as stream: 
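Putting the helpers above together, a small round-trip sketch of the chunk format described in the module docstring (the import path simply mirrors where this file lives in the package and is the only assumption made here):

```python
import numpy as np

from datasets.packaged_modules.webdataset import _tenbin

original = np.arange(6, dtype="int32").reshape(2, 3)
# header chunk + data chunk, each magic-prefixed, length-prefixed and 64-byte aligned
buf = _tenbin.encode_buffer([original])
restored, = _tenbin.decode_buffer(buf)

assert restored.dtype == original.dtype
assert (restored == original).all()
```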
+ return read(stream, infos=infos) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac1e86fc417863ba9b5fd8fca97581c63d48768 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py @@ -0,0 +1,299 @@ +import io +import json +from itertools import islice +from typing import Any, Callable, Dict, List + +import numpy as np +import pyarrow as pa + +import datasets + + +logger = datasets.utils.logging.get_logger(__name__) + + +class WebDataset(datasets.GeneratorBasedBuilder): + DEFAULT_WRITER_BATCH_SIZE = 100 + IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script + AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script + DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script + NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5 + + @classmethod + def _get_pipeline_from_tar(cls, tar_path, tar_iterator): + current_example = {} + for filename, f in tar_iterator: + if "." in filename: + example_key, field_name = filename.split(".", 1) + if current_example and current_example["__key__"] != example_key: + yield current_example + current_example = {} + current_example["__key__"] = example_key + current_example["__url__"] = tar_path + current_example[field_name.lower()] = f.read() + if field_name in cls.DECODERS: + current_example[field_name] = cls.DECODERS[field_name](current_example[field_name]) + if current_example: + yield current_example + + def _info(self) -> datasets.DatasetInfo: + return datasets.DatasetInfo() + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + # Download the data files + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + tar_paths = data_files + if isinstance(tar_paths, str): + tar_paths = [tar_paths] + tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths] + splits = [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators} + ) + ] + else: + splits = [] + for split_name, tar_paths in data_files.items(): + if isinstance(tar_paths, str): + tar_paths = [tar_paths] + tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths] + splits.append( + datasets.SplitGenerator( + name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators} + ) + ) + if not self.info.features: + # Get one example to get the feature types + pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0]) + first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE)) + if any(example.keys() != first_examples[0].keys() for example in first_examples): + raise ValueError( + "The TAR archives of the dataset should be in WebDataset format, " + "but the files in the archive don't share the same prefix or the same types." 
+ ) + pa_tables = [pa.Table.from_pylist([example]) for example in first_examples] + if datasets.config.PYARROW_VERSION.major < 14: + inferred_arrow_schema = pa.concat_tables(pa_tables, promote=True).schema + else: + inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema + features = datasets.Features.from_arrow_schema(inferred_arrow_schema) + + # Set Image types + for field_name in first_examples[0]: + extension = field_name.rsplit(".", 1)[-1] + if extension in self.IMAGE_EXTENSIONS: + features[field_name] = datasets.Image() + # Set Audio types + for field_name in first_examples[0]: + extension = field_name.rsplit(".", 1)[-1] + if extension in self.AUDIO_EXTENSIONS: + features[field_name] = datasets.Audio() + self.info.features = features + + return splits + + def _generate_examples(self, tar_paths, tar_iterators): + image_field_names = [ + field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image) + ] + audio_field_names = [ + field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio) + ] + for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)): + for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)): + for field_name in image_field_names + audio_field_names: + example[field_name] = {"path": example["__key__"] + "." + field_name, "bytes": example[field_name]} + yield f"{tar_idx}_{example_idx}", example + + +# Obtained with: +# ``` +# import PIL.Image +# IMAGE_EXTENSIONS = [] +# PIL.Image.init() +# for ext, format in PIL.Image.EXTENSION.items(): +# if format in PIL.Image.OPEN: +# IMAGE_EXTENSIONS.append(ext[1:]) +# ``` +# We intentionally do not run this code on launch because: +# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed +# (2) To ensure the list of supported extensions is deterministic +IMAGE_EXTENSIONS = [ + "blp", + "bmp", + "dib", + "bufr", + "cur", + "pcx", + "dcx", + "dds", + "ps", + "eps", + "fit", + "fits", + "fli", + "flc", + "ftc", + "ftu", + "gbr", + "gif", + "grib", + "h5", + "hdf", + "png", + "apng", + "jp2", + "j2k", + "jpc", + "jpf", + "jpx", + "j2c", + "icns", + "ico", + "im", + "iim", + "tif", + "tiff", + "jfif", + "jpe", + "jpg", + "jpeg", + "mpg", + "mpeg", + "msp", + "pcd", + "pxr", + "pbm", + "pgm", + "ppm", + "pnm", + "psd", + "bw", + "rgb", + "rgba", + "sgi", + "ras", + "tga", + "icb", + "vda", + "vst", + "webp", + "wmf", + "emf", + "xbm", + "xpm", +] +WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS + + +# Obtained with: +# ``` +# import soundfile as sf +# +# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()] +# +# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30: +# AUDIO_EXTENSIONS.extend([".mp3", ".opus"]) +# ``` +# We intentionally do not run this code on launch because: +# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed +# (2) To ensure the list of supported extensions is deterministic +AUDIO_EXTENSIONS = [ + "aiff", + "au", + "avr", + "caf", + "flac", + "htk", + "svx", + "mat4", + "mat5", + "mpc2k", + "ogg", + "paf", + "pvf", + "raw", + "rf64", + "sd2", + "sds", + "ircam", + "voc", + "w64", + "wav", + "nist", + "wavex", + "wve", + "xi", + "mp3", + "opus", +] +WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS + + +def text_loads(data: bytes): + return data.decode("utf-8") + + +def 
tenbin_loads(data: bytes): + from . import _tenbin + + return _tenbin.decode_buffer(data) + + +def msgpack_loads(data: bytes): + import msgpack + + return msgpack.unpackb(data) + + +def npy_loads(data: bytes): + import numpy.lib.format + + stream = io.BytesIO(data) + return numpy.lib.format.read_array(stream, allow_pickle=False) + + +def npz_loads(data: bytes): + return np.load(io.BytesIO(data), allow_pickle=False) + + +def cbor_loads(data: bytes): + import cbor + + return cbor.loads(data) + + +# Obtained by checking `decoders` in `webdataset.autodecode` +# and removing unsafe extension decoders. +# Removed Pickle decoders: +# - "pyd": lambda data: pickle.loads(data) +# - "pickle": lambda data: pickle.loads(data) +# Removed Torch decoders: +# - "pth": lambda data: torch_loads(data) +# Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False): +# - "npy": npy_loads, +# - "npz": lambda data: np.load(io.BytesIO(data)), +DECODERS = { + "txt": text_loads, + "text": text_loads, + "transcript": text_loads, + "cls": int, + "cls2": int, + "index": int, + "inx": int, + "id": int, + "json": json.loads, + "jsn": json.loads, + "ten": tenbin_loads, + "tb": tenbin_loads, + "mp": msgpack_loads, + "msg": msgpack_loads, + "npy": npy_loads, + "npz": npz_loads, + "cbor": cbor_loads, +} +WebDataset.DECODERS = DECODERS diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d83093588514bec18b3536f4287a699939af499e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__init__.py @@ -0,0 +1 @@ +from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87ff5055fa70ba95e2d0646f9827ce8e2d00969d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e7424d36ff95cd9b1089403eb5099a6eb3ca839 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/parallel/parallel.py b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..4e1a8546c586b94094f915e64268c58155c99fba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/parallel/parallel.py @@ -0,0 +1,113 @@ +import contextlib +from multiprocessing import Pool, RLock + +from tqdm.auto import tqdm + +from ..utils import experimental, logging + + +logger = logging.get_logger(__name__) + + +class ParallelBackendConfig: + backend_name = None + + +@experimental +def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): + """ + **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either + 
multiprocessing.Pool or joblib for parallelization. + + Args: + function (`Callable[[Any], Any]`): Function to be applied to `iterable`. + iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to. + num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib). + types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements. + disable_tqdm (`bool`): Whether to disable the tqdm progressbar. + desc (`str`): Prefix for the tqdm progressbar. + single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`. + Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an + element of `iterable`, and `rank` is used for progress bar. + """ + if ParallelBackendConfig.backend_name is None: + return _map_with_multiprocessing_pool( + function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func + ) + + return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func) + + +def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): + num_proc = num_proc if num_proc <= len(iterable) else len(iterable) + split_kwds = [] # We organize the splits ourselve (contiguous splits) + for index in range(num_proc): + div = len(iterable) // num_proc + mod = len(iterable) % num_proc + start = div * index + min(index, mod) + end = start + div + (1 if index < mod else 0) + split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc)) + + if len(iterable) != sum(len(i[1]) for i in split_kwds): + raise ValueError( + f"Error dividing inputs iterable among processes. " + f"Total number of objects {len(iterable)}, " + f"length: {sum(len(i[1]) for i in split_kwds)}" + ) + + logger.info( + f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}" + ) + initargs, initializer = None, None + if not disable_tqdm: + initargs, initializer = (RLock(),), tqdm.set_lock + with Pool(num_proc, initargs=initargs, initializer=initializer) as pool: + mapped = pool.map(single_map_nested_func, split_kwds) + logger.info(f"Finished {num_proc} processes") + mapped = [obj for proc_res in mapped for obj in proc_res] + logger.info(f"Unpacked {len(mapped)} objects") + + return mapped + + +def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): + # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib, + # and it requires monkey-patching joblib internal classes which is subject to change + import joblib + + with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc): + return joblib.Parallel()( + joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable + ) + + +@experimental +@contextlib.contextmanager +def parallel_backend(backend_name: str): + """ + **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization + implemented by joblib. + + Args: + backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib. 
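Before the usage example below, a quick worked illustration of how `_map_with_multiprocessing_pool` above carves the iterable into contiguous slices, with the first `mod` processes receiving one extra item (all sizes are made up):

```python
iterable = list(range(10))
num_proc = 3
div, mod = len(iterable) // num_proc, len(iterable) % num_proc  # 3, 1

slices = []
for index in range(num_proc):
    start = div * index + min(index, mod)
    end = start + div + (1 if index < mod else 0)
    slices.append(iterable[start:end])

print(slices)  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```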
+ + Example usage: + ```py + with parallel_backend('spark'): + dataset = load_dataset(..., num_proc=2) + ``` + """ + ParallelBackendConfig.backend_name = backend_name + + if backend_name == "spark": + from joblibspark import register_spark + + register_spark() + + # TODO: call create_cache_and_write_probe if "download" in steps + # TODO: raise NotImplementedError when Dataset.map etc is called + + try: + yield + finally: + ParallelBackendConfig.backend_name = None diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5df74ff8cac8f1fd30a5dd786c9cd5c89d2880af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ruff: noqa + +from . import tqdm as _tqdm # _tqdm is the module +from .info_utils import VerificationMode +from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled +from .version import Version +from .experimental import experimental +from .tqdm import ( + disable_progress_bars, + enable_progress_bars, + are_progress_bars_disabled, + tqdm, +) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/_datasets_server.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/_datasets_server.py new file mode 100644 index 0000000000000000000000000000000000000000..3699a4081e85bf22d675f1566f2d710f1bff88e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/_datasets_server.py @@ -0,0 +1,96 @@ +from typing import Any, Dict, List, Optional, Union + +from .. import config +from ..exceptions import DatasetsError +from .file_utils import ( + get_authentication_headers_for_url, + http_get, +) +from .logging import get_logger + + +logger = get_logger(__name__) + + +class DatasetsServerError(DatasetsError): + """Dataset-server error. + + Raised when trying to use the Datasets-server HTTP API and when trying to access: + - a missing dataset, or + - a private/gated dataset and the user is not authenticated. 
+ - unavailable /parquet or /info responses + """ + + +def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]: + """ + Get the dataset exported parquet files + Docs: https://huggingface.co/docs/datasets-server/parquet + """ + datasets_server_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset=" + try: + parquet_data_files_response = http_get( + url=datasets_server_parquet_url + dataset, + temp_file=None, + headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token), + timeout=100.0, + max_retries=3, + ) + parquet_data_files_response.raise_for_status() + if "X-Revision" in parquet_data_files_response.headers: + if parquet_data_files_response.headers["X-Revision"] == revision or revision is None: + parquet_data_files_response_json = parquet_data_files_response.json() + if ( + parquet_data_files_response_json.get("partial") is False + and not parquet_data_files_response_json.get("pending", True) + and not parquet_data_files_response_json.get("failed", True) + and "parquet_files" in parquet_data_files_response_json + ): + return parquet_data_files_response_json["parquet_files"] + else: + logger.debug(f"Parquet export for {dataset} is not completely ready yet.") + else: + logger.debug( + f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')" + ) + except Exception as e: # noqa catch any exception of the datasets-server and consider the parquet export doesn't exist + logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})") + raise DatasetsServerError("No exported Parquet files available.") + + +def get_exported_dataset_infos( + dataset: str, revision: str, token: Optional[Union[str, bool]] +) -> Dict[str, Dict[str, Any]]: + """ + Get the dataset information, can be useful to get e.g. the dataset features. 
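Both helpers in this module boil down to a GET against the `datasets-server.` subdomain of `HF_ENDPOINT`. A simplified sketch of the /parquet call (no auth headers, retries, or revision check, and the dataset name is only an example):

```python
import requests

resp = requests.get(
    "https://datasets-server.huggingface.co/parquet",
    params={"dataset": "squad"},  # example dataset name
    timeout=100,
)
resp.raise_for_status()
payload = resp.json()
if payload.get("partial") is False and not payload.get("pending") and not payload.get("failed"):
    parquet_files = payload["parquet_files"]  # one entry per exported parquet shard
```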
+ Docs: https://huggingface.co/docs/datasets-server/info + """ + datasets_server_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset=" + try: + info_response = http_get( + url=datasets_server_info_url + dataset, + temp_file=None, + headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token), + timeout=100.0, + max_retries=3, + ) + info_response.raise_for_status() + if "X-Revision" in info_response.headers: + if info_response.headers["X-Revision"] == revision or revision is None: + info_response = info_response.json() + if ( + info_response.get("partial") is False + and not info_response.get("pending", True) + and not info_response.get("failed", True) + and "dataset_info" in info_response + ): + return info_response["dataset_info"] + else: + logger.debug(f"Dataset info for {dataset} is not completely ready yet.") + else: + logger.debug( + f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')" + ) + except Exception as e: # noqa catch any exception of the datasets-server and consider the dataset info doesn't exist + logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})") + raise DatasetsServerError("No exported dataset infos available.") diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/_dill.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/_dill.py new file mode 100644 index 0000000000000000000000000000000000000000..15578198a39622340f937a3dfdd9091af26d5453 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/_dill.py @@ -0,0 +1,459 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Extends `dill` to support pickling more types and produce more consistent dumps.""" + +import os +import sys +from io import BytesIO +from types import CodeType, FunctionType + +import dill +from packaging import version + +from .. 
import config + + +class Pickler(dill.Pickler): + dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy()) + _legacy_no_dict_keys_sorting = False + + def save(self, obj, save_persistent_id=True): + obj_type = type(obj) + if obj_type not in self.dispatch: + if "regex" in sys.modules: + import regex # type: ignore + + if obj_type is regex.Pattern: + pklregister(obj_type)(_save_regexPattern) + if "spacy" in sys.modules: + import spacy # type: ignore + + if issubclass(obj_type, spacy.Language): + pklregister(obj_type)(_save_spacyLanguage) + if "tiktoken" in sys.modules: + import tiktoken # type: ignore + + if obj_type is tiktoken.Encoding: + pklregister(obj_type)(_save_tiktokenEncoding) + if "torch" in sys.modules: + import torch # type: ignore + + if issubclass(obj_type, torch.Tensor): + pklregister(obj_type)(_save_torchTensor) + + if obj_type is torch.Generator: + pklregister(obj_type)(_save_torchGenerator) + + # Unwrap `torch.compile`-ed modules + if issubclass(obj_type, torch.nn.Module): + obj = getattr(obj, "_orig_mod", obj) + if "transformers" in sys.modules: + import transformers # type: ignore + + if issubclass(obj_type, transformers.PreTrainedTokenizerBase): + pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase) + + # Unwrap `torch.compile`-ed functions + if obj_type is FunctionType: + obj = getattr(obj, "_torchdynamo_orig_callable", obj) + dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id) + + def _batch_setitems(self, items): + if self._legacy_no_dict_keys_sorting: + return super()._batch_setitems(items) + # Ignore the order of keys in a dict + try: + # Faster, but fails for unorderable elements + items = sorted(items) + except Exception: # TypeError, decimal.InvalidOperation, etc. + from datasets.fingerprint import Hasher + + items = sorted(items, key=lambda x: Hasher.hash(x[0])) + dill.Pickler._batch_setitems(self, items) + + def memoize(self, obj): + # Don't memoize strings since two identical strings can have different Python ids + if type(obj) is not str: # noqa: E721 + dill.Pickler.memoize(self, obj) + + +def pklregister(t): + """Register a custom reducer for the type.""" + + def proxy(func): + Pickler.dispatch[t] = func + return func + + return proxy + + +def dump(obj, file): + """Pickle an object to a file.""" + Pickler(file, recurse=True).dump(obj) + + +def dumps(obj): + """Pickle an object to a string.""" + file = BytesIO() + dump(obj, file) + return file.getvalue() + + +if config.DILL_VERSION < version.parse("0.3.6"): + + def log(pickler, msg): + dill._dill.log.info(msg) + +elif config.DILL_VERSION.release[:3] in [ + version.parse("0.3.6").release, + version.parse("0.3.7").release, + version.parse("0.3.8").release, +]: + + def log(pickler, msg): + dill._dill.logger.trace(pickler, msg) + + +@pklregister(set) +def _save_set(pickler, obj): + log(pickler, f"Se: {obj}") + try: + # Faster, but fails for unorderable elements + args = (sorted(obj),) + except Exception: # TypeError, decimal.InvalidOperation, etc. 
+ from datasets.fingerprint import Hasher + + args = (sorted(obj, key=Hasher.hash),) + + pickler.save_reduce(set, args, obj=obj) + log(pickler, "# Se") + + +def _save_regexPattern(pickler, obj): + import regex # type: ignore + + log(pickler, f"Re: {obj}") + args = (obj.pattern, obj.flags) + pickler.save_reduce(regex.compile, args, obj=obj) + log(pickler, "# Re") + + +def _save_tiktokenEncoding(pickler, obj): + import tiktoken # type: ignore + + log(pickler, f"Enc: {obj}") + args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens) + pickler.save_reduce(tiktoken.Encoding, args, obj=obj) + log(pickler, "# Enc") + + +def _save_torchTensor(pickler, obj): + import torch # type: ignore + + # `torch.from_numpy` is not picklable in `torch>=1.11.0` + def create_torchTensor(np_array): + return torch.from_numpy(np_array) + + log(pickler, f"To: {obj}") + args = (obj.detach().cpu().numpy(),) + pickler.save_reduce(create_torchTensor, args, obj=obj) + log(pickler, "# To") + + +def _save_torchGenerator(pickler, obj): + import torch # type: ignore + + def create_torchGenerator(state): + generator = torch.Generator() + generator.set_state(state) + return generator + + log(pickler, f"Ge: {obj}") + args = (obj.get_state(),) + pickler.save_reduce(create_torchGenerator, args, obj=obj) + log(pickler, "# Ge") + + +def _save_spacyLanguage(pickler, obj): + import spacy # type: ignore + + def create_spacyLanguage(config, bytes): + lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"]) + lang_inst = lang_cls.from_config(config) + return lang_inst.from_bytes(bytes) + + log(pickler, f"Sp: {obj}") + args = (obj.config, obj.to_bytes()) + pickler.save_reduce(create_spacyLanguage, args, obj=obj) + log(pickler, "# Sp") + + +def _save_transformersPreTrainedTokenizerBase(pickler, obj): + log(pickler, f"Tok: {obj}") + # Ignore the `cache` attribute + state = obj.__dict__ + if "cache" in state and isinstance(state["cache"], dict): + state["cache"] = {} + pickler.save_reduce(type(obj), (), state=state, obj=obj) + log(pickler, "# Tok") + + +if config.DILL_VERSION < version.parse("0.3.6"): + + @pklregister(CodeType) + def _save_code(pickler, obj): + """ + From dill._dill.save_code + This is a modified version that removes the origin (filename + line no.) + of functions created in notebooks or shells for example. + """ + dill._dill.log.info(f"Co: {obj}") + # The filename of a function is the .py file where it is defined. + # Filenames of functions created in notebooks or shells start with '<' + # ex: for ipython, and for shell + # Filenames of functions created in ipykernel the filename + # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" + # Moreover lambda functions have a special name: '' + # ex: (lambda x: x).__code__.co_name == "" # True + # + # For the hashing mechanism we ignore where the function has been defined + # More specifically: + # - we ignore the filename of special functions (filename starts with '<') + # - we always ignore the line number + # - we only use the base name of the file instead of the whole path, + # to be robust in case a script is moved for example. 
+ # + # Only those two lines are different from the original implementation: + co_filename = ( + "" + if obj.co_filename.startswith("<") + or ( + len(obj.co_filename.split(os.path.sep)) > 1 + and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") + ) + or obj.co_name == "" + else os.path.basename(obj.co_filename) + ) + co_firstlineno = 1 + # The rest is the same as in the original dill implementation + if dill._dill.PY3: + if hasattr(obj, "co_posonlyargcount"): + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, + obj.co_name, + co_firstlineno, + obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + else: + args = ( + obj.co_argcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, + obj.co_name, + co_firstlineno, + obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + else: + args = ( + obj.co_argcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, + obj.co_name, + co_firstlineno, + obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + pickler.save_reduce(CodeType, args, obj=obj) + dill._dill.log.info("# Co") + return + +elif config.DILL_VERSION.release[:3] in [ + version.parse("0.3.6").release, + version.parse("0.3.7").release, + version.parse("0.3.8").release, +]: + # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104 + @pklregister(CodeType) + def save_code(pickler, obj): + dill._dill.logger.trace(pickler, "Co: %s", obj) + + ############################################################################################################ + # Modification here for huggingface/datasets + # The filename of a function is the .py file where it is defined. + # Filenames of functions created in notebooks or shells start with '<' + # ex: for ipython, and for shell + # Filenames of functions created in ipykernel the filename + # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" + # Moreover lambda functions have a special name: '' + # ex: (lambda x: x).__code__.co_name == "" # True + # + # For the hashing mechanism we ignore where the function has been defined + # More specifically: + # - we ignore the filename of special functions (filename starts with '<') + # - we always ignore the line number + # - we only use the base name of the file instead of the whole path, + # to be robust in case a script is moved for example. 
+ # + # Only those two lines are different from the original implementation: + co_filename = ( + "" + if obj.co_filename.startswith("<") + or ( + len(obj.co_filename.split(os.path.sep)) > 1 + and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") + ) + or obj.co_name == "" + else os.path.basename(obj.co_filename) + ) + co_firstlineno = 1 + # The rest is the same as in the original dill implementation, except for the replacements: + # - obj.co_filename => co_filename + # - obj.co_firstlineno => co_firstlineno + ############################################################################################################ + + if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args) + args = ( + obj.co_lnotab, # for < python 3.10 [not counted in args] + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, # Modification for huggingface/datasets ############################################ + obj.co_name, + obj.co_qualname, + co_firstlineno, # Modification for huggingface/datasets ######################################### + obj.co_linetable, + obj.co_endlinetable, + obj.co_columntable, + obj.co_exceptiontable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args) + args = ( + obj.co_lnotab, # for < python 3.10 [not counted in args] + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, # Modification for huggingface/datasets ############################################ + obj.co_name, + obj.co_qualname, + co_firstlineno, # Modification for huggingface/datasets ######################################### + obj.co_linetable, + obj.co_exceptiontable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_linetable"): # python 3.10 (16 args) + args = ( + obj.co_lnotab, # for < python 3.10 [not counted in args] + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, # Modification for huggingface/datasets ############################################ + obj.co_name, + co_firstlineno, # Modification for huggingface/datasets ######################################### + obj.co_linetable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args) + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, # Modification for huggingface/datasets ############################################ + obj.co_name, + co_firstlineno, # Modification for huggingface/datasets ######################################### + obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + else: # python 3.7 (15 args) + args = ( + obj.co_argcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + co_filename, # Modification for huggingface/datasets ############################################ + obj.co_name, + co_firstlineno, # Modification for huggingface/datasets ######################################### + 
obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + + pickler.save_reduce(dill._dill._create_code, args, obj=obj) + dill._dill.logger.trace(pickler, "# Co") + return diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/download_manager.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/download_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..b524c2f9686f65d083c424a4e17d001395b743b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/download_manager.py @@ -0,0 +1 @@ +# deprecated, please use datasets.download.download_manager diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/experimental.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/experimental.py new file mode 100644 index 0000000000000000000000000000000000000000..cc406154e9347f4df83b1f7b08c32a961d469f6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/experimental.py @@ -0,0 +1,43 @@ +"""Contains utilities to flag a feature as "experimental" in datasets.""" + +import warnings +from functools import wraps +from typing import Callable + + +def experimental(fn: Callable) -> Callable: + """Decorator to flag a feature as experimental. + + An experimental feature trigger a warning when used as it might be subject to breaking changes in the future. + + Args: + fn (`Callable`): + The function to flag as experimental. + + Returns: + `Callable`: The decorated function. + + Example: + + ```python + >>> from datasets.utils import experimental + + >>> @experimental + ... def my_function(): + ... print("Hello world!") + + >>> my_function() + UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future. + Hello world! + ``` + """ + + @wraps(fn) + def _inner_fn(*args, **kwargs): + warnings.warn( + (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."), + UserWarning, + ) + return fn(*args, **kwargs) + + return _inner_fn diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/file_utils.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/file_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b3532b9697684e1d2077dc4bdee3e2e88956c5ec --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/file_utils.py @@ -0,0 +1,690 @@ +""" +Utilities for working with the local dataset cache. +This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp +Copyright by the AllenNLP authors. +""" + +import copy +import io +import json +import multiprocessing +import os +import posixpath +import re +import shutil +import sys +import time +import urllib +import warnings +from contextlib import closing, contextmanager +from functools import partial +from pathlib import Path +from typing import Optional, TypeVar, Union +from unittest.mock import patch +from urllib.parse import urljoin, urlparse + +import fsspec +import huggingface_hub +import requests +from fsspec.core import strip_protocol +from fsspec.utils import can_be_local +from huggingface_hub.utils import insecure_hashlib +from packaging import version + +from .. import __version__, config +from ..download.download_config import DownloadConfig +from . import _tqdm, logging +from . 
import tqdm as hf_tqdm +from ._filelock import FileLock +from .extract import ExtractManager + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +INCOMPLETE_SUFFIX = ".incomplete" + +T = TypeVar("T", str, Path) + + +def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str: + """ + Add hf_modules_cache to the python path. + By default hf_modules_cache='~/.cache/huggingface/modules'. + It can also be set with the environment variable HF_MODULES_CACHE. + This is used to add modules such as `datasets_modules` + """ + hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE + hf_modules_cache = str(hf_modules_cache) + if hf_modules_cache not in sys.path: + sys.path.append(hf_modules_cache) + + os.makedirs(hf_modules_cache, exist_ok=True) + if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")): + with open(os.path.join(hf_modules_cache, "__init__.py"), "w"): + pass + return hf_modules_cache + + +def is_remote_url(url_or_filename: str) -> bool: + return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/") + + +def is_local_path(url_or_filename: str) -> bool: + # On unix the scheme of a local path is empty (for both absolute and relative), + # while on windows the scheme is the drive name (ex: "c") for absolute paths. + # for details on the windows behavior, see https://bugs.python.org/issue42215 + return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/") + + +def is_relative_path(url_or_filename: str) -> bool: + return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename) + + +def relative_to_absolute_path(path: T) -> T: + """Convert relative path to absolute path.""" + abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path)))) + return Path(abs_path_str) if isinstance(path, Path) else abs_path_str + + +def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str: + if dataset: + endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX + else: + endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX + return "/".join((endpoint, identifier, filename)) + + +def head_hf_s3( + identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0 +) -> Union[requests.Response, Exception]: + return http_head( + hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset), + max_retries=max_retries, + ) + + +def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str: + default_revision = "main" if version.parse(__version__).is_devrelease else __version__ + revision = revision or default_revision + if dataset: + return config.REPO_DATASETS_URL.format(revision=revision, path=path, name=name) + else: + return config.REPO_METRICS_URL.format(revision=revision, path=path, name=name) + + +def url_or_path_join(base_name: str, *pathnames: str) -> str: + if is_remote_url(base_name): + return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)) + else: + return Path(base_name, *pathnames).as_posix() + + +def url_or_path_parent(url_or_path: str) -> str: + if is_remote_url(url_or_path): + return url_or_path[: url_or_path.rindex("/")] + else: + return os.path.dirname(url_or_path) + + +def hash_url_to_filename(url, etag=None): + """ + Convert 
`url` into a hashed filename in a repeatable way. + If `etag` is specified, append its hash to the url's, delimited + by a period. + If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name + so that TF 2.0 can identify it as a HDF5 file + (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) + """ + url_bytes = url.encode("utf-8") + url_hash = insecure_hashlib.sha256(url_bytes) + filename = url_hash.hexdigest() + + if etag: + etag_bytes = etag.encode("utf-8") + etag_hash = insecure_hashlib.sha256(etag_bytes) + filename += "." + etag_hash.hexdigest() + + if url.endswith(".py"): + filename += ".py" + + return filename + + +def cached_path( + url_or_filename, + download_config=None, + **download_kwargs, +) -> str: + """ + Given something that might be a URL (or might be a local path), + determine which. If it's a URL, download the file and cache it, and + return the path to the cached file. If it's already a local path, + make sure the file exists and then return the path. + + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + ValueError: if it couldn't parse the url or filename correctly + requests.exceptions.ConnectionError: in case of internet connection issue + """ + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + + cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + if isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + + # Convert fsspec URL in the format "file://local/path" to "local/path" + if can_be_local(url_or_filename): + url_or_filename = strip_protocol(url_or_filename) + + if is_remote_url(url_or_filename): + # URL, so get it from the cache (downloading if necessary) + output_path = get_from_cache( + url_or_filename, + cache_dir=cache_dir, + force_download=download_config.force_download, + proxies=download_config.proxies, + resume_download=download_config.resume_download, + user_agent=download_config.user_agent, + local_files_only=download_config.local_files_only, + use_etag=download_config.use_etag, + max_retries=download_config.max_retries, + token=download_config.token, + ignore_url_params=download_config.ignore_url_params, + storage_options=download_config.storage_options, + download_desc=download_config.download_desc, + ) + elif os.path.exists(url_or_filename): + # File, and it exists. + output_path = url_or_filename + elif is_local_path(url_or_filename): + # File, but it doesn't exist. 
+ raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") + else: + # Something unknown + raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") + + if output_path is None: + return output_path + + if download_config.extract_compressed_file: + output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( + output_path, force_extract=download_config.force_extract + ) + + return relative_to_absolute_path(output_path) + + +def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: + ua = f"datasets/{__version__}" + ua += f"; python/{config.PY_VERSION}" + ua += f"; huggingface_hub/{huggingface_hub.__version__}" + ua += f"; pyarrow/{config.PYARROW_VERSION}" + if config.TORCH_AVAILABLE: + ua += f"; torch/{config.TORCH_VERSION}" + if config.TF_AVAILABLE: + ua += f"; tensorflow/{config.TF_VERSION}" + if config.JAX_AVAILABLE: + ua += f"; jax/{config.JAX_VERSION}" + if config.BEAM_AVAILABLE: + ua += f"; apache_beam/{config.BEAM_VERSION}" + if isinstance(user_agent, dict): + ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" + elif isinstance(user_agent, str): + ua += "; " + user_agent + return ua + + +def get_authentication_headers_for_url( + url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated" +) -> dict: + """Handle the HF authentication""" + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + if url.startswith(config.HF_ENDPOINT): + return huggingface_hub.utils.build_hf_headers( + token=token, library_name="datasets", library_version=__version__ + ) + else: + return {} + + +class OfflineModeIsEnabled(ConnectionError): + pass + + +def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None): + """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True.""" + if config.HF_DATASETS_OFFLINE: + raise OfflineModeIsEnabled( + "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg) + ) + + +def _request_with_retry( + method: str, + url: str, + max_retries: int = 0, + base_wait_time: float = 0.5, + max_wait_time: float = 2, + timeout: float = 10.0, + **params, +) -> requests.Response: + """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff. + + Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised. + + Args: + method (str): HTTP method, such as 'GET' or 'HEAD'. + url (str): The URL of the resource to fetch. + max_retries (int): Maximum number of retries, defaults to 0 (no retries). + base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between + retries then grows exponentially, capped by max_wait_time. + max_wait_time (float): Maximum amount of time between two retries, in seconds. + **params (additional keyword arguments): Params to pass to :obj:`requests.request`. 
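+
+    Returns:
+        requests.Response: The response of the last attempt (retrying stops as soon as a request succeeds).
+
+    Example (illustrative only, the URL below is hypothetical)::
+
+        response = _request_with_retry("GET", "https://example.com/data.json", max_retries=3)
+        response.raise_for_status()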
+ """ + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + tries, success = 0, False + while not success: + tries += 1 + try: + response = requests.request(method=method.upper(), url=url, timeout=timeout, **params) + success = True + except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err: + if tries > max_retries: + raise err + else: + logger.info(f"{method} request to {url} timed out, retrying... [{tries/max_retries}]") + sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff + time.sleep(sleep_time) + return response + + +def fsspec_head(url, storage_options=None): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options) + if len(paths) > 1: + raise ValueError(f"HEAD can be called with at most one path but was called with {paths}") + return fs.info(paths[0]) + + +def stack_multiprocessing_download_progress_bars(): + # Stack downloads progress bars automatically using HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS=1 + # We use environment variables since the download may happen in a subprocess + return patch.dict(os.environ, {"HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS": "1"}) + + +class TqdmCallback(fsspec.callbacks.TqdmCallback): + def __init__(self, tqdm_kwargs=None, *args, **kwargs): + super().__init__(tqdm_kwargs, *args, **kwargs) + self._tqdm = _tqdm # replace tqdm.tqdm by datasets.tqdm.tqdm + + +def fsspec_get(url, temp_file, storage_options=None, desc=None): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options) + if len(paths) > 1: + raise ValueError(f"GET can be called with at most one path but was called with {paths}") + callback = TqdmCallback( + tqdm_kwargs={ + "desc": desc or "Downloading", + "unit": "B", + "unit_scale": True, + "position": multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses + if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1" + and multiprocessing.current_process()._identity + else None, + } + ) + fs.get_file(paths[0], temp_file.name, callback=callback) + + +def ftp_head(url, timeout=10.0): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + try: + with closing(urllib.request.urlopen(url, timeout=timeout)) as r: + r.read(1) + except Exception: + return False + return True + + +def ftp_get(url, temp_file, timeout=10.0): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + try: + logger.info(f"Getting through FTP {url} into {temp_file.name}") + with closing(urllib.request.urlopen(url, timeout=timeout)) as r: + shutil.copyfileobj(r, temp_file) + except urllib.error.URLError as e: + raise ConnectionError(e) from None + + +def http_get( + url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None +) -> Optional[requests.Response]: + headers = dict(headers) if headers is not None else {} + headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) + if resume_size > 0: + headers["Range"] = f"bytes={resume_size:d}-" + response = _request_with_retry( + method="GET", + url=url, + stream=True, + proxies=proxies, + headers=headers, + cookies=cookies, + max_retries=max_retries, + timeout=timeout, + ) + if temp_file is None: + return response + if response.status_code == 416: # Range not satisfiable + return + content_length = 
response.headers.get("Content-Length") + total = resume_size + int(content_length) if content_length is not None else None + with hf_tqdm( + unit="B", + unit_scale=True, + total=total, + initial=resume_size, + desc=desc or "Downloading", + position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses + if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1" + and multiprocessing.current_process()._identity + else None, + ) as progress: + for chunk in response.iter_content(chunk_size=1024): + progress.update(len(chunk)) + temp_file.write(chunk) + + +def http_head( + url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0 +) -> requests.Response: + headers = copy.deepcopy(headers) or {} + headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) + response = _request_with_retry( + method="HEAD", + url=url, + proxies=proxies, + headers=headers, + cookies=cookies, + allow_redirects=allow_redirects, + timeout=timeout, + max_retries=max_retries, + ) + return response + + +def request_etag( + url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated" +) -> Optional[str]: + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + if urlparse(url).scheme not in ("http", "https"): + return None + headers = get_authentication_headers_for_url(url, token=token) + response = http_head(url, headers=headers, max_retries=3) + response.raise_for_status() + etag = response.headers.get("ETag") if response.ok else None + return etag + + +def get_from_cache( + url, + cache_dir=None, + force_download=False, + proxies=None, + etag_timeout=100, + resume_download=False, + user_agent=None, + local_files_only=False, + use_etag=True, + max_retries=0, + token=None, + use_auth_token="deprecated", + ignore_url_params=False, + storage_options=None, + download_desc=None, +) -> str: + """ + Given a URL, look for the corresponding file in the local cache. + If it's not there, download it. Then return the path to the cached file. 
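+
+    A minimal illustrative call (the URL and cache directory below are hypothetical)::
+
+        path = get_from_cache("https://example.com/data/train.csv", cache_dir="/tmp/hf_cache")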
+ + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + """ + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + if cache_dir is None: + cache_dir = config.HF_DATASETS_CACHE + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + os.makedirs(cache_dir, exist_ok=True) + + if ignore_url_params: + # strip all query parameters and #fragments from the URL + cached_url = urljoin(url, urlparse(url).path) + else: + cached_url = url # additional parameters may be added to the given URL + + connected = False + response = None + cookies = None + etag = None + head_error = None + scheme = None + + # Try a first time to file the file on the local file system without eTag (None) + # if we don't ask for 'force_download' then we spare a request + filename = hash_url_to_filename(cached_url, etag=None) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download and not use_etag: + return cache_path + + # Prepare headers for authentication + headers = get_authentication_headers_for_url(url, token=token) + if user_agent is not None: + headers["user-agent"] = user_agent + + # We don't have the file locally or we need an eTag + if not local_files_only: + scheme = urlparse(url).scheme + if scheme == "ftp": + connected = ftp_head(url) + elif scheme not in ("http", "https"): + response = fsspec_head(url, storage_options=storage_options) + # s3fs uses "ETag", gcsfs uses "etag" + etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None + connected = True + try: + response = http_head( + url, + allow_redirects=True, + proxies=proxies, + timeout=etag_timeout, + max_retries=max_retries, + headers=headers, + ) + if response.status_code == 200: # ok + etag = response.headers.get("ETag") if use_etag else None + for k, v in response.cookies.items(): + # In some edge cases, we need to get a confirmation token + if k.startswith("download_warning") and "drive.google.com" in url: + url += "&confirm=" + v + cookies = response.cookies + connected = True + # Fix Google Drive URL to avoid Virus scan warning + if "drive.google.com" in url and "confirm=" not in url: + url += "&confirm=t" + # In some edge cases, head request returns 400 but the connection is actually ok + elif ( + (response.status_code == 400 and "firebasestorage.googleapis.com" in url) + or (response.status_code == 405 and "drive.google.com" in url) + or ( + response.status_code == 403 + and ( + re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url) + or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url) + ) + ) + or (response.status_code == 403 and "ndownloader.figstatic.com" in url) + ): + connected = True + logger.info(f"Couldn't get ETag version for url {url}") + elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None: + raise ConnectionError( + f"Unauthorized for URL {url}. 
Please use the parameter `token=True` after logging in with `huggingface-cli login`" + ) + except (OSError, requests.exceptions.Timeout) as e: + # not connected + head_error = e + pass + + # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. + # try to get the last downloaded one + if not connected: + if os.path.exists(cache_path) and not force_download: + return cache_path + if local_files_only: + raise FileNotFoundError( + f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been" + " disabled. To enable file online look-ups, set 'local_files_only' to False." + ) + elif response is not None and response.status_code == 404: + raise FileNotFoundError(f"Couldn't find file at {url}") + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + if head_error is not None: + raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") + elif response is not None: + raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})") + else: + raise ConnectionError(f"Couldn't reach {url}") + + # Try a second time + filename = hash_url_to_filename(cached_url, etag) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download: + return cache_path + + # From now on, connected is True. + # Prevent parallel downloads of the same file with a lock. + lock_path = cache_path + ".lock" + with FileLock(lock_path): + # Retry in case previously locked processes just enter after the precedent process releases the lock + if os.path.exists(cache_path) and not force_download: + return cache_path + + incomplete_path = cache_path + ".incomplete" + + @contextmanager + def temp_file_manager(mode="w+b"): + with open(incomplete_path, mode) as f: + yield f + + resume_size = 0 + if resume_download: + temp_file_manager = partial(temp_file_manager, mode="a+b") + if os.path.exists(incomplete_path): + resume_size = os.stat(incomplete_path).st_size + + # Download to temporary file, then copy to cache path once finished. + # Otherwise, you get corrupt cache entries if the download gets interrupted. 
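+        # Note (added for clarity): with resume_download=True the ".incomplete" file is opened in
+        # append mode and `resume_size` is forwarded to http_get, which sends it as an HTTP Range
+        # header so the transfer resumes where it previously stopped.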
+ with temp_file_manager() as temp_file: + logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") + + # GET file object + if scheme == "ftp": + ftp_get(url, temp_file) + elif scheme not in ("http", "https"): + fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc) + else: + http_get( + url, + temp_file=temp_file, + proxies=proxies, + resume_size=resume_size, + headers=headers, + cookies=cookies, + max_retries=max_retries, + desc=download_desc, + ) + + logger.info(f"storing {url} in cache at {cache_path}") + shutil.move(temp_file.name, cache_path) + umask = os.umask(0o666) + os.umask(umask) + os.chmod(cache_path, 0o666 & ~umask) + + logger.info(f"creating metadata file for {cache_path}") + meta = {"url": url, "etag": etag} + meta_path = cache_path + ".json" + with open(meta_path, "w", encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + return cache_path + + +def add_start_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") + return fn + + return docstring_decorator + + +def add_end_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) + return fn + + return docstring_decorator + + +def estimate_dataset_size(paths): + return sum(path.stat().st_size for path in paths) + + +def readline(f: io.RawIOBase): + # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525 + res = bytearray() + while True: + b = f.read(1) + if not b: + break + res += b + if res.endswith(b"\n"): + break + return bytes(res) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/filelock.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/filelock.py new file mode 100644 index 0000000000000000000000000000000000000000..df0728efe644d8eb32f0e578a85e39ba366e9743 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/filelock.py @@ -0,0 +1,11 @@ +# deprecated, please use the `filelock` package instead + +from filelock import ( # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0 + BaseFileLock, + SoftFileLock, + Timeout, + UnixFileLock, + WindowsFileLock, +) + +from ._filelock import FileLock # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0 diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/info_utils.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/info_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4eaa2f0418b2200b9e6714e6697ee68efe753107 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/info_utils.py @@ -0,0 +1,130 @@ +import enum +import os +from typing import Optional + +from huggingface_hub.utils import insecure_hashlib + +from .. import config +from .logging import get_logger + + +logger = get_logger(__name__) + + +class VerificationMode(enum.Enum): + """`Enum` that specifies which verification checks to run. + + The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns + when generating/downloading a dataset for the first time. 
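+
+    Example (illustrative; assumes the Hub dataset below is available and that `load_dataset`
+    accepts a `verification_mode` argument, as in recent releases)::
+
+        from datasets import load_dataset
+        from datasets.utils.info_utils import VerificationMode
+
+        dataset = load_dataset("glue", "mrpc", verification_mode=VerificationMode.ALL_CHECKS)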
+ + The verification modes: + + | | Verification checks | + |---------------------------|------------------------------------------------------------------------------ | + | `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder | + | | and the validity (number of files, checksums, etc.) of downloaded files | + | `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files | + | `NO_CHECKS` | None | + + """ + + ALL_CHECKS = "all_checks" + BASIC_CHECKS = "basic_checks" + NO_CHECKS = "no_checks" + + +class ChecksumVerificationException(Exception): + """Exceptions during checksums verifications of downloaded files.""" + + +class UnexpectedDownloadedFile(ChecksumVerificationException): + """Some downloaded files were not expected.""" + + +class ExpectedMoreDownloadedFiles(ChecksumVerificationException): + """Some files were supposed to be downloaded but were not.""" + + +class NonMatchingChecksumError(ChecksumVerificationException): + """The downloaded file checksum don't match the expected checksum.""" + + +def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None): + if expected_checksums is None: + logger.info("Unable to verify checksums.") + return + if len(set(expected_checksums) - set(recorded_checksums)) > 0: + raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums))) + if len(set(recorded_checksums) - set(expected_checksums)) > 0: + raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums))) + bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] + for_verification_name = " for " + verification_name if verification_name is not None else "" + if len(bad_urls) > 0: + raise NonMatchingChecksumError( + f"Checksums didn't match{for_verification_name}:\n" + f"{bad_urls}\n" + "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" + ) + logger.info("All the checksums matched successfully" + for_verification_name) + + +class SplitsVerificationException(Exception): + """Exceptions during splis verifications""" + + +class UnexpectedSplits(SplitsVerificationException): + """The expected splits of the downloaded file is missing.""" + + +class ExpectedMoreSplits(SplitsVerificationException): + """Some recorded splits are missing.""" + + +class NonMatchingSplitsSizesError(SplitsVerificationException): + """The splits sizes don't match the expected splits sizes.""" + + +def verify_splits(expected_splits: Optional[dict], recorded_splits: dict): + if expected_splits is None: + logger.info("Unable to verify splits sizes.") + return + if len(set(expected_splits) - set(recorded_splits)) > 0: + raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits))) + if len(set(recorded_splits) - set(expected_splits)) > 0: + raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits))) + bad_splits = [ + {"expected": expected_splits[name], "recorded": recorded_splits[name]} + for name in expected_splits + if expected_splits[name].num_examples != recorded_splits[name].num_examples + ] + if len(bad_splits) > 0: + raise NonMatchingSplitsSizesError(str(bad_splits)) + logger.info("All the splits matched successfully.") + + +def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict: + """Compute the file size and the sha256 checksum of a file""" + if record_checksum: + m = insecure_hashlib.sha256() + with open(path, "rb") as f: + for 
chunk in iter(lambda: f.read(1 << 20), b""): + m.update(chunk) + checksum = m.hexdigest() + else: + checksum = None + return {"num_bytes": os.path.getsize(path), "checksum": checksum} + + +def is_small_dataset(dataset_size): + """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. + + Args: + dataset_size (int): Dataset size in bytes. + + Returns: + bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. + """ + if dataset_size and config.IN_MEMORY_MAX_SIZE: + return dataset_size < config.IN_MEMORY_MAX_SIZE + else: + return False diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/logging.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..dffd5ce46e0d2da5cbbfb023003c3f4caae86093 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/logging.py @@ -0,0 +1,179 @@ +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Logging utilities.""" + +import logging +import os +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from typing import Optional + +from .tqdm import ( # noqa: F401 # imported for backward compatibility + disable_progress_bar, + enable_progress_bar, + is_progress_bar_enabled, + tqdm, +) + + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + + +def _get_default_logging_level(): + """ + If DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new default level. + If it is not - fall back to ``_default_log_level`` + """ + env_level_str = os.getenv("DATASETS_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option DATASETS_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + # Apply our default configuration to the library root logger. + library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(logging.StreamHandler()) + library_root_logger.setLevel(_get_default_logging_level()) + + +def _reset_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(logging.NOTSET) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """Return a logger with the specified name. + This function can be used in dataset scripts. 
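+
+    Example (illustrative)::
+
+        from datasets.utils.logging import get_logger
+
+        logger = get_logger(__name__)
+        logger.info("generating split")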
+ """ + if name is None: + name = _get_library_name() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """Return the current level for the HuggingFace datasets library's root logger. + Returns: + Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. + + + + HuggingFace datasets library has following logging levels: + - `datasets.logging.CRITICAL`, `datasets.logging.FATAL` + - `datasets.logging.ERROR` + - `datasets.logging.WARNING`, `datasets.logging.WARN` + - `datasets.logging.INFO` + - `datasets.logging.DEBUG` + + + """ + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """Set the level for the Hugging Face Datasets library's root logger. + Args: + verbosity: + Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. + """ + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info(): + """Set the level for the Hugging Face datasets library's root logger to `INFO`. + + This will display most of the logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`. + """ + return set_verbosity(INFO) + + +def set_verbosity_warning(): + """Set the level for the Hugging Face datasets library's root logger to `WARNING`. + + This will display only the warning and errors logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`. + """ + return set_verbosity(WARNING) + + +def set_verbosity_debug(): + """Set the level for the Hugging Face datasets library's root logger to `DEBUG`. + + This will display all the logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`. + """ + return set_verbosity(DEBUG) + + +def set_verbosity_error(): + """Set the level for the Hugging Face datasets library's root logger to `ERROR`. + + This will display only the errors logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`. + """ + return set_verbosity(ERROR) + + +def disable_propagation() -> None: + """Disable propagation of the library log outputs. + Note that log propagation is disabled by default. + """ + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """Enable propagation of the library log outputs. + Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has + been configured. 
+ """ + _get_library_root_logger().propagate = True + + +# Configure the library root logger at the module level (singleton-like) +_configure_library_root_logger() diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/patching.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/patching.py new file mode 100644 index 0000000000000000000000000000000000000000..f245cabd97065d9e82a1320d02999f9ec03bda36 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/patching.py @@ -0,0 +1,119 @@ +from importlib import import_module + +from .logging import get_logger + + +logger = get_logger(__name__) + + +class _PatchedModuleObj: + """Set all the modules components as attributes of the _PatchedModuleObj object.""" + + def __init__(self, module, attrs=None): + attrs = attrs or [] + if module is not None: + for key in module.__dict__: + if key in attrs or not key.startswith("__"): + setattr(self, key, getattr(module, key)) + self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module + + +class patch_submodule: + """ + Patch a submodule attribute of an object, by keeping all other submodules intact at all levels. + + Example:: + + >>> import importlib + >>> from datasets.load import dataset_module_factory + >>> from datasets.streaming import patch_submodule, xjoin + >>> + >>> dataset_module = dataset_module_factory("snli") + >>> snli_module = importlib.import_module(dataset_module.module_path) + >>> patcher = patch_submodule(snli_module, "os.path.join", xjoin) + >>> patcher.start() + >>> assert snli_module.os.path.join is xjoin + """ + + _active_patches = [] + + def __init__(self, obj, target: str, new, attrs=None): + self.obj = obj + self.target = target + self.new = new + self.key = target.split(".")[0] + self.original = {} + self.attrs = attrs or [] + + def __enter__(self): + *submodules, target_attr = self.target.split(".") + + # Patch modules: + # it's used to patch attributes of submodules like "os.path.join"; + # in this case we need to patch "os" and "os.path" + + for i in range(len(submodules)): + try: + submodule = import_module(".".join(submodules[: i + 1])) + except ModuleNotFoundError: + continue + # We iterate over all the globals in self.obj in case we find "os" or "os.path" + for attr in self.obj.__dir__(): + obj_attr = getattr(self.obj, attr) + # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". + # This allows to patch renamed modules like "from os import path as ospath". + if obj_attr is submodule or ( + isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule + ): + self.original[attr] = obj_attr + # patch at top level + setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs)) + patched = getattr(self.obj, attr) + # construct lower levels patches + for key in submodules[i + 1 :]: + setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs)) + patched = getattr(patched, key) + # finally set the target attribute + setattr(patched, target_attr, self.new) + + # Patch attribute itself: + # it's used for builtins like "open", + # and also to patch "os.path.join" we may also need to patch "join" + # itself if it was imported as "from os.path import join". 
+ + if submodules: # if it's an attribute of a submodule like "os.path.join" + try: + attr_value = getattr(import_module(".".join(submodules)), target_attr) + except (AttributeError, ModuleNotFoundError): + return + # We iterate over all the globals in self.obj in case we find "os.path.join" + for attr in self.obj.__dir__(): + # We don't check for the name of the global, but rather if its value *is* "os.path.join". + # This allows to patch renamed attributes like "from os.path import join as pjoin". + if getattr(self.obj, attr) is attr_value: + self.original[attr] = getattr(self.obj, attr) + setattr(self.obj, attr, self.new) + elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" + self.original[target_attr] = globals()["__builtins__"][target_attr] + setattr(self.obj, target_attr, self.new) + else: + raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.") + + def __exit__(self, *exc_info): + for attr in list(self.original): + setattr(self.obj, attr, self.original.pop(attr)) + + def start(self): + """Activate a patch.""" + self.__enter__() + self._active_patches.append(self) + + def stop(self): + """Stop an active patch.""" + try: + self._active_patches.remove(self) + except ValueError: + # If the patch hasn't been started this will fail + return None + + return self.__exit__() diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/py_utils.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/py_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..db63d5058c1f001b6dc90035e9fb684662c24665 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/py_utils.py @@ -0,0 +1,663 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +"""Some python utils function and classes.""" + +import copy +import functools +import itertools +import multiprocessing.pool +import os +import queue +import re +import types +import warnings +from contextlib import contextmanager +from dataclasses import fields, is_dataclass +from multiprocessing import Manager +from queue import Empty +from shutil import disk_usage +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union +from urllib.parse import urlparse + +import multiprocess +import multiprocess.pool +import numpy as np +from tqdm.auto import tqdm + +from .. import config +from ..parallel import parallel_map +from . import logging +from . import tqdm as hf_tqdm +from ._dill import ( # noqa: F401 # imported for backward compatibility. 
TODO: remove in 3.0.0 + Pickler, + dump, + dumps, + pklregister, +) + + +try: # pragma: no branch + import typing_extensions as _typing_extensions + from typing_extensions import Final, Literal +except ImportError: + _typing_extensions = Literal = Final = None + + +logger = logging.get_logger(__name__) + + +# NOTE: When used on an instance method, the cache is shared across all +# instances and IS NOT per-instance. +# See +# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance +# For @property methods, use @memoized_property below. +memoize = functools.lru_cache + + +def size_str(size_in_bytes): + """Returns a human readable size string. + + If size_in_bytes is None, then returns "Unknown size". + + For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`. + + Args: + size_in_bytes: `int` or `None`, the size, in bytes, that we want to + format as a human-readable size string. + """ + if not size_in_bytes: + return "Unknown size" + + _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)] + + size_in_bytes = float(size_in_bytes) + for name, size_bytes in _NAME_LIST: + value = size_in_bytes / size_bytes + if value >= 1.0: + return f"{value:.2f} {name}" + return f"{int(size_in_bytes)} bytes" + + +def convert_file_size_to_int(size: Union[int, str]) -> int: + """ + Converts a size expressed as a string with digits an unit (like `"50MB"`) to an integer (in bytes). + + Args: + size (`int` or `str`): The size to convert. Will be directly returned if an `int`. + + Example: + + ```py + >>> convert_file_size_to_int("1MiB") + 1048576 + ``` + """ + if isinstance(size, int): + return size + if size.upper().endswith("PIB"): + return int(size[:-3]) * (2**50) + if size.upper().endswith("TIB"): + return int(size[:-3]) * (2**40) + if size.upper().endswith("GIB"): + return int(size[:-3]) * (2**30) + if size.upper().endswith("MIB"): + return int(size[:-3]) * (2**20) + if size.upper().endswith("KIB"): + return int(size[:-3]) * (2**10) + if size.upper().endswith("PB"): + int_size = int(size[:-2]) * (10**15) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("TB"): + int_size = int(size[:-2]) * (10**12) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("GB"): + int_size = int(size[:-2]) * (10**9) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("MB"): + int_size = int(size[:-2]) * (10**6) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("KB"): + int_size = int(size[:-2]) * (10**3) + return int_size // 8 if size.endswith("b") else int_size + raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") + + +def glob_pattern_to_regex(pattern): + # partially taken from fsspec: + # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735 + return ( + pattern.replace("\\", r"\\") + .replace(".", r"\.") + .replace("*", ".*") + .replace("+", r"\+") + .replace("//", "/") + .replace("(", r"\(") + .replace(")", r"\)") + .replace("|", r"\|") + .replace("^", r"\^") + .replace("$", r"\$") + .rstrip("/") + .replace("?", ".") + ) + + +def string_to_dict(string: str, pattern: str) -> Dict[str, str]: + """Un-format a string using a python f-string pattern. 
+ From https://stackoverflow.com/a/36838374 + + Example:: + + >>> p = 'hello, my name is {name} and I am a {age} year old {what}' + >>> s = p.format(name='cody', age=18, what='quarterback') + >>> s + 'hello, my name is cody and I am a 18 year old quarterback' + >>> string_to_dict(s, p) + {'age': '18', 'name': 'cody', 'what': 'quarterback'} + + Args: + string (str): input string + pattern (str): pattern formatted like a python f-string + + Returns: + Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern + + Raises: + ValueError: if the string doesn't match the pattern + """ + regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern) + result = re.search(regex, string) + if result is None: + raise ValueError(f"String {string} doesn't match the pattern {pattern}") + values = list(result.groups()) + keys = re.findall(r"{(.+?)}", pattern) + _dict = dict(zip(keys, values)) + return _dict + + +def asdict(obj): + """Convert an object to its dictionary representation recursively. + + + """ + + # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict + + def _is_dataclass_instance(obj): + # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass + return is_dataclass(obj) and not isinstance(obj, type) + + def _asdict_inner(obj): + if _is_dataclass_instance(obj): + result = {} + for f in fields(obj): + value = _asdict_inner(getattr(obj, f.name)) + if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False): + result[f.name] = value + return result + elif isinstance(obj, tuple) and hasattr(obj, "_fields"): + # obj is a namedtuple + return type(obj)(*[_asdict_inner(v) for v in obj]) + elif isinstance(obj, (list, tuple)): + # Assume we can create an object of this type by passing in a + # generator (which is not true for namedtuples, handled + # above). + return type(obj)(_asdict_inner(v) for v in obj) + elif isinstance(obj, dict): + return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()} + else: + return copy.deepcopy(obj) + + if not isinstance(obj, dict) and not _is_dataclass_instance(obj): + raise TypeError(f"{obj} is not a dict or a dataclass") + + return _asdict_inner(obj) + + +@contextmanager +def temporary_assignment(obj, attr, value): + """Temporarily assign obj.attr to value.""" + original = getattr(obj, attr, None) + setattr(obj, attr, value) + try: + yield + finally: + setattr(obj, attr, original) + + +@contextmanager +def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False): + """Temporarily set the random seed. 
This works for python numpy, pytorch and tensorflow.""" + np_state = np.random.get_state() + np.random.seed(seed) + + if set_pytorch and config.TORCH_AVAILABLE: + import torch + + torch_state = torch.random.get_rng_state() + torch.random.manual_seed(seed) + + if torch.cuda.is_available(): + torch_cuda_states = torch.cuda.get_rng_state_all() + torch.cuda.manual_seed_all(seed) + + if set_tensorflow and config.TF_AVAILABLE: + import tensorflow as tf + from tensorflow.python.eager import context as tfpycontext + + tf_state = tf.random.get_global_generator() + temp_gen = tf.random.Generator.from_seed(seed) + tf.random.set_global_generator(temp_gen) + + if not tf.executing_eagerly(): + raise ValueError("Setting random seed for TensorFlow is only available in eager mode") + + tf_context = tfpycontext.context() # eager mode context + tf_seed = tf_context._seed + tf_rng_initialized = hasattr(tf_context, "_rng") + if tf_rng_initialized: + tf_rng = tf_context._rng + tf_context._set_global_seed(seed) + + try: + yield + finally: + np.random.set_state(np_state) + + if set_pytorch and config.TORCH_AVAILABLE: + torch.random.set_rng_state(torch_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state_all(torch_cuda_states) + + if set_tensorflow and config.TF_AVAILABLE: + tf.random.set_global_generator(tf_state) + + tf_context._seed = tf_seed + if tf_rng_initialized: + tf_context._rng = tf_rng + else: + delattr(tf_context, "_rng") + + +def unique_values(values): + """Iterate over iterable and return only unique values in order.""" + seen = set() + for value in values: + if value not in seen: + seen.add(value) + yield value + + +def no_op_if_value_is_null(func): + """If the value is None, return None, else call `func`.""" + + def wrapper(value): + return func(value) if value is not None else None + + return wrapper + + +def first_non_null_value(iterable): + """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index.""" + for i, value in enumerate(iterable): + if value is not None: + return i, value + return -1, None + + +def zip_dict(*dicts): + """Iterate over items of dictionaries grouped by their keys.""" + for key in unique_values(itertools.chain(*dicts)): # set merge all keys + # Will raise KeyError if the dict don't have the same keys + yield key, tuple(d[key] for d in dicts) + + +class NonMutableDict(dict): + """Dict where keys can only be added but not modified. + + Will raise an error if the user try to overwrite one key. The error message + can be customized during construction. It will be formatted using {key} for + the overwritten key. 
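+
+    Example (illustrative)::
+
+        sizes = NonMutableDict()
+        sizes["train"] = 100
+        sizes["train"] = 200  # raises ValueError: Try to overwrite existing key: train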
+ """ + + def __init__(self, *args, **kwargs): + self._error_msg = kwargs.pop( + "error_msg", + "Try to overwrite existing key: {key}", + ) + if kwargs: + raise ValueError("NonMutableDict cannot be initialized with kwargs.") + super().__init__(*args, **kwargs) + + def __setitem__(self, key, value): + if key in self: + raise ValueError(self._error_msg.format(key=key)) + return super().__setitem__(key, value) + + def update(self, other): + if any(k in self for k in other): + raise ValueError(self._error_msg.format(key=set(self) & set(other))) + return super().update(other) + + +class classproperty(property): # pylint: disable=invalid-name + """Descriptor to be used as decorator for @classmethods.""" + + def __get__(self, obj, objtype=None): + return self.fget.__get__(None, objtype)() + + +def _single_map_nested(args): + """Apply a function recursively to each element of a nested data struct.""" + function, data_struct, types, rank, disable_tqdm, desc = args + + # Singleton first to spare some computation + if not isinstance(data_struct, dict) and not isinstance(data_struct, types): + return function(data_struct) + + # Reduce logging to keep things readable in multiprocessing with tqdm + if rank is not None and logging.get_verbosity() < logging.WARNING: + logging.set_verbosity_warning() + # Print at least one thing to fix tqdm in notebooks in multiprocessing + # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 + if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): + print(" ", end="", flush=True) + + # Loop over single examples or batches and write to buffer/file if examples are to be updated + pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct + pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc + with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar: + if isinstance(data_struct, dict): + return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} + else: + mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] + if isinstance(data_struct, list): + return mapped + elif isinstance(data_struct, tuple): + return tuple(mapped) + else: + return np.array(mapped) + + +def map_nested( + function: Callable[[Any], Any], + data_struct: Any, + dict_only: bool = False, + map_list: bool = True, + map_tuple: bool = False, + map_numpy: bool = False, + num_proc: Optional[int] = None, + parallel_min_length: int = 2, + types: Optional[tuple] = None, + disable_tqdm: bool = True, + desc: Optional[str] = None, +) -> Any: + """Apply a function recursively to each element of a nested data struct. + + Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to + `parallel_min_length`. + + + + Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``. + + Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and + multiprocessing is used. + + + + Args: + function (`Callable`): Function to be applied to `data_struct`. + data_struct (`Any`): Data structure to apply `function` to. + dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in + `data_struct`. + map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict` + values). 
+ map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides + `dict` values). + map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides + `dict` values). + num_proc (`int`, *optional*): Number of processes. + parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel + processing. + + types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their + elements. + disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar. + desc (`str`, *optional*): Prefix for the tqdm progressbar. + + Returns: + `Any` + """ + if types is None: + types = [] + if not dict_only: + if map_list: + types.append(list) + if map_tuple: + types.append(tuple) + if map_numpy: + types.append(np.ndarray) + types = tuple(types) + + # Singleton + if not isinstance(data_struct, dict) and not isinstance(data_struct, types): + return function(data_struct) + + iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct + + if num_proc is None: + num_proc = 1 + if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable): + mapped = [ + map_nested( + function=function, + data_struct=obj, + num_proc=num_proc, + parallel_min_length=parallel_min_length, + types=types, + ) + for obj in iterable + ] + elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: + mapped = [ + _single_map_nested((function, obj, types, None, True, None)) + for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) + ] + else: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=".* is experimental and might be subject to breaking changes in the future\\.$", + category=UserWarning, + ) + mapped = parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, _single_map_nested) + + if isinstance(data_struct, dict): + return dict(zip(data_struct.keys(), mapped)) + else: + if isinstance(data_struct, list): + return mapped + elif isinstance(data_struct, tuple): + return tuple(mapped) + else: + return np.array(mapped) + + +class NestedDataStructure: + def __init__(self, data=None): + self.data = data if data is not None else [] + + def flatten(self, data=None): + data = data if data is not None else self.data + if isinstance(data, dict): + return self.flatten(list(data.values())) + elif isinstance(data, (list, tuple)): + return [flattened for item in data for flattened in self.flatten(item)] + else: + return [data] + + +def has_sufficient_disk_space(needed_bytes, directory="."): + try: + free_bytes = disk_usage(os.path.abspath(directory)).free + except OSError: + return True + return needed_bytes < free_bytes + + +def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]: + """Convert a link to a file on a github repo in a link to the raw github object.""" + parsed = urlparse(url_path) + sub_directory = None + if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com": + if "blob" in url_path: + if not url_path.endswith(".py"): + raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'") + url_path = url_path.replace("blob", "raw") # Point to the raw file + else: + # Parse github url to point to zip + github_path = parsed.path[1:] + repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master") + repo_owner, repo_name = repo_info.split("/") 
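+            # Illustrative mapping (owner/repo/branch names are hypothetical): a tree URL such as
+            # https://github.com/owner/repo/tree/main becomes
+            # https://github.com/owner/repo/archive/main.zip with sub_directory "repo-main".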
+ url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip" + sub_directory = f"{repo_name}-{branch}" + return url_path, sub_directory + + +def get_imports(file_path: str) -> Tuple[str, str, str, str]: + """Find whether we should import or clone additional files for a given processing script. + And list the import. + + We allow: + - library dependencies, + - local dependencies and + - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository. + external dependencies will be downloaded (and extracted if needed in the dataset folder). + We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script. + + Note that only direct import in the dataset processing script will be handled + We don't recursively explore the additional import to download further files. + + Example:: + + import tensorflow + import .c4_utils + import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset + """ + lines = [] + with open(file_path, encoding="utf-8") as f: + lines.extend(f.readlines()) + + logger.debug(f"Checking {file_path} for additional imports.") + imports: List[Tuple[str, str, str, Optional[str]]] = [] + is_in_docstring = False + for line in lines: + docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line) + + if len(docstr_start_match) == 1: + # flip True <=> False only if doctstring + # starts at line without finishing + is_in_docstring = not is_in_docstring + + if is_in_docstring: + # import statements in doctstrings should + # not be added as required dependencies + continue + + match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE) + if match is None: + match = re.match( + r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", + line, + flags=re.MULTILINE, + ) + if match is None: + continue + if match.group(1): + # The import starts with a '.', we will download the relevant file + if any(imp[1] == match.group(2) for imp in imports): + # We already have this import + continue + if match.group(3): + # The import has a comment with 'From:', we'll retrieve it from the given url + url_path = match.group(3) + url_path, sub_directory = _convert_github_url(url_path) + imports.append(("external", match.group(2), url_path, sub_directory)) + elif match.group(2): + # The import should be at the same place as the file + imports.append(("internal", match.group(2), match.group(2), None)) + else: + if match.group(3): + # The import has a comment with `From: git+https:...`, asks user to pip install from git. 
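+                # Illustrative only (hypothetical import line): something like
+                #   import lxml  # From: git+https://github.com/lxml/lxml
+                # is recorded as a "library" import together with its install URL.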
+ url_path = match.group(3) + imports.append(("library", match.group(2), url_path, None)) + else: + imports.append(("library", match.group(2), match.group(2), None)) + + return imports + + +def copyfunc(func): + result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__) + result.__kwdefaults__ = func.__kwdefaults__ + return result + + +Y = TypeVar("Y") + + +def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int: + for i, result in enumerate(func(**kwargs)): + queue.put(result) + return i + + +def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]: + return {f.pid for f in pool._pool} + + +def iflatmap_unordered( + pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool], + func: Callable[..., Iterable[Y]], + *, + kwargs_iterable: Iterable[dict], +) -> Iterable[Y]: + initial_pool_pid = _get_pool_pid(pool) + pool_changed = False + manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager + with manager_cls() as manager: + queue = manager.Queue() + async_results = [ + pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable + ] + try: + while True: + try: + yield queue.get(timeout=0.05) + except Empty: + if all(async_result.ready() for async_result in async_results) and queue.empty(): + break + if _get_pool_pid(pool) != initial_pool_pid: + pool_changed = True + # One of the subprocesses has died. We should not wait forever. + raise RuntimeError( + "One of the subprocesses has abruptly died during map operation." + "To debug the error, disable multiprocessing." + ) + finally: + if not pool_changed: + # we get the result in case there's an error to raise + [async_result.get(timeout=0.05) for async_result in async_results] diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/readme.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/readme.py new file mode 100644 index 0000000000000000000000000000000000000000..66ed087f7d67181c6840179fa634e8b8e4238f85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/readme.py @@ -0,0 +1,277 @@ +# loading package files: https://stackoverflow.com/a/20885799 +import importlib.resources as pkg_resources +import logging +from pathlib import Path +from typing import Any, List, Tuple + +import yaml + +from . 
import resources +from .deprecation_utils import deprecated + + +BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils" +this_url = f"{BASE_REF_URL}/{__file__}" +logger = logging.getLogger(__name__) + + +def load_yaml_resource(resource: str) -> Tuple[Any, str]: + content = pkg_resources.read_text(resources, resource) + return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}" + + +readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml") + +FILLER_TEXT = [ + "[Needs More Information]", + "[More Information Needed]", + "(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)", +] + +# Dictionary representation of section/readme, error_list, warning_list +ReadmeValidatorOutput = Tuple[dict, List[str], List[str]] + + +class Section: + def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False): + self.name = name + self.level = level + self.lines = lines + self.text = "" + self.is_empty_text = True + self.content = {} + self.parsing_error_list = [] + self.parsing_warning_list = [] + if self.lines is not None: + self.parse(suppress_parsing_errors=suppress_parsing_errors) + + def parse(self, suppress_parsing_errors: bool = False): + current_sub_level = "" + current_lines = [] + code_start = False + for line in self.lines: + if line.strip(" \n") == "": + continue + elif line.strip(" \n")[:3] == "```": + code_start = not code_start + elif line.split()[0] == self.level + "#" and not code_start: + if current_sub_level != "": + self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines) + current_lines = [] + else: + if current_lines != []: + self.text += "".join(current_lines).strip() + if self.text != "" and self.text not in FILLER_TEXT: + self.is_empty_text = False + current_lines = [] + + current_sub_level = " ".join(line.split()[1:]).strip(" \n") + else: + current_lines.append(line) + else: + if current_sub_level != "": + if current_sub_level in self.content: + self.parsing_error_list.append( + f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections." + ) + self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines) + else: + if current_lines != []: + self.text += "".join(current_lines).strip() + if self.text != "" and self.text not in FILLER_TEXT: + self.is_empty_text = False + + if self.level == "" and not suppress_parsing_errors: + if self.parsing_error_list != [] or self.parsing_warning_list != []: + errors = errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list) + error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors + raise ValueError(error_string) + + def validate(self, structure: dict) -> ReadmeValidatorOutput: + """Validates a Section class object recursively using the structure provided as a dictionary. + + Args: + structute (:obj: `dict`): The dictionary representing expected structure. + + Returns: + :obj: `ReadmeValidatorOutput`: The dictionary representation of the section, and the errors. 
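+
+        Illustrative shape of the output (section names are hypothetical): a section
+        missing an expected subsection yields something like
+        ``({}, ["Section `Data` is missing subsection: `Data Splits`."], [])``.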
+ """ + # Header text validation + error_list = [] + warning_list = [] + if structure["allow_empty"] is False: + # If content is expected + if self.is_empty_text and self.content == {}: + # If no content is found, mention it in the error_list + error_list.append(f"Expected some content in section `{self.name}` but it is empty.") + + if structure["allow_empty_text"] is False: + # If some text is expected + if self.is_empty_text: + # If no text is found, mention it in the error_list + error_list.append( + f"Expected some text in section `{self.name}` but it is empty (text in subsections are ignored)." + ) + # Subsections Validation + if structure["subsections"] is not None: + # If subsections are expected + if self.content == {}: + # If no subsections are present + values = [subsection["name"] for subsection in structure["subsections"]] + # Mention the expected values in the error_list + error_list.append( + f"Section `{self.name}` expected the following subsections: {', '.join(['`'+x+'`' for x in values])}. Found 'None'." + ) + else: + # If some subsections are present + structure_names = [subsection["name"] for subsection in structure["subsections"]] + has_missing_subsections = False + for idx, name in enumerate(structure_names): + if name not in self.content: + # If the expected subsection is not present + error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.") + has_missing_subsections = True + else: + # If the subsection is present, validate subsection, return the result + # and concat the errors from subsection to section error_list + + # Skip sublevel validation if current level is `###` + if self.level == "###": + continue + else: + _, subsec_error_list, subsec_warning_list = self.content[name].validate( + structure["subsections"][idx] + ) + error_list += subsec_error_list + warning_list += subsec_warning_list + + if has_missing_subsections: # we only allow to have extra subsections if all the other ones are here + for name in self.content: + if name not in structure_names: + # If an extra subsection is present + warning_list.append( + f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown." 
+ ) + if error_list: + # If there are errors, do not return the dictionary as it is invalid + return {}, error_list, warning_list + else: + return self.to_dict(), error_list, warning_list + + def to_dict(self) -> dict: + """Returns the dictionary representation of a section.""" + return { + "name": self.name, + "text": self.text, + "is_empty_text": self.is_empty_text, + "subsections": [value.to_dict() for value in self.content.values()], + } + + +@deprecated("Use `huggingface_hub.DatasetCard` instead.") +class ReadMe(Section): # Level 0 + def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False): + super().__init__(name=name, level="") # Not using lines here as we need to use a child class parse + self.structure = structure + self.yaml_tags_line_count = -2 + self.tag_count = 0 + self.lines = lines + if self.lines is not None: + self.parse(suppress_parsing_errors=suppress_parsing_errors) + + def validate(self): + if self.structure is None: + content, error_list, warning_list = self._validate(readme_structure) + else: + content, error_list, warning_list = self._validate(self.structure) + if error_list != [] or warning_list != []: + errors = "\n".join(["-\t" + x for x in error_list + warning_list]) + error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors + raise ValueError(error_string) + + @classmethod + def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False): + with open(path, encoding="utf-8") as f: + lines = f.readlines() + return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors) + + @classmethod + def from_string( + cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False + ): + lines = string.split("\n") + return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors) + + def parse(self, suppress_parsing_errors: bool = False): + # Skip Tags + line_count = 0 + + for line in self.lines: + self.yaml_tags_line_count += 1 + if line.strip(" \n") == "---": + self.tag_count += 1 + if self.tag_count == 2: + break + line_count += 1 + if self.tag_count == 2: + self.lines = self.lines[line_count + 1 :] # Get the last + 1 th item. + else: + self.lines = self.lines[self.tag_count :] + super().parse(suppress_parsing_errors=suppress_parsing_errors) + + def __str__(self): + """Returns the string of dictionary representation of the ReadMe.""" + return str(self.to_dict()) + + def _validate(self, readme_structure): + error_list = [] + warning_list = [] + if self.yaml_tags_line_count == 0: + warning_list.append("Empty YAML markers are present in the README.") + elif self.tag_count == 0: + warning_list.append("No YAML markers are present in the README.") + elif self.tag_count == 1: + warning_list.append("Only the start of YAML tags present in the README.") + # Check how many first level sections are present. + num_first_level_keys = len(self.content.keys()) + if num_first_level_keys > 1: + # If more than one, add to the error list, continue + error_list.append( + f"The README has several first-level headings: {', '.join(['`'+x+'`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README." + ) + elif num_first_level_keys < 1: + # If less than one, append error. + error_list.append( + "The README has no first-level headings. One heading is expected. Skipping further validation for this README." 
+ ) + + else: + # If one exactly + start_key = list(self.content.keys())[0] # Get the key + if start_key.startswith("Dataset Card for"): # Check correct start + # If the starting is correct, validate all the sections + _, sec_error_list, sec_warning_list = self.content[start_key].validate( + readme_structure["subsections"][0] + ) + error_list += sec_error_list + warning_list += sec_warning_list + else: + # If not found, append error + error_list.append( + "No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README." + ) + if error_list: + # If there are errors, do not return the dictionary as it is invalid + return {}, error_list, warning_list + else: + return self.to_dict(), error_list, warning_list + + +if __name__ == "__main__": + from argparse import ArgumentParser + + ap = ArgumentParser(usage="Validate the content (excluding YAML tags) of a README.md file.") + ap.add_argument("readme_filepath") + args = ap.parse_args() + readme_filepath = Path(args.readme_filepath) + readme = ReadMe.from_readme(readme_filepath) diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/sharding.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/sharding.py new file mode 100644 index 0000000000000000000000000000000000000000..7ee3133b80ea927a076eebc7eedc2e7b25013ffa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/sharding.py @@ -0,0 +1,96 @@ +from typing import List + +import numpy as np + + +def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int: + """Return the number of possible shards according to the input gen_kwargs""" + # Having lists of different sizes makes sharding ambigious, raise an error in this case + # until we decide how to define sharding without ambiguity for users + lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)} + if len(set(lists_lengths.values())) > 1: + raise RuntimeError( + ( + "Sharding is ambiguous for this dataset: " + + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items()) + + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." + ) + ) + max_length = max(lists_lengths.values(), default=0) + return max(1, max_length) + + +def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]: + """ + Get the range of shard indices per job. 
+    If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.
+
+    The shards indices order is preserved: e.g. all the first shards are given the first job.
+    Moreover all the jobs are given approximately the same number of shards.
+
+    Example:
+
+    ```python
+    >>> _distribute_shards(2, max_num_jobs=4)
+    [range(0, 1), range(1, 2)]
+    >>> _distribute_shards(10, max_num_jobs=3)
+    [range(0, 4), range(4, 7), range(7, 10)]
+    ```
+    """
+    shards_indices_per_group = []
+    for group_idx in range(max_num_jobs):
+        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
+        if num_shards_to_add == 0:
+            break
+        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
+        shard_indices = range(start, start + num_shards_to_add)
+        shards_indices_per_group.append(shard_indices)
+    return shards_indices_per_group
+
+
+def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
+    """Split the gen_kwargs into `max_num_job` gen_kwargs"""
+    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
+    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
+    if num_shards == 1:
+        return [dict(gen_kwargs)]
+    else:
+        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
+        return [
+            {
+                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
+                if isinstance(value, list)
+                else value
+                for key, value in gen_kwargs.items()
+            }
+            for group_idx in range(len(shard_indices_per_group))
+        ]
+
+
+def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
+    return {
+        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
+        if isinstance(gen_kwargs_list[0][key], list)
+        else gen_kwargs_list[0][key]
+        for key in gen_kwargs_list[0]
+    }
+
+
+def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
+    """Return a shuffled copy of the input gen_kwargs"""
+    # We must shuffle all the lists, and lists of the same size must have the same shuffling.
+    # This way entangled lists of (shard, shard_metadata) are still in the right order.
+
+    # First, let's generate the shuffled indices per list size
+    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
+    indices_per_size = {}
+    for size in list_sizes:
+        indices_per_size[size] = list(range(size))
+        rng.shuffle(indices_per_size[size])
+    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
+    shuffled_kwargs = dict(gen_kwargs)
+    for key, value in shuffled_kwargs.items():
+        if isinstance(value, list):
+            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
+    return shuffled_kwargs
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/stratify.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/stratify.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0967aa1abb790f741af5ff920c67e615d1b01da
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/stratify.py
@@ -0,0 +1,107 @@
+import numpy as np
+
+
+def approximate_mode(class_counts, n_draws, rng):
+    """Computes approximate mode of multivariate hypergeometric.
+    This is an approximation to the mode of the multivariate
+    hypergeometric given by class_counts and n_draws.
+    It shouldn't be off by more than one.
+    It is the most likely outcome of drawing n_draws many
+    samples from the population given by class_counts.
+    Args
+    ----------
+    class_counts : ndarray of int
+        Population per class.
+    n_draws : int
+        Number of draws (samples to draw) from the overall population.
+    rng : random state
+        Used to break ties.
+    Returns
+    -------
+    sampled_classes : ndarray of int
+        Number of samples drawn from each class.
+ np.sum(sampled_classes) == n_draws + + """ + # this computes a bad approximation to the mode of the + # multivariate hypergeometric given by class_counts and n_draws + continuous = n_draws * class_counts / class_counts.sum() + # floored means we don't overshoot n_samples, but probably undershoot + floored = np.floor(continuous) + # we add samples according to how much "left over" probability + # they had, until we arrive at n_samples + need_to_add = int(n_draws - floored.sum()) + if need_to_add > 0: + remainder = continuous - floored + values = np.sort(np.unique(remainder))[::-1] + # add according to remainder, but break ties + # randomly to avoid biases + for value in values: + (inds,) = np.where(remainder == value) + # if we need_to_add less than what's in inds + # we draw randomly from them. + # if we need to add more, we add them all and + # go to the next value + add_now = min(len(inds), need_to_add) + inds = rng.choice(inds, size=add_now, replace=False) + floored[inds] += 1 + need_to_add -= add_now + if need_to_add == 0: + break + return floored.astype(np.int64) + + +def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10): + """ + + Provides train/test indices to split data in train/test sets. + It's reference is taken from StratifiedShuffleSplit implementation + of scikit-learn library. + + Args + ---------- + + n_train : int, + represents the absolute number of train samples. + + n_test : int, + represents the absolute number of test samples. + + random_state : int or RandomState instance, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + + n_splits : int, default=10 + Number of re-shuffling & splitting iterations. 
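+
+    Example (illustrative sketch, not part of the original docstring)::
+
+        rng = np.random.default_rng(42)
+        y = np.array([0, 0, 0, 1, 1, 1])
+        train_idx, test_idx = next(stratified_shuffle_split_generate_indices(y, n_train=4, n_test=2, rng=rng))
+        # train/test keep the class proportions of `y`: 2 samples per class in train, 1 per class in test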
+ """ + classes, y_indices = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + class_counts = np.bincount(y_indices) + if np.min(class_counts) < 2: + raise ValueError("Minimum class count error") + if n_train < n_classes: + raise ValueError( + "The train_size = %d should be greater or " "equal to the number of classes = %d" % (n_train, n_classes) + ) + if n_test < n_classes: + raise ValueError( + "The test_size = %d should be greater or " "equal to the number of classes = %d" % (n_test, n_classes) + ) + class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]) + for _ in range(n_splits): + n_i = approximate_mode(class_counts, n_train, rng) + class_counts_remaining = class_counts - n_i + t_i = approximate_mode(class_counts_remaining, n_test, rng) + + train = [] + test = [] + + for i in range(n_classes): + permutation = rng.permutation(class_counts[i]) + perm_indices_class_i = class_indices[i].take(permutation, mode="clip") + train.extend(perm_indices_class_i[: n_i[i]]) + test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]) + train = rng.permutation(train) + test = rng.permutation(test) + + yield train, test diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/utils/track.py b/env-llmeval/lib/python3.10/site-packages/datasets/utils/track.py new file mode 100644 index 0000000000000000000000000000000000000000..11a3787c7d8595cc7160994973f28db1f709b3b2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/datasets/utils/track.py @@ -0,0 +1,49 @@ +from collections.abc import Iterator +from typing import Iterable + + +class tracked_str(str): + origins = {} + + def set_origin(self, origin: str): + if super().__repr__() not in self.origins: + self.origins[super().__repr__()] = origin + + def get_origin(self): + return self.origins.get(super().__repr__(), str(self)) + + def __repr__(self) -> str: + if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self: + return super().__repr__() + else: + return f"{str(self)} (origin={self.origins[super().__repr__()]})" + + +class tracked_list(list): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.last_item = None + + def __iter__(self) -> Iterator: + for x in super().__iter__(): + self.last_item = x + yield x + self.last_item = None + + def __repr__(self) -> str: + if self.last_item is None: + return super().__repr__() + else: + return f"{self.__class__.__name__}(current={self.last_item})" + + +class TrackedIterable(Iterable): + def __init__(self) -> None: + super().__init__() + self.last_item = None + + def __repr__(self) -> str: + if self.last_item is None: + super().__repr__() + else: + return f"{self.__class__.__name__}(current={self.last_item})"