diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__init__.py b/env-llmeval/lib/python3.10/site-packages/evaluate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c7ce8f4c4318c2e5fe8232e9b981bc3299362ac1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/__init__.py @@ -0,0 +1,51 @@ +# flake8: noqa +# Copyright 2020 The HuggingFace Evaluate Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +# pylint: enable=line-too-long +# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position + +__version__ = "0.4.1" + +from packaging import version + + +SCRIPTS_VERSION = "main" if version.parse(__version__).is_devrelease else __version__ + +del version + +from .evaluation_suite import EvaluationSuite +from .evaluator import ( + AudioClassificationEvaluator, + AutomaticSpeechRecognitionEvaluator, + Evaluator, + ImageClassificationEvaluator, + QuestionAnsweringEvaluator, + SummarizationEvaluator, + Text2TextGenerationEvaluator, + TextClassificationEvaluator, + TextGenerationEvaluator, + TokenClassificationEvaluator, + TranslationEvaluator, + evaluator, +) +from .hub import push_to_hub +from .info import ComparisonInfo, EvaluationModuleInfo, MeasurementInfo, MetricInfo +from .inspect import inspect_evaluation_module, list_evaluation_modules +from .loading import load +from .module import CombinedEvaluations, Comparison, EvaluationModule, Measurement, Metric, combine +from .saving import save +from .utils import * +from .utils import gradio, logging diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..becd542021aa1070bc285977c672ee4a57088ab1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceddbc2605c50c63f4cae430f28eeb874d0a8282 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cfc01d3ffddcbe6fa1cfcc52f515e708c3c7c06 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/hub.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..c41658ce04bcc740f073d446be78bbbea70934d5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/info.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a443db9b988f78adfab566204170664efc0b7fac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/inspect.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..339b43ff1cfd6d4f75c7de542156a22ad69d60b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/loading.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3776ead3cdf76efe793cc24726b6f22607df394 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/module.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b75f11ad922f97ceffe79575805a3feb1c90b265 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/naming.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aa96377a1cea8313f05d874d8ba5768d04b438e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/saving.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84ec2e068e3270e72a64cafaf1b4e839c3ad6087 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/__pycache__/visualization.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__init__.py b/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..759b7bc6a9e4c070058aacbbec0246a89cae6467 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbbff0e5f4eec608f44ee9f8de32a6c820debcaa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py b/env-llmeval/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..80593c4dfa0f96c8d3ea5ff6131c13c0a94181eb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/commands/evaluate_cli.py @@ -0,0 +1,137 @@ +import argparse +import os +import subprocess +from pathlib import Path + +from cookiecutter.main import cookiecutter +from huggingface_hub import HfApi, Repository, create_repo + +from evaluate.utils.logging import get_logger + + +logger = get_logger(__name__) + +INSTRUCTIONS = """\ +A new repository for your module "{module_name}" of type "{module_type}" has been created at {output_dir} and pushed to the Hugging Face Hub: {repo_url}. + +Here are the next steps: +- implement the module logic in {module_slug}/{module_slug}.py +- document your module in {module_slug}/README.md +- add test cases for your module in {module_slug}/tests.py +- if your module has any dependencies update them in {module_slug}/requirements.txt + +You can test your module's widget locally by running: + +``` +python {output_dir}/{module_slug}/app.py +``` + +When you are happy with your changes you can push your changes with the following commands to the Hugging Face Hub: + +``` +cd {output_dir}/{module_slug} +git add . +git commit -m "Updating module" +git push +``` + +You should then see the update widget on the Hugging Face Hub: {repo_url} +And you can load your module in Python with the following code: + +``` +from evaluate import load +module = load("{namespace}/{module_slug}") +``` +""" + + +def main(): + parser = argparse.ArgumentParser("HuggingFace Evaluate CLI tool", usage="evaluate-cli []") + subparsers = parser.add_subparsers() + parser_create = subparsers.add_parser("create", help="Create new evaluation module.") + parser_create.add_argument( + "module_name", type=str, help='Pretty name of new evaluation module, e.g. "Recall" or "Exact Match".' + ) + parser_create.add_argument( + "--module_type", + default="metric", + type=str, + help="Type of module, has to be one of [metric|comparison|measurement].", + ) + parser_create.add_argument( + "--dataset_name", default="", type=str, help="Name of dataset if evaluation module is dataset specific." + ) + parser_create.add_argument("--module_description", type=str, help="Short description of evaluation module.") + parser_create.add_argument("--output_dir", default=Path.cwd(), type=str, help="Path to output directory.") + parser_create.add_argument( + "--organization", default=None, type=str, help="Organization on the Hub to push evaluation module to." 
+ ) + parser_create.add_argument("--private", action="store_true", help="Sets evaluation module repository to private.") + args = vars(parser.parse_args()) + + if args["module_type"] not in ["metric", "comparison", "measurement"]: + raise ValueError("The module_type needs to be one of metric, comparison, or measurement") + + if "-" in args["module_name"]: + raise ValueError("Hyphens ('-') are not allowed in module names.") + + output_dir = Path(args["output_dir"]) + organization = args["organization"] + module_slug = args["module_name"].lower().replace(" ", "_") + + if organization is None: + hfapi = HfApi() + namespace = hfapi.whoami()["name"] + else: + namespace = organization + args["namespace"] = namespace + repo_url = f"https://huggingface.co/spaces/{namespace}/{module_slug}" + + try: + create_repo(namespace + "/" + module_slug, repo_type="space", space_sdk="gradio", private=args["private"]) + except Exception as exception: + logger.error( + f"Could not create Space for module at hf.co/spaces/{namespace}/{module_slug}. Make sure this space does not exist already." + ) + raise exception + subprocess.run( + f"git clone {repo_url}".split(), + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + check=True, + encoding="utf-8", + cwd=output_dir, + env=os.environ.copy(), + ) + + repo = Repository( + local_dir=output_dir / module_slug, + ) + + cookiecutter( + "https://github.com/huggingface/evaluate/", + directory="templates", + no_input=True, + extra_context=args, + output_dir=output_dir, + overwrite_if_exists=True, + ) + + repo.git_add() + repo.git_commit("add module default template") + repo.git_push() + + print( + INSTRUCTIONS.format( + module_name=args["module_name"], + module_type=args["module_type"], + module_slug=module_slug, + namespace=namespace, + repo_url=repo_url, + output_dir=output_dir, + ) + ) + + +if __name__ == "__main__": + main() diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/config.py b/env-llmeval/lib/python3.10/site-packages/evaluate/config.py new file mode 100644 index 0000000000000000000000000000000000000000..4909fa251ff82893d7c3c536bb111ae947735a8a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/config.py @@ -0,0 +1,192 @@ +import importlib +import os +import platform +from pathlib import Path + +from packaging import version + +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +# Metrics +S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics" +CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric" +REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/metrics/{path}/{name}" +REPO_MEASUREMENTS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/measurements/{path}/{name}" +REPO_COMPARISONS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/comparisons/{path}/{name}" + +# Evaluation module types +EVALUATION_MODULE_TYPES = ["metric", "comparison", "measurement"] + +# Hub +HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") +HF_LIST_ENDPOINT = HF_ENDPOINT + "/api/spaces?filter={type}" +HUB_EVALUATE_URL = HF_ENDPOINT + "/spaces/{path}/resolve/{revision}/{name}" +HUB_DEFAULT_VERSION = "main" + +PY_VERSION = version.parse(platform.python_version()) + +if PY_VERSION < version.parse("3.8"): + import importlib_metadata +else: + import importlib.metadata as importlib_metadata + +# General environment variables accepted values for booleans 
+ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) + + +# Imports +PANDAS_VERSION = version.parse(importlib_metadata.version("pandas")) +PYARROW_VERSION = version.parse(importlib_metadata.version("pyarrow")) + +USE_TF = os.environ.get("USE_TF", "AUTO").upper() +USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() +USE_JAX = os.environ.get("USE_JAX", "AUTO").upper() + +TORCH_VERSION = "N/A" +TORCH_AVAILABLE = False + +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None + if TORCH_AVAILABLE: + try: + TORCH_VERSION = version.parse(importlib_metadata.version("torch")) + logger.info(f"PyTorch version {TORCH_VERSION} available.") + except importlib_metadata.PackageNotFoundError: + pass +else: + logger.info("Disabling PyTorch because USE_TF is set") + +TF_VERSION = "N/A" +TF_AVAILABLE = False + +if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: + TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None + if TF_AVAILABLE: + # For the metadata, we have to look for both tensorflow and tensorflow-cpu + for package in [ + "tensorflow", + "tensorflow-cpu", + "tensorflow-gpu", + "tf-nightly", + "tf-nightly-cpu", + "tf-nightly-gpu", + "intel-tensorflow", + "tensorflow-rocm", + "tensorflow-macos", + ]: + try: + TF_VERSION = version.parse(importlib_metadata.version(package)) + except importlib_metadata.PackageNotFoundError: + continue + else: + break + else: + TF_AVAILABLE = False + if TF_AVAILABLE: + if TF_VERSION.major < 2: + logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.") + TF_AVAILABLE = False + else: + logger.info(f"TensorFlow version {TF_VERSION} available.") +else: + logger.info("Disabling Tensorflow because USE_TORCH is set") + + +JAX_VERSION = "N/A" +JAX_AVAILABLE = False + +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + JAX_AVAILABLE = importlib.util.find_spec("jax") is not None + if JAX_AVAILABLE: + try: + JAX_VERSION = version.parse(importlib_metadata.version("jax")) + logger.info(f"JAX version {JAX_VERSION} available.") + except importlib_metadata.PackageNotFoundError: + pass +else: + logger.info("Disabling JAX because USE_JAX is set to False") + + +# Cache location +DEFAULT_XDG_CACHE_HOME = "~/.cache" +XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME) +DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface") +HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME)) + +DEFAULT_HF_EVALUATE_CACHE = os.path.join(HF_CACHE_HOME, "evaluate") +HF_EVALUATE_CACHE = Path(os.getenv("HF_EVALUATE_CACHE", DEFAULT_HF_EVALUATE_CACHE)) + +DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics") +HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE)) + +DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules") +HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE)) + +DOWNLOADED_DATASETS_DIR = "downloads" +DEFAULT_DOWNLOADED_EVALUATE_PATH = os.path.join(HF_EVALUATE_CACHE, DOWNLOADED_DATASETS_DIR) +DOWNLOADED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_EVALUATE_PATH", DEFAULT_DOWNLOADED_EVALUATE_PATH)) + +EXTRACTED_EVALUATE_DIR = "extracted" +DEFAULT_EXTRACTED_EVALUATE_PATH = os.path.join(DEFAULT_DOWNLOADED_EVALUATE_PATH, EXTRACTED_EVALUATE_DIR) +EXTRACTED_EVALUATE_PATH = 
Path(os.getenv("HF_DATASETS_EXTRACTED_EVALUATE_PATH", DEFAULT_EXTRACTED_EVALUATE_PATH)) + +# Download count for the website +HF_UPDATE_DOWNLOAD_COUNTS = ( + os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES +) + +# Offline mode +HF_EVALUATE_OFFLINE = os.environ.get("HF_EVALUATE_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES + + +# File names +LICENSE_FILENAME = "LICENSE" +METRIC_INFO_FILENAME = "metric_info.json" +DATASETDICT_JSON_FILENAME = "dataset_dict.json" + +MODULE_NAME_FOR_DYNAMIC_MODULES = "evaluate_modules" + +HF_HUB_ALLOWED_TASKS = [ + "image-classification", + "translation", + "image-segmentation", + "fill-mask", + "automatic-speech-recognition", + "token-classification", + "sentence-similarity", + "audio-classification", + "question-answering", + "summarization", + "zero-shot-classification", + "table-to-text", + "feature-extraction", + "other", + "multiple-choice", + "text-classification", + "text-to-image", + "text2text-generation", + "zero-shot-image-classification", + "tabular-classification", + "tabular-regression", + "image-to-image", + "tabular-to-text", + "unconditional-image-generation", + "text-retrieval", + "text-to-speech", + "object-detection", + "audio-to-audio", + "text-generation", + "conversational", + "table-question-answering", + "visual-question-answering", + "image-to-text", + "reinforcement-learning", + "voice-activity-detection", + "time-series-forecasting", + "document-question-answering", +] diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a306d8068a6d66b60fbcc5420bf0cbb334c36305 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__init__.py @@ -0,0 +1,128 @@ +import importlib +import inspect +from dataclasses import dataclass +from pathlib import Path +from typing import Callable, Dict, Optional, Union + +from datasets import Dataset, DownloadConfig, DownloadMode, load_dataset +from datasets.utils.version import Version + +from ..evaluator import evaluator +from ..loading import evaluation_module_factory +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +@dataclass +class SubTask: + task_type: str + data: Optional[Union[str, Dataset]] = None + subset: Optional[str] = None + split: Optional[str] = None + data_preprocessor: Optional[Callable] = None + args_for_task: Optional[dict] = None + + def __post_init__(self): + if type(self.task_type) is not str: + raise ValueError(f"'task_type' must be type 'str', got {type(self.task_type)}") + if type(self.data) not in [Dataset, str]: + raise ValueError( + f"'data' must be an already-instantiated Dataset object or type 'str', got {type(self.data)}" + ) + if self.subset and type(self.subset) is not str: + raise ValueError(f"'subset' must be type 'str', got {type(self.subset)}") + if self.split and type(self.split) is not str: + raise ValueError(f"'split' must be type 'str', got {type(self.split)}") + if self.data_preprocessor and not callable(self.data_preprocessor): + raise ValueError(f"'data_preprocessor' must be a Callable', got {self.data_preprocessor}") + if self.args_for_task and type(self.args_for_task) is not dict: + raise ValueError(f"'args_for_task' must be type 'dict', got {type(self.args_for_task)}") + + +def import_main_class(module_path): + """Import a module at module_path and return the 
EvaluationSuite class""" + module = importlib.import_module(module_path) + + module_main_cls = None + for name, obj in module.__dict__.items(): + if isinstance(obj, type) and obj.__name__ == "Suite": + if inspect.isabstract(obj): + continue + module_main_cls = obj + break + + return module_main_cls + + +class EvaluationSuite: + """ + This class instantiates an evaluation suite made up of multiple tasks, where each task consists of a dataset and + an associated metric, and runs evaluation on a model or pipeline. Evaluation suites can be a Python script found + either locally or uploaded as a Space on the Hugging Face Hub. + Usage: + ```python + from evaluate import EvaluationSuite + suite = EvaluationSuite.load("evaluate/evaluation-suite-ci") + results = suite.run("lvwerra/distilbert-imdb") + ``` + """ + + def __init__(self, name): + self.name = name + + @staticmethod + def load( + path: str, + download_mode: Optional[DownloadMode] = None, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + ): + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + evaluation_module = evaluation_module_factory( + path, module_type=None, revision=revision, download_config=download_config, download_mode=download_mode + ) + name = Path(path).stem + evaluation_cls = import_main_class(evaluation_module.module_path) + evaluation_instance = evaluation_cls(name) + + return evaluation_instance + + def __repr__(self): + self.tasks = [str(task) for task in self.suite] + return f'EvaluationSuite name: "{self.name}", ' f"Tasks: {self.tasks})" + + def assert_suite_nonempty(self): + if not self.suite: + raise ValueError( + "No evaluation tasks found. The EvaluationSuite must include at least one SubTask definition." 
+ ) + + def run( + self, model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"] # noqa: F821 + ) -> Dict[str, float]: + + self.assert_suite_nonempty() + + results_all = [] + for task in self.suite: + + task_name = task.data + + if task.data_preprocessor: # task requires extra preprocessing + ds = load_dataset(task.data, name=task.subset, split=task.split) + task.data = ds.map(task.data_preprocessor) + + task_evaluator = evaluator(task.task_type) + args_for_task = task.args_for_task + args_for_task["model_or_pipeline"] = model_or_pipeline + args_for_task["data"] = task.data + args_for_task["subset"] = task.subset + args_for_task["split"] = task.split + results = task_evaluator.compute(**args_for_task) + + results["task_name"] = task_name + "/" + task.subset if task.subset else task_name + results["data_preprocessor"] = str(task.data_preprocessor) if task.data_preprocessor is not None else None + results_all.append(results) + return results_all diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..472baf01b410380994cda18368e926bcc6c59047 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__init__.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a2fe4be8a1332417fb8515f019c1b7e8c41a58bf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__init__.py @@ -0,0 +1,140 @@ +# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
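# A minimal illustrative sketch (hypothetical file, dataset and metric choices, not taken from
# this package) of how a custom evaluation suite script is laid out: `import_main_class` above
# looks for a class literally named `Suite`, and `EvaluationSuite.run` iterates over
# `self.suite`, which should hold `SubTask` definitions whose `args_for_task` are forwarded to
# the task evaluator's `compute`.
from evaluate.evaluation_suite import EvaluationSuite, SubTask


class Suite(EvaluationSuite):
    def __init__(self, name):
        super().__init__(name)
        self.suite = [
            SubTask(
                task_type="text-classification",
                data="imdb",  # a dataset name or a pre-loaded `datasets.Dataset`
                split="test[:10]",
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    "label_mapping": {"LABEL_0": 0, "LABEL_1": 1},
                },
            )
        ]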
+ + +try: + from transformers.pipelines import SUPPORTED_TASKS as SUPPORTED_PIPELINE_TASKS + from transformers.pipelines import TASK_ALIASES + from transformers.pipelines import check_task as check_pipeline_task + + TRANSFORMERS_AVAILABLE = True +except ImportError: + TRANSFORMERS_AVAILABLE = False + +from typing import Dict, List + +from .audio_classification import AudioClassificationEvaluator +from .automatic_speech_recognition import AutomaticSpeechRecognitionEvaluator +from .base import Evaluator +from .image_classification import ImageClassificationEvaluator +from .question_answering import QuestionAnsweringEvaluator +from .text2text_generation import SummarizationEvaluator, Text2TextGenerationEvaluator, TranslationEvaluator +from .text_classification import TextClassificationEvaluator +from .text_generation import TextGenerationEvaluator +from .token_classification import TokenClassificationEvaluator + + +SUPPORTED_EVALUATOR_TASKS = { + "text-classification": { + "implementation": TextClassificationEvaluator, + "default_metric_name": "accuracy", + }, + "image-classification": { + "implementation": ImageClassificationEvaluator, + "default_metric_name": "accuracy", + }, + "question-answering": { + "implementation": QuestionAnsweringEvaluator, + "default_metric_name": "squad", + }, + "token-classification": { + "implementation": TokenClassificationEvaluator, + "default_metric_name": "seqeval", + }, + "text-generation": { + "implementation": TextGenerationEvaluator, + "default_metric_name": "word_count", + }, + "text2text-generation": { + "implementation": Text2TextGenerationEvaluator, + "default_metric_name": "bleu", + }, + "summarization": { + "implementation": SummarizationEvaluator, + "default_metric_name": "rouge", + }, + "translation": { + "implementation": TranslationEvaluator, + "default_metric_name": "bleu", + }, + "automatic-speech-recognition": { + "implementation": AutomaticSpeechRecognitionEvaluator, + "default_metric_name": "wer", + }, + "audio-classification": { + "implementation": AudioClassificationEvaluator, + "default_metric_name": "accuracy", + }, +} + + +def get_supported_tasks() -> List[str]: + """ + Returns a list of supported task strings. + """ + return list(SUPPORTED_EVALUATOR_TASKS.keys()) + + +def check_task(task: str) -> Dict: + """ + Checks an incoming task string, to validate it's correct and returns the default Evaluator class and default metric + name. It first performs a check to validate that the string is a valid `Pipeline` task, then it checks if it's a + valid `Evaluator` task. `Evaluator` tasks are a subset of `Pipeline` tasks. + Args: + task (`str`): + The task defining which evaluator will be returned. Currently accepted tasks are: + - `"image-classification"` + - `"question-answering"` + - `"text-classification"` (alias `"sentiment-analysis"` available) + - `"token-classification"` + Returns: + task_defaults: `dict`, contains the implementation class of a given Evaluator and the default metric name. + """ + if task in TASK_ALIASES: + task = TASK_ALIASES[task] + if not check_pipeline_task(task): + raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.") + if task in SUPPORTED_EVALUATOR_TASKS.keys() and task in SUPPORTED_PIPELINE_TASKS.keys(): + return SUPPORTED_EVALUATOR_TASKS[task] + raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.") + + +def evaluator(task: str = None) -> Evaluator: + """ + Utility factory method to build an [`Evaluator`].
+ Evaluators encapsulate a task and a default metric name. They leverage `pipeline` functionality from `transformers` + to simplify the evaluation of multiple combinations of models, datasets and metrics for a given task. + Args: + task (`str`): + The task defining which evaluator will be returned. Currently accepted tasks are: + - `"image-classification"`: will return a [`ImageClassificationEvaluator`]. + - `"question-answering"`: will return a [`QuestionAnsweringEvaluator`]. + - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationEvaluator`]. + - `"token-classification"`: will return a [`TokenClassificationEvaluator`]. + Returns: + [`Evaluator`]: An evaluator suitable for the task. + Examples: + ```python + >>> from evaluate import evaluator + >>> # Sentiment analysis evaluator + >>> evaluator("sentiment-analysis") + ```""" + if not TRANSFORMERS_AVAILABLE: + raise ImportError( + "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[transformers]`." + ) + targeted_task = check_task(task) + evaluator_class = targeted_task["implementation"] + default_metric_name = targeted_task["default_metric_name"] + return evaluator_class(task=task, default_metric_name=default_metric_name) diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87bdb6002f80e1bdf3f18840777482fc73085f2d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3988a8c88fcb01e9d76888bdc13be7d9224f31e5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ffd82f1301e23df3caf3c89515d384c52e9fb1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51194387f4480e607932461e4b1075dd1ada07ae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59780319fb5b886de31dbf89b39dfc7bbb50f983 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2ff4b8bb2a4dad7113a2069e2cff78fcaeaa1d9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..563a07532e828a13b10ea3d73a0bfbdb7d660844 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c38d3eb42140d103cc23bbefe4078da680dff29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efe604a49dfc61459779e9ed84cb0f0ae6440a01 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7ddd37564fcefbed6019d40f5119d4a581a92d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1d60b7acc8b1a7a34ecaf14e2b079d88c8b7425 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..685fb9fd8515f8506b89e9375948fea181f79a8f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py @@ -0,0 +1,151 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from numbers import Number +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + + + + Remember that, in order to process audio files, you need ffmpeg installed (https://ffmpeg.org/download.html) + + + + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + + >>> task_evaluator = evaluator("audio-classification") + >>> data = load_dataset("superb", 'ks', split="test[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="superb/wav2vec2-base-superb-ks", + >>> data=data, + >>> label_column="label", + >>> input_column="file", + >>> metric="accuracy", + >>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"} + >>> ) + ``` + + + + The evaluator supports raw audio data as well, in the form of a numpy array. However, be aware that calling + the audio column automatically decodes and resamples the audio files, which can be slow for large datasets. + + + + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + + >>> task_evaluator = evaluator("audio-classification") + >>> data = load_dataset("superb", 'ks', split="test[:40]") + >>> data = data.map(lambda example: {"audio": example["audio"]["array"]}) + >>> results = task_evaluator.compute( + >>> model_or_pipeline="superb/wav2vec2-base-superb-ks", + >>> data=data, + >>> label_column="label", + >>> input_column="audio", + >>> metric="accuracy", + >>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"} + >>> ) + ``` +""" + + +class AudioClassificationEvaluator(Evaluator): + """ + Audio classification evaluator. + This audio classification evaluator can currently be loaded from [`evaluator`] using the default task name + `audio-classification`. + Methods in this class assume a data format compatible with the [`transformers.AudioClassificationPipeline`].
+ """ + + PIPELINE_KWARGS = {} + + def __init__(self, task="audio-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions] + pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label] + + return {"predictions": pred_label} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "file", + label_column: str = "label", + label_mapping: Optional[Dict[str, Number]] = None, + ) -> Tuple[Dict[str, float], Any]: + + """ + input_column (`str`, defaults to `"file"`): + The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`): + We want to map class labels defined by the model in the pipeline to values consistent with those + defined in the `label_column` of the `data` dataset. + """ + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + label_mapping=label_mapping, + ) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py new file mode 100644 index 0000000000000000000000000000000000000000..ee423826cdd7bac384080b3db8a369cc59a53283 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py @@ -0,0 +1,112 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
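# A hedged illustration (made-up scores; assumes `transformers` and `scipy` are installed,
# since `Evaluator.__init__` checks for both) of what the `predictions_processor` defined
# above does: the audio-classification pipeline returns a list of {"label", "score"} dicts
# per sample, and the processor keeps the top-scoring label, mapping it through
# `label_mapping` so it matches the dataset's label ids.
from evaluate.evaluator import AudioClassificationEvaluator

raw_pipeline_output = [
    [{"label": "yes", "score": 0.8}, {"label": "no", "score": 0.2}],
    [{"label": "no", "score": 0.9}, {"label": "yes", "score": 0.1}],
]
audio_evaluator = AudioClassificationEvaluator()
print(audio_evaluator.predictions_processor(raw_pipeline_output, label_mapping={"yes": 0, "no": 1}))
# -> {'predictions': [0, 1]}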
+ +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("automatic-speech-recognition") + >>> data = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="validation[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="https://huggingface.co/openai/whisper-tiny.en", + >>> data=data, + >>> input_column="path", + >>> label_column="sentence", + >>> metric="wer", + >>> ) + ``` +""" + + +class AutomaticSpeechRecognitionEvaluator(Evaluator): + """ + Automatic speech recognition evaluator. + This automatic speech recognition evaluator can currently be loaded from [`evaluator`] using the default task name + `automatic-speech-recognition`. + Methods in this class assume a data format compatible with the [`AutomaticSpeechRecognitionPipeline`]. + """ + + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="automatic-speech-recognition", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + return {"predictions": [pred["text"] for pred in predictions]} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "path", + label_column: str = "sentence", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + """ + input_column (`str`, defaults to `"path"`): + the name of the column containing the input audio path in the dataset specified by `data`. + label_column (`str`, defaults to `"sentence"`): + the name of the column containing the labels in the dataset specified by `data`. + generation_kwargs (`Dict`, *optional*, defaults to `None`): + The generation kwargs are passed to the pipeline and set the text generation strategy. 
+ """ + + if generation_kwargs is not None: + self.PIPELINE_KWARGS.update(generation_kwargs) + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + ) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/base.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/base.py new file mode 100644 index 0000000000000000000000000000000000000000..04a370eb46efe38c82216213f09358f3c3e4eab2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/base.py @@ -0,0 +1,544 @@ +# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod +from numbers import Number +from typing import Any, Callable, Dict, List, Optional, Union + +# Lint as: python3 +from datasets import Dataset, load_dataset + +from evaluate.evaluator.utils import choose_split + + +try: + from scipy.stats import bootstrap + + SCIPY_AVAILABLE = True +except ImportError: + SCIPY_AVAILABLE = False + +try: + import transformers + from transformers import Pipeline, pipeline + + TRANSFORMERS_AVAILABLE = True +except ImportError: + TRANSFORMERS_AVAILABLE = False + +from time import perf_counter + +from typing_extensions import Literal + +from ..loading import load +from ..module import EvaluationModule +from ..utils.logging import get_logger +from .utils import DatasetColumn + + +logger = get_logger(__name__) + + +EVALUTOR_COMPUTE_START_DOCSTRING = r""" + Compute the metric for a given pipeline and dataset combination. + Args: + model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`): + If the argument is not specified, we initialize the default pipeline for the task (in this case + `text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or + is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the + argument specifies a pre-initialized pipeline. + data (`str` or `Dataset`, defaults to `None`): + Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset + name, and load it. Otherwise we assume it represents a pre-loaded dataset. + subset (`str`, defaults to `None`): + Defines which dataset subset to load. If `None` is passed the default subset is loaded. + split (`str`, defaults to `None`): + Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function. + metric (`str` or `EvaluationModule`, defaults to `None`): + Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and + load it.
Otherwise we assume it represents a pre-loaded metric. + tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`): + Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for + which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore + this argument. + strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"): + specifies the evaluation strategy. Possible values are: + - `"simple"` - we evaluate the metric and return the scores. + - `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each + of the returned metric keys, using `scipy`'s `bootstrap` method + https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html. + confidence_level (`float`, defaults to `0.95`): + The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. + n_resamples (`int`, defaults to `9999`): + The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. + device (`int`, defaults to `None`): + Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive + integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and + CUDA:0 used if available, CPU otherwise. + random_state (`int`, *optional*, defaults to `None`): + The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for + debugging. +""" + +EVALUATOR_COMPUTE_RETURN_DOCSTRING = r""" + Return: + A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the + `"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict` + containing the score, the confidence interval and the standard error calculated for each metric key. +""" + + +class Evaluator(ABC): + """ + The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across + different evaluators. + Base class implementing evaluator operations. + """ + + PIPELINE_KWARGS = {} + METRIC_KWARGS = {} + + def __init__(self, task: str, default_metric_name: str = None): + if not TRANSFORMERS_AVAILABLE: + raise ImportError( + "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`." + ) + if not SCIPY_AVAILABLE: + raise ImportError( + "If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`." + ) + self.task = task + self.default_metric_name = default_metric_name + + @staticmethod + def _compute_confidence_interval( + metric, + metric_inputs, + metric_keys: List[str], + confidence_level: float = 0.95, + n_resamples: int = 9999, + random_state: Optional[int] = None, + ) -> Dict[str, Any]: + """ + A utility function enabling the confidence interval calculation for metrics computed + by the evaluator based on `scipy`'s `bootstrap` method.
+ """ + + # bootstrap only works with functions that use args and no kwargs + def build_args_metric(metric, key, **kwargs): + def args_metric(*args): + return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key] + + return args_metric + + bootstrap_dict = {} + for key in metric_keys: + bs = bootstrap( + data=list(metric_inputs.values()), + statistic=build_args_metric(metric, key, **metric_inputs), + paired=True, + vectorized=False, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + bootstrap_dict[key] = { + "confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high), + "standard_error": bs.standard_error, + } + return bootstrap_dict + + @staticmethod + def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]: + """ + A utility function computing time performance metrics: + - `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds, + - `samples_per_second` - pipeline throughput in the number of samples per second. + - `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample, + + """ + latency = end_time - start_time + throughput = num_samples / latency + latency_sample = 1.0 / throughput + + return { + "total_time_in_seconds": latency, + "samples_per_second": throughput, + "latency_in_seconds": latency_sample, + } + + @staticmethod + def _infer_device() -> int: + """Helper function to check if GPU or CPU is available for inference.""" + # try infer with torch first + try: + import torch + + if torch.cuda.is_available(): + device = 0 # first GPU + else: + device = -1 # CPU + except ImportError: + # if not available try TF + try: + import tensorflow as tf + + if len(tf.config.list_physical_devices("GPU")) > 0: + device = 0 # first GPU + else: + device = -1 # CPU + except ImportError: + device = -1 + + if device == -1: + logger.info("No GPU found. The default device for pipeline inference is set to CPU.") + else: + logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).") + + return device + + @abstractmethod + def predictions_processor(self, *args, **kwargs): + """ + A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric. 
+ """ + raise NotImplementedError() + + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + label_mapping: Optional[Dict[str, Number]] = None, + ) -> Dict[str, float]: + + result = {} + + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + # Prepare inputs + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column) + pipe = self.prepare_pipeline( + model_or_pipeline=model_or_pipeline, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + device=device, + ) + metric = self.prepare_metric(metric) + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, pipe_inputs) + predictions = self.predictions_processor(predictions, label_mapping) + + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + # TODO: To clarify why `wer` and `cer` return float + # even though metric.compute contract says that it + # returns Optional[dict]. + if type(metric_results) == float: + metric_results = {metric.name: metric_results} + + result.update(metric_results) + result.update(perf_results) + + return result + + @staticmethod + def check_for_mismatch_in_device_setup(device, model_or_pipeline): + if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline): + if model_or_pipeline.device.type == "cpu": + raise ValueError( + "The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an " + "accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during " + "initialization to use an accelerator, or pass `device=None` to `compute`. " + ) + elif device != model_or_pipeline.device.index: + raise ValueError( + f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`." + ) + + def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]): + """ + Ensure the columns required for the evaluation are present in the dataset. + + Args: + data (`str` or [`Dataset`]): + Specifies the dataset we will run evaluation on. + columns_names (`List[str]`): + List of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method, + while the values are the column names to check. 
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> from evaluate import evaluator
+        >>> data = load_dataset("rotten_tomatoes", split="train")
+        >>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
+        ```
+        """
+        for input_name, column_name in columns_names.items():
+            if column_name not in data.column_names:
+                raise ValueError(
+                    f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}."
+                )
+
+    @staticmethod
+    def get_dataset_split(data, subset=None, split=None):
+        """
+        Infers which split to use if `None` is given.
+
+        Args:
+            data (`str`):
+                Name of dataset.
+            subset (`str`):
+                Name of config for datasets with multiple configurations (e.g. 'glue/cola').
+            split (`str`, defaults to `None`):
+                Split to use.
+        Returns:
+            `split`: `str` containing which split to use
+
+        Example:
+
+        ```py
+        >>> from evaluate import evaluator
+        >>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
+        WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
+        'test'
+        ```
+        """
+        if split is None:
+            split = choose_split(data, subset)
+            logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}")
+        return split
+
+    def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None):
+        """
+        Load dataset with given subset and split.
+        Args:
+            data ([`Dataset`] or `str`, defaults to `None`):
+                Specifies the dataset we will run evaluation on. If it is of
+                type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
+            subset (`str`, defaults to `None`):
+                Specifies dataset subset to be passed to `name` in `load_dataset`. To be
+                used with datasets with several configurations (e.g. glue/sst2).
+            split (`str`, defaults to `None`):
+                User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
+                If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
+        Returns:
+            data ([`Dataset`]): Loaded dataset which will be used for evaluation.
+
+        Example:
+
+        ```py
+        >>> from evaluate import evaluator
+        >>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
+        Dataset({
+            features: ['text', 'label'],
+            num_rows: 8530
+        })
+        ```
+        """
+        if isinstance(data, str):
+            split = self.get_dataset_split(data, subset, split)
+            data = load_dataset(data, name=subset, split=split)
+            return data
+        elif isinstance(data, Dataset):
+            if split is not None or subset is not None:
+                logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.")
+            return data
+        else:
+            raise ValueError(
+                "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
+            )
+
+    def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
+        """
+        Prepare data.
+
+        Args:
+            data ([`Dataset`]):
+                Specifies the dataset we will run evaluation on.
+            input_column (`str`, defaults to `"text"`):
+                The name of the column containing the text feature in the dataset specified by `data`.
+            second_input_column(`str`, *optional*):
+                The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
+            label_column (`str`, defaults to `"label"`):
+                The name of the column containing the labels in the dataset specified by `data`.
+        Returns:
+            `dict`: metric inputs.
+ `list`: pipeline inputs. + + Example: + + ```py + >>> from evaluate import evaluator + >>> from datasets import load_dataset + + >>> ds = load_dataset("rotten_tomatoes", split="train") + >>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label") + ``` + """ + + self.check_required_columns(data, {"input_column": input_column, "label_column": label_column}) + + return {"references": data[label_column]}, DatasetColumn(data, input_column) + + def prepare_pipeline( + self, + model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821 + tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + device: int = None, + ): + """ + Prepare pipeline. + + Args: + model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`): + If the argument in not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or + is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the + argument specifies a pre-initialized pipeline. + preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`): + Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for + which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore + this argument. + Returns: + The initialized pipeline. + + Example: + + ```py + >>> from evaluate import evaluator + >>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased") + ``` + """ + + if device is None: + device = self._infer_device() + + if ( + isinstance(model_or_pipeline, str) + or isinstance(model_or_pipeline, transformers.PreTrainedModel) + or isinstance(model_or_pipeline, transformers.TFPreTrainedModel) + ): + pipe = pipeline( + self.task, + model=model_or_pipeline, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + device=device, + ) + else: + if model_or_pipeline is None: + pipe = pipeline(self.task, device=device) + else: + pipe = model_or_pipeline + if tokenizer is not None and feature_extractor is not None: + logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).") + if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")): + raise ValueError( + f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task." + ) + return pipe + + def prepare_metric(self, metric: Union[str, EvaluationModule]): + """ + Prepare metric. + + Args: + metric (`str` or [`EvaluationModule`], defaults to `None`): + Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and + load it. Otherwise we assume it represents a pre-loaded metric. + + Returns: + The loaded metric. + + Example: + + ```py + >>> from evaluate import evaluator + >>> evaluator("text-classification").prepare_metric("accuracy") + ``` + """ + # Prepare metric. + if metric is None: + if self.default_metric_name is None: + raise ValueError( + "`Evaluator` doesn't specify a default metric. 
Please specify a valid `metric` argument." + ) + metric = load(self.default_metric_name) + elif isinstance(metric, str): + metric = load(metric) + + return metric + + def call_pipeline(self, pipe, *args, **kwargs): + start_time = perf_counter() + pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS) + end_time = perf_counter() + return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output)) + + def compute_metric( + self, + metric: EvaluationModule, + metric_inputs: Dict, + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + random_state: Optional[int] = None, + ): + """Compute and return metrics.""" + result = metric.compute(**metric_inputs, **self.METRIC_KWARGS) + + if strategy == "bootstrap": + metric_keys = result.keys() + bootstrap_dict = self._compute_confidence_interval( + metric, + metric_inputs, + metric_keys, + confidence_level, + n_resamples, + random_state, + ) + for key in metric_keys: + bootstrap_dict[key]["score"] = result[key] + + return bootstrap_dict + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..82831458bb8789ce9c9418d6c19d4af4ba5b35a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py @@ -0,0 +1,119 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from numbers import Number +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("image-classification") + >>> data = load_dataset("beans", split="test[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="nateraw/vit-base-beans", + >>> data=data, + >>> label_column="labels", + >>> metric="accuracy", + >>> label_mapping={'angular_leaf_spot': 0, 'bean_rust': 1, 'healthy': 2}, + >>> strategy="bootstrap" + >>> ) + ``` +""" + + +class ImageClassificationEvaluator(Evaluator): + """ + Image classification evaluator. + This image classification evaluator can currently be loaded from [`evaluator`] using the default task name + `image-classification`. + Methods in this class assume a data format compatible with the [`ImageClassificationPipeline`]. 
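+
+    The underlying pipeline is expected to return, for every image, a list of `{"label": ..., "score": ...}`
+    dicts; `predictions_processor` keeps the top-scoring label and, when a `label_mapping` is provided,
+    converts it to the corresponding id. Purely as an illustration (label names borrowed from the `beans`
+    example above):
+
+    ```python
+    >>> preds = [[{"label": "healthy", "score": 0.9}, {"label": "bean_rust", "score": 0.1}]]
+    >>> [max(p, key=lambda x: x["score"])["label"] for p in preds]
+    ['healthy']
+    ```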
+ """ + + PIPELINE_KWARGS = {} + + def __init__(self, task="image-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions] + pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label] + + return {"predictions": pred_label} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "image", + label_column: str = "label", + label_mapping: Optional[Dict[str, Number]] = None, + ) -> Tuple[Dict[str, float], Any]: + + """ + input_column (`str`, defaults to `"image"`): + The name of the column containing the images as PIL ImageFile in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`): + We want to map class labels defined by the model in the pipeline to values consistent with those + defined in the `label_column` of the `data` dataset. + """ + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + label_mapping=label_mapping, + ) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..99b4190eebdda4e90617d0979fe23af2965d3204 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py @@ -0,0 +1,239 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +# Lint as: python3 +from datasets import Dataset + + +try: + TRANSFORMERS_AVAILABLE = True +except ImportError: + TRANSFORMERS_AVAILABLE = False + +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from ..utils.logging import get_logger +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator +from .utils import DatasetColumn + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +logger = get_logger(__name__) + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("question-answering") + >>> data = load_dataset("squad", split="validation[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="sshleifer/tiny-distilbert-base-cased-distilled-squad", + >>> data=data, + >>> metric="squad", + >>> ) + ``` + + + + Datasets where the answer may be missing in the context are supported, for example SQuAD v2 dataset. In this case, it is safer to pass `squad_v2_format=True` to + the compute() call. + + + + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("question-answering") + >>> data = load_dataset("squad_v2", split="validation[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="mrm8488/bert-tiny-finetuned-squadv2", + >>> data=data, + >>> metric="squad_v2", + >>> squad_v2_format=True, + >>> ) + ``` +""" + + +class QuestionAnsweringEvaluator(Evaluator): + """ + Question answering evaluator. This evaluator handles + [**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering), + where the answer to the question is extracted from a context. + + This question answering evaluator can currently be loaded from [`evaluator`] using the default task name + `question-answering`. + + Methods in this class assume a data format compatible with the + [`~transformers.QuestionAnsweringPipeline`]. + """ + + PIPELINE_KWARGS = {} + + def __init__(self, task="question-answering", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def prepare_data( + self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str + ): + """Prepare data.""" + if data is None: + raise ValueError( + "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object." + ) + self.check_required_columns( + data, + { + "question_column": question_column, + "context_column": context_column, + "id_column": id_column, + "label_column": label_column, + }, + ) + + metric_inputs = dict() + metric_inputs["references"] = [ + {"id": element[id_column], "answers": element[label_column]} for element in data + ] + + return metric_inputs, { + "question": DatasetColumn(data, question_column), + "context": DatasetColumn(data, context_column), + } + + def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"): + """ + Check if the provided dataset follows the squad v2 data schema, namely possible samples where the answer is not in the context. + In this case, the answer text list should be `[]`. 
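+
+        For illustration, an unanswerable squad_v2-style reference is stored as
+        `{"text": [], "answer_start": []}`, while an answerable one holds non-empty lists such as
+        `{"text": ["an answer span"], "answer_start": [17]}` (the values shown here are made up).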
+ """ + original_num_rows = data.num_rows + nonempty_num_rows = data.filter( + lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False + ).num_rows + if original_num_rows > nonempty_num_rows: + return True + else: + return False + + def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List): + result = [] + for i in range(len(predictions)): + pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]} + if squad_v2_format: + pred["no_answer_probability"] = predictions[i]["score"] + result.append(pred) + return {"predictions": result} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + question_column: str = "question", + context_column: str = "context", + id_column: str = "id", + label_column: str = "answers", + squad_v2_format: Optional[bool] = None, + ) -> Tuple[Dict[str, float], Any]: + """ + question_column (`str`, defaults to `"question"`): + The name of the column containing the question in the dataset specified by `data`. + context_column (`str`, defaults to `"context"`): + The name of the column containing the context in the dataset specified by `data`. + id_column (`str`, defaults to `"id"`): + The name of the column containing the identification field of the question and answer pair in the + dataset specified by `data`. + label_column (`str`, defaults to `"answers"`): + The name of the column containing the answers in the dataset specified by `data`. + squad_v2_format (`bool`, *optional*, defaults to `None`): + Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset + has questions where the answer is not in the context, more specifically when are answers as + `{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter + should be set to `False`. If this parameter is not provided, the format will be automatically inferred. + """ + result = {} + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data( + data=data, + question_column=question_column, + context_column=context_column, + id_column=id_column, + label_column=label_column, + ) + + if squad_v2_format is None: + squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column) + logger.warning( + f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}." + ) + pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device) + + metric = self.prepare_metric(metric) + + if squad_v2_format and metric.name == "squad": + logger.warning( + "The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric." 
+ ) + if not squad_v2_format and metric.name == "squad_v2": + logger.warning( + "The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric." + ) + + if squad_v2_format: + self.PIPELINE_KWARGS["handle_impossible_answer"] = True + else: + self.PIPELINE_KWARGS["handle_impossible_answer"] = False + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs) + predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column]) + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + result.update(metric_results) + result.update(perf_results) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..6dfd2c035695b38c1e4f0d9d4929b12c6be30920 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py @@ -0,0 +1,267 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION_KWARGS = r""" + input_column (`str`, defaults to `"text"`): + the name of the column containing the input text in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + the name of the column containing the labels in the dataset specified by `data`. + generation_kwargs (`Dict`, *optional*, defaults to `None`): + The generation kwargs are passed to the pipeline and set the text generation strategy. 
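+            For example (illustrative only; any keyword accepted by the underlying `transformers` generation
+            pipeline should work), `generation_kwargs={"max_new_tokens": 64, "num_beams": 4}` would request
+            beam search with at most 64 newly generated tokens.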
+""" + +TEXT2TEXT_TASK_DOCSTRING_EXAMPLE = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("text2text-generation") + >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="facebook/bart-large-cnn", + >>> data=data, + >>> input_column="article", + >>> label_column="highlights", + >>> metric="rouge", + >>> ) + ``` +""" + +SUMMARIZATION_TASK_DOCSTRING_EXAMPLE = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("summarization") + >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="facebook/bart-large-cnn", + >>> data=data, + >>> input_column="article", + >>> label_column="highlights", + >>> ) + ``` +""" + + +TRANSLATION_TASK_DOCSTRING_EXAMPLE = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("translation") + >>> data = load_dataset("wmt19", "fr-de", split="validation[:40]") + >>> data = data.map(lambda x: {"text": x["translation"]["de"], "label": x["translation"]["fr"]}) + >>> results = task_evaluator.compute( + >>> model_or_pipeline="Helsinki-NLP/opus-mt-de-fr", + >>> data=data, + >>> ) + ``` +""" + + +class Text2TextGenerationEvaluator(Evaluator): + """ + Text2Text generation evaluator. + This Text2Text generation evaluator can currently be loaded from [`evaluator`] using the default task name + `text2text-generation`. + Methods in this class assume a data format compatible with the [`~transformers.Text2TextGenerationPipeline`]. 
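+
+    Predictions are read from the pipeline output through the class attribute `PREDICTION_PREFIX`: the value
+    stored under the `f"{PREDICTION_PREFIX}_text"` key is used, i.e. `"generated_text"` here and, in the
+    subclasses below, `"summary_text"` and `"translation_text"`.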
+ """ + + PREDICTION_PREFIX = "generated" + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="text2text-generation", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions, label_mapping): + return {"predictions": [pred[f"{self.PREDICTION_PREFIX}_text"] for pred in predictions]} + + @add_start_docstrings( + EVALUTOR_COMPUTE_START_DOCSTRING, + TASK_DOCUMENTATION_KWARGS, + EVALUATOR_COMPUTE_RETURN_DOCSTRING, + TEXT2TEXT_TASK_DOCSTRING_EXAMPLE, + ) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + if generation_kwargs is not None: + self.PIPELINE_KWARGS.update(generation_kwargs) + + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + ) + + return result + + +class SummarizationEvaluator(Text2TextGenerationEvaluator): + """ + Text summarization evaluator. + This text summarization evaluator can currently be loaded from [`evaluator`] using the default task name + `summarization`. + Methods in this class assume a data format compatible with the [`SummarizationEvaluator`]. 
+ """ + + PREDICTION_PREFIX = "summary" + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="summarization", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + @add_start_docstrings( + EVALUTOR_COMPUTE_START_DOCSTRING, + TASK_DOCUMENTATION_KWARGS, + EVALUATOR_COMPUTE_RETURN_DOCSTRING, + SUMMARIZATION_TASK_DOCSTRING_EXAMPLE, + ) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + generation_kwargs=generation_kwargs, + ) + + return result + + +class TranslationEvaluator(Text2TextGenerationEvaluator): + """ + Translation evaluator. + This translation generation evaluator can currently be loaded from [`evaluator`] using the default task name + `translation`. + Methods in this class assume a data format compatible with the [`~transformers.TranslationPipeline`]. 
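+
+    As in the translation example earlier in this module, datasets that store translations as a nested dict
+    (e.g. the `translation` field of `wmt19`) need to be flattened into an input column and a label column,
+    for instance with `Dataset.map`, before being passed to `compute`.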
+ """ + + PREDICTION_PREFIX = "translation" + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="translation", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + @add_start_docstrings( + EVALUTOR_COMPUTE_START_DOCSTRING, + TASK_DOCUMENTATION_KWARGS, + EVALUATOR_COMPUTE_RETURN_DOCSTRING, + TRANSLATION_TASK_DOCSTRING_EXAMPLE, + ) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + label_column: str = "label", + generation_kwargs: dict = None, + ) -> Tuple[Dict[str, float], Any]: + result = super().compute( + model_or_pipeline=model_or_pipeline, + data=data, + subset=subset, + split=split, + metric=metric, + tokenizer=tokenizer, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + device=device, + random_state=random_state, + input_column=input_column, + label_column=label_column, + generation_kwargs=generation_kwargs, + ) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..200eb01d70336148db473edebebc96e3137c5799 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_classification.py @@ -0,0 +1,160 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from numbers import Number +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union + +from datasets import Dataset, load_dataset +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator +from .utils import DatasetColumnPair + + +if TYPE_CHECKING: + from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("text-classification") + >>> data = load_dataset("imdb", split="test[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli", + >>> data=data, + >>> metric="accuracy", + >>> label_mapping={"LABEL_0": 0.0, "LABEL_1": 1.0}, + >>> strategy="bootstrap", + >>> n_resamples=10, + >>> random_state=0 + >>> ) + ``` +""" + + +class TextClassificationEvaluator(Evaluator): + """ + Text classification evaluator. + This text classification evaluator can currently be loaded from [`evaluator`] using the default task name + `text-classification` or with a `"sentiment-analysis"` alias. + Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual + feature as input and a categorical label as output. + """ + + PIPELINE_KWARGS = {"truncation": True} + + def __init__(self, task="text-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str): + if data is None: + raise ValueError( + "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object." 
+ ) + + self.check_required_columns(data, {"input_column": input_column, "label_column": label_column}) + + if second_input_column is not None: + self.check_required_columns(data, {"second_input_column": second_input_column}) + + data = load_dataset(data) if isinstance(data, str) else data + + return {"references": data[label_column]}, DatasetColumnPair( + data, input_column, second_input_column, "text", "text_pair" + ) + + def predictions_processor(self, predictions, label_mapping): + predictions = [ + label_mapping[element["label"]] if label_mapping is not None else element["label"] + for element in predictions + ] + return {"predictions": predictions} + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: Optional[str] = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: int = None, + random_state: Optional[int] = None, + input_column: str = "text", + second_input_column: Optional[str] = None, + label_column: str = "label", + label_mapping: Optional[Dict[str, Number]] = None, + ) -> Tuple[Dict[str, float], Any]: + """ + input_column (`str`, *optional*, defaults to `"text"`): + The name of the column containing the text feature in the dataset specified by `data`. + second_input_column (`str`, *optional*, defaults to `None`): + The name of the second column containing the text features. This may be useful for classification tasks + as MNLI, where two columns are used. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`): + We want to map class labels defined by the model in the pipeline to values consistent with those + defined in the `label_column` of the `data` dataset. 
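+                For example (hypothetical labels, for illustration only), a sentiment model emitting
+                `"NEGATIVE"`/`"POSITIVE"` strings evaluated against integer references could pass
+                `label_mapping={"NEGATIVE": 0, "POSITIVE": 1}`.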
+ """ + + result = {} + + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + # Prepare inputs + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data( + data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column + ) + pipe = self.prepare_pipeline( + model_or_pipeline=model_or_pipeline, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + device=device, + ) + metric = self.prepare_metric(metric) + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, pipe_inputs) + predictions = self.predictions_processor(predictions, label_mapping) + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + result.update(metric_results) + result.update(perf_results) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..15522e860f7eb6fc693780f637337c0fdb22a21c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py @@ -0,0 +1,69 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Tuple + +from datasets import Dataset + +from .base import Evaluator +from .utils import DatasetColumn + + +TASK_DOCUMENTATION_KWARGS = r""" + input_column (`str`, defaults to `"text"`): + the name of the column containing the input text in the dataset specified by `data`. + generation_kwargs (`Dict`, *optional*, defaults to `None`): + The generation kwargs are passed to the pipeline and set the text generation strategy. +""" + + +class TextGenerationEvaluator(Evaluator): + """ + Text generation evaluator. + This Text generation evaluator can currently be loaded from [`evaluator`] using the default task name + `text-generation`. + Methods in this class assume a data format compatible with the [`~transformers.TextGenerationPipeline`]. + """ + + def predictions_processor(self, predictions, *args, **kwargs): + """ + Args: + predictions: A list of lists of dicts + + Returns: + `dict`: All the generated texts are flattened and stored under the "data" key. + """ + return {"data": [pred[f"{self.predictions_prefix}_text"] for pred_list in predictions for pred in pred_list]} + + def __init__(self, task="text-generation", default_metric_name=None, predictions_prefix: str = "generated"): + super().__init__(task=task, default_metric_name=default_metric_name) + self.predictions_prefix = predictions_prefix + + def prepare_data(self, data: Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, DatasetColumn]: + """ + Prepare data. 
+ + Args: + data ([`Dataset`]): + Specifies the dataset we will run evaluation on. + input_column (`str`, defaults to `"text"`): + The name of the column containing the text feature in the dataset specified by `data`. + Returns: + `dict`: metric inputs. + `list`: pipeline inputs. + """ + + self.check_required_columns(data, {"input_column": input_column}) + + return {}, DatasetColumn(data, input_column) diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..ba08ebd58d72417eed4e20c93a46c53adaa49811 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py @@ -0,0 +1,278 @@ +# Copyright 2022 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +from datasets import ClassLabel, Dataset, Sequence +from typing_extensions import Literal + +from ..module import EvaluationModule +from ..utils.file_utils import add_end_docstrings, add_start_docstrings +from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator +from .utils import DatasetColumn + + +if TYPE_CHECKING: + from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel + + +TASK_DOCUMENTATION = r""" + The dataset input and label columns are expected to be formatted as a list of words and a list of labels respectively, following [conll2003 dataset](https://huggingface.co/datasets/conll2003). Datasets whose inputs are single strings, and labels are a list of offset are not supported. 
+ + Examples: + ```python + >>> from evaluate import evaluator + >>> from datasets import load_dataset + >>> task_evaluator = evaluator("token-classification") + >>> data = load_dataset("conll2003", split="validation[:2]") + >>> results = task_evaluator.compute( + >>> model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english", + >>> data=data, + >>> metric="seqeval", + >>> ) + ``` + + + + For example, the following dataset format is accepted by the evaluator: + + ```python + dataset = Dataset.from_dict( + mapping={ + "tokens": [["New", "York", "is", "a", "city", "and", "Felix", "a", "person", "."]], + "ner_tags": [[1, 2, 0, 0, 0, 0, 3, 0, 0, 0]], + }, + features=Features({ + "tokens": Sequence(feature=Value(dtype="string")), + "ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC", "B-PER", "I-PER"])), + }), + ) + ``` + + + + + + For example, the following dataset format is **not** accepted by the evaluator: + + ```python + dataset = Dataset.from_dict( + mapping={ + "tokens": [["New York is a city and Felix a person."]], + "starts": [[0, 23]], + "ends": [[7, 27]], + "ner_tags": [["LOC", "PER"]], + }, + features=Features({ + "tokens": Value(dtype="string"), + "starts": Sequence(feature=Value(dtype="int32")), + "ends": Sequence(feature=Value(dtype="int32")), + "ner_tags": Sequence(feature=Value(dtype="string")), + }), + ) + ``` + + +""" + + +class TokenClassificationEvaluator(Evaluator): + """ + Token classification evaluator. + + This token classification evaluator can currently be loaded from [`evaluator`] using the default task name + `token-classification`. + + Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`]. + """ + + PIPELINE_KWARGS = {"ignore_labels": []} + + def __init__(self, task="token-classification", default_metric_name=None): + super().__init__(task, default_metric_name=default_metric_name) + + def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str): + """ + Transform the pipeline predictions into a list of predicted labels of the same length as the true labels. + + Args: + predictions (`List[List[Dict]]`): + List of pipeline predictions, where each token has been labeled. + words (`List[List[str]]`): + Original input data to the pipeline, used to build predicted labels of the same length. + join_by (`str`): + String to use to join two words. In English, it will typically be " ". + + Returns: + `dict`: a dictionary holding the predictions + """ + preds = [] + + # iterate over the data rows + for i, prediction in enumerate(predictions): + pred_processed = [] + + # get a list of tuples giving the indexes of the start and end character of each word + words_offsets = self.words_to_offsets(words[i], join_by) + + token_index = 0 + for word_offset in words_offsets: + # for each word, we may keep only the predicted label for the first token, discard the others + while prediction[token_index]["start"] < word_offset[0]: + token_index += 1 + + if prediction[token_index]["start"] > word_offset[0]: # bad indexing + pred_processed.append("O") + elif prediction[token_index]["start"] == word_offset[0]: + pred_processed.append(prediction[token_index]["entity"]) + + preds.append(pred_processed) + + return {"predictions": preds} + + def words_to_offsets(self, words: List[str], join_by: str): + """ + Convert a list of words to a list of offsets, where word are joined by `join_by`. + + Args: + words (`List[str]`): + List of words to get offsets from. 
+ join_by (`str`): + String to insert between words. + + Returns: + `List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words. + """ + offsets = [] + + start = 0 + for word in words: + end = start + len(word) - 1 + offsets.append((start, end)) + start = end + len(join_by) + 1 + + return offsets + + def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str): + super().prepare_data(data, input_column, label_column) + + if not isinstance(data.features[input_column], Sequence) or not isinstance( + data.features[label_column], Sequence + ): + raise ValueError( + "TokenClassificationEvaluator expects the input and label columns to be provided as lists." + ) + + # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. + # Otherwise, we have to get the list of labels manually. + labels_are_int = isinstance(data.features[label_column].feature, ClassLabel) + if labels_are_int: + label_list = data.features[label_column].feature.names # list of string labels + id_to_label = {i: label for i, label in enumerate(label_list)} + references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]] + elif data.features[label_column].feature.dtype.startswith("int"): + raise NotImplementedError( + "References provided as integers, but the reference column is not a Sequence of ClassLabels." + ) + else: + # In the event the labels are not a `Sequence[ClassLabel]`, we have already labels as strings + # An example is labels as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in polyglot_ner dataset + references = data[label_column] + + metric_inputs = {"references": references} + data = data.map(lambda x: {input_column: join_by.join(x[input_column])}) + pipeline_inputs = DatasetColumn(data, input_column) + + return metric_inputs, pipeline_inputs + + def prepare_pipeline( + self, + model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821 + tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821 + device: int = None, + ): + pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device) + + # check the pipeline outputs start characters in its predictions + dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS) + if dummy_output[0][0]["start"] is None: + raise ValueError( + "TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). " + "Transformers pipelines with a slow tokenizer will raise this error." 
+ ) + + return pipe + + @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING) + @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION) + def compute( + self, + model_or_pipeline: Union[ + str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821 + ] = None, + data: Union[str, Dataset] = None, + subset: Optional[str] = None, + split: str = None, + metric: Union[str, EvaluationModule] = None, + tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821 + strategy: Literal["simple", "bootstrap"] = "simple", + confidence_level: float = 0.95, + n_resamples: int = 9999, + device: Optional[int] = None, + random_state: Optional[int] = None, + input_column: str = "tokens", + label_column: str = "ner_tags", + join_by: Optional[str] = " ", + ) -> Tuple[Dict[str, float], Any]: + """ + input_column (`str`, defaults to `"tokens"`): + The name of the column containing the tokens feature in the dataset specified by `data`. + label_column (`str`, defaults to `"label"`): + The name of the column containing the labels in the dataset specified by `data`. + join_by (`str`, *optional*, defaults to `" "`): + This evaluator supports dataset whose input column is a list of words. This parameter specifies how to join + words to generate a string input. This is especially useful for languages that do not separate words by a space. + """ + result = {} + + self.check_for_mismatch_in_device_setup(device, model_or_pipeline) + + # Prepare inputs + data = self.load_data(data=data, subset=subset, split=split) + metric_inputs, pipe_inputs = self.prepare_data( + data=data, input_column=input_column, label_column=label_column, join_by=join_by + ) + pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device) + metric = self.prepare_metric(metric) + + # Compute predictions + predictions, perf_results = self.call_pipeline(pipe, pipe_inputs) + predictions = self.predictions_processor(predictions, data[input_column], join_by) + metric_inputs.update(predictions) + + # Compute metrics from references and predictions + metric_results = self.compute_metric( + metric=metric, + metric_inputs=metric_inputs, + strategy=strategy, + confidence_level=confidence_level, + n_resamples=n_resamples, + random_state=random_state, + ) + + result.update(metric_results) + result.update(perf_results) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/utils.py b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e364276d008b689d726b8dbbea1402fa93886d9b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/evaluator/utils.py @@ -0,0 +1,84 @@ +from datasets import Dataset, get_dataset_split_names + + +class DatasetColumn(list): + """Helper class to avoid loading a dataset column into memory when accessing it.""" + + def __init__(self, dataset: Dataset, key: str): + self.dataset = dataset + self.key = key + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + return self.dataset[i][self.key] + + def __iter__(self): + return (self.dataset[i][self.key] for i in range(len(self))) + + +def choose_split(data, subset=None): + available_splits = get_dataset_split_names(data, subset) + preferred_split_order = [ + "test", + "testing", + "eval", + "evaluation", + "validation", + "val", + "valid", + "dev", + "train", + "training", + ] + for split in preferred_split_order: + if split in 
available_splits: + return split + raise ValueError("No dataset split defined! Pass an explicit value to the `split` kwarg.") + + +class DatasetColumnPair(list): + """Helper class to avoid loading two dataset columns into memory when accessing it.""" + + def __init__( + self, + dataset: Dataset, + first_col: str, + second_col: str, + first_key: str, + second_key: str, + ): + """ + Args: + dataset (Dataset): dataset to build an iterator on + first_col (str): first column name to use in the dataset + second_col (str): second column name to use in the dataset + first_key (str): key name used for the first column in the returned dictionary + second_key (str): key name used for the second column in the returned dictionary + """ + self.dataset = dataset + + self.first_col = first_col + self.second_col = second_col + + self.first_key = first_key + self.second_key = second_key + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + return { + self.first_key: self.dataset[i][self.first_col], + self.second_key: self.dataset[i][self.second_col] if self.second_col else None, + } + + def __iter__(self): + return ( + { + self.first_key: self.dataset[i][self.first_col], + self.second_key: self.dataset[i][self.second_col] if self.second_col else None, + } + for i in range(len(self)) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/hub.py b/env-llmeval/lib/python3.10/site-packages/evaluate/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..86118332c6d2293f475e84e80726364dcc63e292 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/hub.py @@ -0,0 +1,133 @@ +from typing import Dict + +import requests +from huggingface_hub import dataset_info, model_info +from huggingface_hub.repocard import metadata_update + +from .config import HF_HUB_ALLOWED_TASKS +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +def push_to_hub( + model_id: str, + task_type: str, + dataset_type: str, + dataset_name: str, + metric_type: str, + metric_name: str, + metric_value: float, + task_name: str = None, + dataset_config: str = None, + dataset_split: str = None, + dataset_revision: str = None, + dataset_args: Dict[str, int] = None, + metric_config: str = None, + metric_args: Dict[str, int] = None, + overwrite: bool = False, +): + r""" + Pushes the result of a metric to the metadata of a model repository in the Hub. + + Args: + model_id (`str`): + Model id from https://hf.co/models. + task_type (`str`): + Task id, refer to the [Hub allowed tasks](https://github.com/huggingface/evaluate/blob/main/src/evaluate/config.py#L154) for allowed values. + dataset_type (`str`): + Dataset id from https://hf.co/datasets. + dataset_name (`str`): + Pretty name for the dataset. + metric_type (`str`): + Metric id from https://hf.co/metrics. + metric_name (`str`): + Pretty name for the metric. + metric_value (`float`): + Computed metric value. + task_name (`str`, *optional*): + Pretty name for the task. + dataset_config (`str`, *optional*): + Dataset configuration used in [`~datasets.load_dataset`]. + See [`~datasets.load_dataset`] for more info. + dataset_split (`str`, *optional*): + Name of split used for metric computation. + dataset_revision (`str`, *optional*): + Git hash for the specific version of the dataset. + dataset_args (`dict[str, int]`, *optional*): + Additional arguments passed to [`~datasets.load_dataset`]. + metric_config (`str`, *optional*): + Configuration for the metric (e.g. 
the GLUE metric has a configuration for each subset). + metric_args (`dict[str, int]`, *optional*): + Arguments passed during [`~evaluate.EvaluationModule.compute`]. + overwrite (`bool`, *optional*, defaults to `False`): + If set to `True` an existing metric field can be overwritten, otherwise + attempting to overwrite any existing fields will cause an error. + + Example: + + ```python + >>> push_to_hub( + ... model_id="huggingface/gpt2-wikitext2", + ... metric_value=0.5 + ... metric_type="bleu", + ... metric_name="BLEU", + ... dataset_name="WikiText", + ... dataset_type="wikitext", + ... dataset_split="test", + ... task_type="text-generation", + ... task_name="Text Generation" + ... ) + ```""" + if task_type not in HF_HUB_ALLOWED_TASKS: + raise ValueError(f"Task type not supported. Task has to be one of {HF_HUB_ALLOWED_TASKS}") + + try: + dataset_info(dataset_type) + except requests.exceptions.HTTPError: + logger.warning(f"Dataset {dataset_type} not found on the Hub at hf.co/datasets/{dataset_type}") + + try: + model_info(model_id) + except requests.exceptions.HTTPError: + raise ValueError(f"Model {model_id} not found on the Hub at hf.co/{model_id}") + + result = { + "task": { + "type": task_type, + }, + "dataset": { + "type": dataset_type, + "name": dataset_name, + }, + "metrics": [ + { + "type": metric_type, + "value": metric_value, + }, + ], + } + + if dataset_config is not None: + result["dataset"]["config"] = dataset_config + if dataset_split is not None: + result["dataset"]["split"] = dataset_split + if dataset_revision is not None: + result["dataset"]["revision"] = dataset_revision + if dataset_args is not None: + result["dataset"]["args"] = dataset_args + + if task_name is not None: + result["task"]["name"] = task_name + + if metric_name is not None: + result["metrics"][0]["name"] = metric_name + if metric_config is not None: + result["metrics"][0]["config"] = metric_config + if metric_args is not None: + result["metrics"][0]["args"] = metric_args + + metadata = {"model-index": [{"results": [result]}]} + + return metadata_update(repo_id=model_id, metadata=metadata, overwrite=overwrite) diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/info.py b/env-llmeval/lib/python3.10/site-packages/evaluate/info.py new file mode 100644 index 0000000000000000000000000000000000000000..cc095784e4f1c1f473dd85955447d97d5fdc4e65 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/info.py @@ -0,0 +1,157 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" EvaluationModuleInfo records information we know about a dataset and a metric. +""" + +import dataclasses +import json +import os +from dataclasses import asdict, dataclass, field +from typing import List, Optional, Union + +from datasets.features import Features, Value + +from . 
import config +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +@dataclass +class EvaluationModuleInfo: + """Base class to store information about an evaluation used for `MetricInfo`, `ComparisonInfo`, + and `MeasurementInfo`. + + `EvaluationModuleInfo` documents an evaluation, including its name, version, and features. + See the constructor arguments and properties for a full list. + + Note: Not all fields are known on construction and may be updated later. + """ + + # Set in the dataset scripts + description: str + citation: str + features: Union[Features, List[Features]] + inputs_description: str = field(default_factory=str) + homepage: str = field(default_factory=str) + license: str = field(default_factory=str) + codebase_urls: List[str] = field(default_factory=list) + reference_urls: List[str] = field(default_factory=list) + streamable: bool = False + format: Optional[str] = None + module_type: str = "metric" # deprecate this in the future + + # Set later by the builder + module_name: Optional[str] = None + config_name: Optional[str] = None + experiment_id: Optional[str] = None + + def __post_init__(self): + if self.format is not None: + for key, value in self.features.items(): + if not isinstance(value, Value): + raise ValueError( + f"When using 'numpy' format, all features should be a `datasets.Value` feature. " + f"Here {key} is an instance of {value.__class__.__name__}" + ) + + def write_to_directory(self, metric_info_dir): + """Write `EvaluationModuleInfo` as JSON to `metric_info_dir`. + Also save the license separately in LICENSE. + + Args: + metric_info_dir (`str`): + The directory to save `metric_info_dir` to. + + Example: + + ```py + >>> my_metric.info.write_to_directory("/path/to/directory/") + ``` + """ + with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f: + json.dump(asdict(self), f) + + with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f: + f.write(self.license) + + @classmethod + def from_directory(cls, metric_info_dir) -> "EvaluationModuleInfo": + """Create `EvaluationModuleInfo` from the JSON file in `metric_info_dir`. + + Args: + metric_info_dir (`str`): + The directory containing the `metric_info` JSON file. This + should be the root directory of a specific metric version. + + Example: + + ```py + >>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/") + ``` + """ + logger.info(f"Loading Metric info from {metric_info_dir}") + if not metric_info_dir: + raise ValueError("Calling EvaluationModuleInfo.from_directory() with undefined metric_info_dir.") + + with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f: + metric_info_dict = json.load(f) + return cls.from_dict(metric_info_dict) + + @classmethod + def from_dict(cls, metric_info_dict: dict) -> "EvaluationModuleInfo": + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names}) + + +@dataclass +class MetricInfo(EvaluationModuleInfo): + """Information about a metric. + + `EvaluationModuleInfo` documents a metric, including its name, version, and features. + See the constructor arguments and properties for a full list. + + Note: Not all fields are known on construction and may be updated later. + """ + + module_type: str = "metric" + + +@dataclass +class ComparisonInfo(EvaluationModuleInfo): + """Information about a comparison. 
+ + `EvaluationModuleInfo` documents a comparison, including its name, version, and features. + See the constructor arguments and properties for a full list. + + Note: Not all fields are known on construction and may be updated later. + """ + + module_type: str = "comparison" + + +@dataclass +class MeasurementInfo(EvaluationModuleInfo): + """Information about a measurement. + + `EvaluationModuleInfo` documents a measurement, including its name, version, and features. + See the constructor arguments and properties for a full list. + + Note: Not all fields are known on construction and may be updated later. + """ + + module_type: str = "measurement" diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/inspect.py b/env-llmeval/lib/python3.10/site-packages/evaluate/inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..20e2af28ed4df4e99c6d67cccdd24dda1c8cecf9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/inspect.py @@ -0,0 +1,129 @@ +# Copyright 2020 The HuggingFace Evaluate Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" List and inspect metrics.""" + +from typing import Optional + +import requests +from datasets import DownloadConfig + +from .config import EVALUATION_MODULE_TYPES, HF_LIST_ENDPOINT +from .loading import evaluation_module_factory +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +class SplitsNotFoundError(ValueError): + pass + + +def list_evaluation_modules(module_type=None, include_community=True, with_details=False): + """List all evaluation modules available on the Hugging Face Hub. + + Args: + module_type (`str`, *optional*, defaults to `None`): + Type of evaluation modules to list. Has to be one of `'metric'`, `'comparison'`, or `'measurement'`. If `None`, all types are listed. + include_community (`bool`, *optional*, defaults to `True`): + Include community modules in the list. + with_details (`bool`, *optional*, defaults to `False`): + Return the full details on the metrics instead of only the ID. + + Returns: + `List[Union[str, dict]]` + + Example: + + ```py + >>> from evaluate import list_evaluation_modules + >>> list_evaluation_modules(module_type="metric") + ``` + """ + + if module_type is None: + evaluations_list = [] + for module_type in EVALUATION_MODULE_TYPES: + evaluations_list.extend( + _list_evaluation_modules_type( + module_type, include_community=include_community, with_details=with_details + ) + ) + else: + if module_type not in EVALUATION_MODULE_TYPES: + raise ValueError(f"Invalid module type '{module_type}'. 
Has to be one of {EVALUATION_MODULE_TYPES}.") + evaluations_list = _list_evaluation_modules_type( + module_type, include_community=include_community, with_details=with_details + ) + return evaluations_list + + +def _list_evaluation_modules_type(module_type, include_community=True, with_details=False): + + r = requests.get(HF_LIST_ENDPOINT.format(type=module_type)) + r.raise_for_status() + d = r.json() + + if not include_community: + d = [element for element in d if element["id"].split("/")[0] == f"evaluate-{module_type}"] + + # remove namespace for canonical modules and add community tag + for element in d: + if element["id"].split("/")[0] == f"evaluate-{module_type}": + element["id"] = element["id"].split("/")[1] + element["community"] = False + else: + element["community"] = True + + if with_details: + return [ + { + "name": element["id"], + "type": module_type, + "community": element["community"], + "likes": element.get("likes", 0), + } + for element in d + ] + else: + return [element["id"] for element in d] + + +def inspect_evaluation_module( + path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs +): + r""" + Allow inspection/modification of a evaluation script by copying it on local drive at local_path. + + Args: + path (``str``): path to the evaluation script. Can be either: + + - a local path to script or the directory containing the script (if the script has the same name as the directory), + e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'`` + - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``evaluate.list_evaluation_modules()``) + e.g. ``'accuracy'``, ``'bleu'`` or ``'word_length'`` + local_path (``str``): path to the local folder to copy the datset script to. + download_config (Optional ``datasets.DownloadConfig``: specific download configuration parameters. + **download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied. + """ + evaluation_module = evaluation_module_factory( + path, download_config=download_config, force_local_path=local_path, **download_kwargs + ) + print( + f"The processing scripts for metric {path} can be inspected at {local_path}. " + f"The main class is in {evaluation_module.module_path}. " + f"You can modify this processing scripts and use it with `evaluate.load({local_path})`." + ) diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/loading.py b/env-llmeval/lib/python3.10/site-packages/evaluate/loading.py new file mode 100644 index 0000000000000000000000000000000000000000..2a3437681c28519cd4589d034e5ef6353d166250 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/loading.py @@ -0,0 +1,771 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
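As a quick orientation for the two public helpers defined in `inspect.py` above, here is a hedged usage sketch; the module id "accuracy" and the local folder are illustrative values, not taken from this diff:

```python
# Sketch: list metric modules available on the Hub, then copy one locally to inspect it.
# The module id "accuracy" and the target folder are illustrative.
from evaluate import inspect_evaluation_module, list_evaluation_modules

metrics = list_evaluation_modules(module_type="metric", with_details=True)
print(metrics[:3])  # e.g. [{'name': 'accuracy', 'type': 'metric', 'community': False, 'likes': 0}, ...]

# Copy a module's loading script into a local folder so it can be modified,
# then reload it with evaluate.load("./inspected_accuracy").
inspect_evaluation_module("accuracy", local_path="./inspected_accuracy")
```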
+ +# Lint as: python3 +"""Access datasets.""" +import filecmp +import importlib +import inspect +import json +import os +import re +import shutil +import time +from dataclasses import dataclass +from pathlib import Path +from typing import List, Optional, Tuple, Type, Union +from urllib.parse import urlparse + +from datasets import DownloadConfig, DownloadMode +from datasets.builder import DatasetBuilder +from datasets.packaged_modules import _EXTENSION_TO_MODULE, _hash_python_lines +from datasets.utils.filelock import FileLock +from datasets.utils.version import Version + +from . import SCRIPTS_VERSION, config +from .module import EvaluationModule +from .utils.file_utils import ( + cached_path, + head_hf_s3, + hf_hub_url, + init_hf_modules, + is_relative_path, + relative_to_absolute_path, + url_or_path_join, +) +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + ["zip"] + + +def init_dynamic_modules( + name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None +): + """ + Create a module with name `name` in which you can add dynamic modules + such as metrics or datasets. The module can be imported using its name. + The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can + be overriden by specifying a path to another directory in `hf_modules_cache`. + """ + hf_modules_cache = init_hf_modules(hf_modules_cache) + dynamic_modules_path = os.path.join(hf_modules_cache, name) + os.makedirs(dynamic_modules_path, exist_ok=True) + if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")): + with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"): + pass + return dynamic_modules_path + + +def import_main_class(module_path) -> Optional[Union[Type[DatasetBuilder], Type[EvaluationModule]]]: + """Import a module at module_path and return its main class, a Metric by default""" + module = importlib.import_module(module_path) + main_cls_type = EvaluationModule + + # Find the main class in our imported module + module_main_cls = None + for name, obj in module.__dict__.items(): + if isinstance(obj, type) and issubclass(obj, main_cls_type): + if inspect.isabstract(obj): + continue + module_main_cls = obj + break + + return module_main_cls + + +def files_to_hash(file_paths: List[str]) -> str: + """ + Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way. 
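A brief aside on the hashing helper whose definition begins just above: it derives the cache subdirectory name purely from file contents, so the mapping is repeatable. A minimal sketch of that property, assuming an illustrative local script path and importing the (internal) helper from the module added here:

```python
# Sketch: files_to_hash() is deterministic, so unchanged scripts map to the same
# subdirectory of the dynamic modules cache. The script path is illustrative.
from evaluate.loading import files_to_hash

first = files_to_hash(["./metrics/accuracy/accuracy.py"])
second = files_to_hash(["./metrics/accuracy/accuracy.py"])
assert first == second  # same content -> same hash -> same cached module folder
```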
+ """ + # List all python files in directories if directories are supplied as part of external imports + to_use_files: List[Union[Path, str]] = [] + for file_path in file_paths: + if os.path.isdir(file_path): + to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]"))) + else: + to_use_files.append(file_path) + + # Get the code from all these files + lines = [] + for file_path in to_use_files: + with open(file_path, encoding="utf-8") as f: + lines.extend(f.readlines()) + return _hash_python_lines(lines) + + +def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]: + """Convert a link to a file on a github repo in a link to the raw github object.""" + parsed = urlparse(url_path) + sub_directory = None + if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com": + if "blob" in url_path: + if not url_path.endswith(".py"): + raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'") + url_path = url_path.replace("blob", "raw") # Point to the raw file + else: + # Parse github url to point to zip + github_path = parsed.path[1:] + repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master") + repo_owner, repo_name = repo_info.split("/") + url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip" + sub_directory = f"{repo_name}-{branch}" + return url_path, sub_directory + + +def increase_load_count(name: str, resource_type: str): + """Update the download count of a dataset or metric.""" + if not config.HF_EVALUATE_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS: + try: + head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset")) + except Exception: + pass + + +def get_imports(file_path: str) -> Tuple[str, str, str, str]: + """Find whether we should import or clone additional files for a given processing script. + And list the import. + + We allow: + - library dependencies, + - local dependencies and + - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository. + external dependencies will be downloaded (and extracted if needed in the dataset folder). + We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script. + + Note that only direct import in the dataset processing script will be handled + We don't recursively explore the additional import to download further files. 
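Because `convert_github_url` (defined above) does some non-obvious rewriting, here is a hedged sketch of its behaviour on two illustrative GitHub links:

```python
# Sketch of the rewriting performed by convert_github_url(); the URLs are illustrative.
from evaluate.loading import convert_github_url

# A "blob" link to a .py file is pointed at the raw file; no subdirectory is returned.
convert_github_url("https://github.com/owner/repo/blob/main/utils.py")
# -> ("https://github.com/owner/repo/raw/main/utils.py", None)

# A repository "tree" link becomes a zip archive of that branch, plus the name of
# the folder the archive extracts to.
convert_github_url("https://github.com/owner/repo/tree/main")
# -> ("https://github.com/owner/repo/archive/main.zip", "repo-main")
```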
+ + Example:: + + import tensorflow + import .c4_utils + import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset + """ + lines = [] + with open(file_path, encoding="utf-8") as f: + lines.extend(f.readlines()) + + logger.debug(f"Checking {file_path} for additional imports.") + imports: List[Tuple[str, str, str, Optional[str]]] = [] + is_in_docstring = False + for line in lines: + docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line) + + if len(docstr_start_match) == 1: + # flip True <=> False only if doctstring + # starts at line without finishing + is_in_docstring = not is_in_docstring + + if is_in_docstring: + # import statements in doctstrings should + # not be added as required dependencies + continue + + match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE) + if match is None: + match = re.match( + r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", + line, + flags=re.MULTILINE, + ) + if match is None: + continue + if match.group(1): + # The import starts with a '.', we will download the relevant file + if any(imp[1] == match.group(2) for imp in imports): + # We already have this import + continue + if match.group(3): + # The import has a comment with 'From:', we'll retrieve it from the given url + url_path = match.group(3) + url_path, sub_directory = convert_github_url(url_path) + imports.append(("external", match.group(2), url_path, sub_directory)) + elif match.group(2): + # The import should be at the same place as the file + imports.append(("internal", match.group(2), match.group(2), None)) + else: + if match.group(3): + # The import has a comment with `From: git+https:...`, asks user to pip install from git. + url_path = match.group(3) + imports.append(("library", match.group(2), url_path, None)) + else: + imports.append(("library", match.group(2), match.group(2), None)) + + return imports + + +def _download_additional_modules( + name: str, base_path: str, imports: Tuple[str, str, str, str], download_config: Optional[DownloadConfig] +) -> List[Tuple[str, str]]: + """ + Download additional module for a module .py at URL (or local path) /.py + The imports must have been parsed first using ``get_imports``. + + If some modules need to be installed with pip, an error is raised showing how to install them. + This function return the list of downloaded modules as tuples (import_name, module_file_path). + + The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``. + """ + local_imports = [] + library_imports = [] + download_config = download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading extra modules" + for import_type, import_name, import_path, sub_directory in imports: + if import_type == "library": + library_imports.append((import_name, import_path)) # Import from a library + continue + + if import_name == name: + raise ValueError( + f"Error in the {name} script, importing relative {import_name} module " + f"but {import_name} is the name of the script. " + f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' " + f"comment pointing to the original relative import file path." 
+ ) + if import_type == "internal": + url_or_filename = url_or_path_join(base_path, import_path + ".py") + elif import_type == "external": + url_or_filename = import_path + else: + raise ValueError("Wrong import_type") + + local_import_path = cached_path( + url_or_filename, + download_config=download_config, + ) + if sub_directory is not None: + local_import_path = os.path.join(local_import_path, sub_directory) + local_imports.append((import_name, local_import_path)) + + # Check library imports + needs_to_be_installed = set() + for library_import_name, library_import_path in library_imports: + try: + lib = importlib.import_module(library_import_name) # noqa F841 + except ImportError: + library_import_name = "scikit-learn" if library_import_name == "sklearn" else library_import_name + needs_to_be_installed.add((library_import_name, library_import_path)) + if needs_to_be_installed: + raise ImportError( + f"To be able to use {name}, you need to install the following dependencies" + f"{[lib_name for lib_name, lib_path in needs_to_be_installed]} using 'pip install " + f"{' '.join([lib_path for lib_name, lib_path in needs_to_be_installed])}' for instance'" + ) + return local_imports + + +def _copy_script_and_other_resources_in_importable_dir( + name: str, + importable_directory_path: str, + subdirectory_name: str, + original_local_path: str, + local_imports: List[Tuple[str, str]], + additional_files: List[Tuple[str, str]], + download_mode: Optional[DownloadMode], +) -> str: + """Copy a script and its required imports to an importable directory + + Args: + name (str): name of the resource to load + importable_directory_path (str): path to the loadable folder in the dynamic modules directory + subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script + original_local_path (str): local path to the resource script + local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy) + additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy) + download_mode (Optional[DownloadMode]): download mode + + Return: + importable_local_file: path to an importable module with importlib.import_module + """ + + # Define a directory with a unique name in our dataset or metric folder + # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py + # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together + importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name) + importable_local_file = os.path.join(importable_subdirectory, name + ".py") + + # Prevent parallel disk operations + lock_path = importable_directory_path + ".lock" + with FileLock(lock_path): + # Create main dataset/metrics folder if needed + if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path): + shutil.rmtree(importable_directory_path) + os.makedirs(importable_directory_path, exist_ok=True) + + # add an __init__ file to the main dataset folder if needed + init_file_path = os.path.join(importable_directory_path, "__init__.py") + if not os.path.exists(init_file_path): + with open(init_file_path, "w"): + pass + + # Create hash dataset folder if needed + os.makedirs(importable_subdirectory, exist_ok=True) + # add an __init__ file to the hash dataset folder if needed + init_file_path = os.path.join(importable_subdirectory, "__init__.py") + if not os.path.exists(init_file_path): + with 
open(init_file_path, "w"): + pass + + # Copy dataset.py file in hash folder if needed + if not os.path.exists(importable_local_file): + shutil.copyfile(original_local_path, importable_local_file) + + # Record metadata associating original dataset path with local unique folder + meta_path = importable_local_file.split(".py")[0] + ".json" + if not os.path.exists(meta_path): + meta = {"original file path": original_local_path, "local file path": importable_local_file} + # the filename is *.py in our case, so better rename to filenam.json instead of filename.py.json + with open(meta_path, "w", encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + # Copy all the additional imports + for import_name, import_path in local_imports: + if os.path.isfile(import_path): + full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py") + if not os.path.exists(full_path_local_import): + shutil.copyfile(import_path, full_path_local_import) + elif os.path.isdir(import_path): + full_path_local_import = os.path.join(importable_subdirectory, import_name) + if not os.path.exists(full_path_local_import): + shutil.copytree(import_path, full_path_local_import) + else: + raise OSError(f"Error with local import at {import_path}") + + # Copy aditional files like dataset infos file if needed + for file_name, original_path in additional_files: + destination_additional_path = os.path.join(importable_subdirectory, file_name) + if not os.path.exists(destination_additional_path) or not filecmp.cmp( + original_path, destination_additional_path + ): + shutil.copyfile(original_path, destination_additional_path) + return importable_local_file + + +def _create_importable_file( + local_path: str, + local_imports: List[Tuple[str, str]], + additional_files: List[Tuple[str, str]], + dynamic_modules_path: str, + module_namespace: str, + name: str, + download_mode: DownloadMode, +) -> Tuple[str, str]: + importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--")) + Path(importable_directory_path).mkdir(parents=True, exist_ok=True) + (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True) + hash = files_to_hash([local_path] + [loc[1] for loc in local_imports]) + importable_local_file = _copy_script_and_other_resources_in_importable_dir( + name=name.split("/")[-1], + importable_directory_path=importable_directory_path, + subdirectory_name=hash, + original_local_path=local_path, + local_imports=local_imports, + additional_files=additional_files, + download_mode=download_mode, + ) + logger.debug(f"Created importable dataset file at {importable_local_file}") + module_path = ".".join( + [os.path.basename(dynamic_modules_path), module_namespace, name.replace("/", "--"), hash, name.split("/")[-1]] + ) + return module_path, hash + + +@dataclass +class ImportableModule: + module_path: str + hash: str + + +class _EvaluationModuleFactory: + def get_module(self) -> ImportableModule: + raise NotImplementedError + + +class LocalEvaluationModuleFactory(_EvaluationModuleFactory): + """Get the module of a local metric. 
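To make the factory classes that follow easier to trace, here is a sketch of the dotted module path returned by `_create_importable_file` above; the cache directory, namespace and hash values are illustrative placeholders, while the join itself mirrors the code above:

```python
# Sketch: reconstructing the dotted module path created above for a metric "accuracy".
# The cache directory name and hash are illustrative placeholders.
import os

dynamic_modules_path = "/home/user/.cache/huggingface/modules/evaluate_modules"  # illustrative
module_namespace, name, script_hash = "metrics", "accuracy", "0" * 64  # illustrative hash

module_path = ".".join(
    [os.path.basename(dynamic_modules_path), module_namespace, name, script_hash, name]
)
print(module_path)
# -> evaluate_modules.metrics.accuracy.<64 zeros>.accuracy, mirroring the on-disk layout
#    <modules dir>/metrics/accuracy/<hash>/accuracy.py made importable above.
```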
The metric script is loaded from a local script.""" + + def __init__( + self, + path: str, + module_type: str = "metrics", + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.path = path + self.module_type = module_type + self.name = Path(path).stem + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + + def get_module(self) -> ImportableModule: + # get script and other files + imports = get_imports(self.path) + local_imports = _download_additional_modules( + name=self.name, + base_path=str(Path(self.path).parent), + imports=imports, + download_config=self.download_config, + ) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=self.path, + local_imports=local_imports, + additional_files=[], + dynamic_modules_path=dynamic_modules_path, + module_namespace=self.module_type, + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + return ImportableModule(module_path, hash) + + +class HubEvaluationModuleFactory(_EvaluationModuleFactory): + """Get the module of a metric from a metric repository on the Hub.""" + + def __init__( + self, + name: str, + module_type: str = "metrics", + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.module_type = module_type + self.revision = revision + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count("/") == 1 + increase_load_count(name, resource_type="metric") + + def download_loading_script(self, revision) -> str: + file_path = hf_hub_url(path=self.name, name=self.name.split("/")[1] + ".py", revision=revision) + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading builder script" + return cached_path(file_path, download_config=download_config) + + def get_module(self) -> ImportableModule: + revision = self.revision or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION) + + if re.match(r"\d*\.\d*\.\d*", revision): # revision is version number (three digits separated by full stops) + revision = "v" + revision # tagging convention on evaluate repository starts with v + + # get script and other files + try: + local_path = self.download_loading_script(revision) + except FileNotFoundError as err: + # if there is no file found with current revision tag try to load main + if self.revision is None and os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION) != "main": + revision = "main" + local_path = self.download_loading_script(revision) + else: + raise err + + imports = get_imports(local_path) + local_imports = _download_additional_modules( + name=self.name, + base_path=hf_hub_url(path=self.name, name="", revision=revision), + imports=imports, + download_config=self.download_config, + ) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else 
init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=local_path, + local_imports=local_imports, + additional_files=[], + dynamic_modules_path=dynamic_modules_path, + module_namespace=self.module_type, + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + return ImportableModule(module_path, hash) + + +class CachedEvaluationModuleFactory(_EvaluationModuleFactory): + """ + Get the module of a metric that has been loaded once already and cached. + The script that is loaded from the cache is the most recent one with a matching name. + """ + + def __init__( + self, + name: str, + module_type: str = "metrics", + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.module_type = module_type + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count("/") == 0 + + def get_module(self) -> ImportableModule: + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + importable_directory_path = os.path.join(dynamic_modules_path, self.module_type, self.name) + hashes = ( + [h for h in os.listdir(importable_directory_path) if len(h) == 64] + if os.path.isdir(importable_directory_path) + else None + ) + if not hashes: + raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}") + # get most recent + + def _get_modification_time(module_hash): + return ( + (Path(importable_directory_path) / module_hash / (self.name.split("--")[-1] + ".py")).stat().st_mtime + ) + + hash = sorted(hashes, key=_get_modification_time)[-1] + logger.warning( + f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} " + f"(last modified on {time.ctime(_get_modification_time(hash))}) since it " + f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub." + ) + # make the new module to be noticed by the import system + module_path = ".".join( + [os.path.basename(dynamic_modules_path), self.module_type, self.name, hash, self.name.split("--")[-1]] + ) + importlib.invalidate_caches() + return ImportableModule(module_path, hash) + + +def evaluation_module_factory( + path: str, + module_type: Optional[str] = None, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + force_local_path: Optional[str] = None, + dynamic_modules_path: Optional[str] = None, + **download_kwargs, +) -> ImportableModule: + """ + Download/extract/cache a metric module. + + Metrics codes are cached inside the the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks). + + Args: + + path (str): Path or name of the metric script. + + - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory): + -> load the module from the metric script + e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``. + - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`) + -> load the module from the metric script in the github repository at huggingface/datasets + e.g. ``'accuracy'`` or ``'rouge'``. + + revision (Optional ``Union[str, datasets.Version]``): + If specified, the module will be loaded from the datasets repository at this version. + By default: + - it is set to the local version of the lib. 
+ - it will also try to load it from the master branch if it's not available at the local version of the lib. + Specifying a version that is different from your local version of the lib might cause compatibility issues. + download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters. + download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. + force_local_path (Optional str): Optional path to a local path to download and prepare the script to. + Used to inspect or modify the script folder. + dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules): + Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`. + By default the datasets and metrics are stored inside the `datasets_modules` module. + download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied. + + Returns: + ImportableModule + """ + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + download_config.extract_compressed_file = True + download_config.force_extract = True + + filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1] + if not filename.endswith(".py"): + filename = filename + ".py" + combined_path = os.path.join(path, filename) + # Try locally + if path.endswith(filename): + if os.path.isfile(path): + return LocalEvaluationModuleFactory( + path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + else: + raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}") + elif os.path.isfile(combined_path): + return LocalEvaluationModuleFactory( + combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + elif is_relative_path(path) and path.count("/") <= 1 and not force_local_path: + try: + # load a canonical evaluation module from hub + if path.count("/") == 0: + # if no type provided look through all possible modules + if module_type is None: + for current_type in ["metric", "comparison", "measurement"]: + try: + return HubEvaluationModuleFactory( + f"evaluate-{current_type}/{path}", + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + except ConnectionError: + pass + raise FileNotFoundError + # if module_type provided load specific module_type + else: + return HubEvaluationModuleFactory( + f"evaluate-{module_type}/{path}", + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + # load community evaluation module from hub + elif path.count("/") == 1: + return HubEvaluationModuleFactory( + path, + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + except Exception as e1: # noqa: all the attempts failed, before raising the error we should check if the module is already cached. 
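+            # Only ConnectionError / FileNotFoundError get folded into the combined
+            # FileNotFoundError raised below; any other exception from the Hub factories
+            # is re-raised unchanged once the cache fallback has been attempted.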
+ # if it's a canonical module we need to check if it's any of the types + if path.count("/") == 0: + for current_type in ["metric", "comparison", "measurement"]: + try: + return CachedEvaluationModuleFactory( + f"evaluate-{current_type}--{path}", dynamic_modules_path=dynamic_modules_path + ).get_module() + except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist. + pass + # if it's a community module we just need to check on path + elif path.count("/") == 1: + try: + return CachedEvaluationModuleFactory( + path.replace("/", "--"), dynamic_modules_path=dynamic_modules_path + ).get_module() + except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist. + pass + if not isinstance(e1, (ConnectionError, FileNotFoundError)): + raise e1 from None + raise FileNotFoundError( + f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}. " + f"Module '{path}' doesn't exist on the Hugging Face Hub either." + ) from None + else: + raise FileNotFoundError(f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}.") + + +def load( + path: str, + config_name: Optional[str] = None, + module_type: Optional[str] = None, + process_id: int = 0, + num_process: int = 1, + cache_dir: Optional[str] = None, + experiment_id: Optional[str] = None, + keep_in_memory: bool = False, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[DownloadMode] = None, + revision: Optional[Union[str, Version]] = None, + **init_kwargs, +) -> EvaluationModule: + """Load a [`~evaluate.EvaluationModule`]. + + Args: + + path (`str`): + Path to the evaluation processing script with the evaluation builder. Can be either: + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. `'./metrics/rouge'` or `'./metrics/rouge/rouge.py'` + - a evaluation module identifier on the HuggingFace evaluate repo e.g. `'rouge'` or `'bleu'` that are in either `'metrics/'`, + `'comparisons/'`, or `'measurements/'` depending on the provided `module_type` + config_name (`str`, *optional*): + Selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset). + module_type (`str`, default `'metric'`): + Type of evaluation module, can be one of `'metric'`, `'comparison'`, or `'measurement'`. + process_id (`int`, *optional*): + For distributed evaluation: id of the process. + num_process (`int`, *optional*): + For distributed evaluation: total number of processes. + cache_dir (`str`, *optional*): + Path to store the temporary predictions and references (default to `~/.cache/huggingface/evaluate/`). + experiment_id (`str`): + A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). + keep_in_memory (`bool`): + Whether to store the temporary results in memory (defaults to `False`). + download_config ([`~evaluate.DownloadConfig`], *optional*): + Specific download configuration parameters. + download_mode ([`DownloadMode`], defaults to `REUSE_DATASET_IF_EXISTS`): + Download/generate mode. + revision (`Union[str, evaluate.Version]`, *optional*): + If specified, the module will be loaded from the datasets repository + at this version. By default it is set to the local version of the lib. Specifying a version that is different from + your local version of the lib might cause compatibility issues. 
+ + Returns: + [`evaluate.EvaluationModule`] + + Example: + + ```py + >>> from evaluate import load + >>> accuracy = evaluate.load("accuracy") + ``` + """ + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + evaluation_module = evaluation_module_factory( + path, module_type=module_type, revision=revision, download_config=download_config, download_mode=download_mode + ) + evaluation_cls = import_main_class(evaluation_module.module_path) + evaluation_instance = evaluation_cls( + config_name=config_name, + process_id=process_id, + num_process=num_process, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + experiment_id=experiment_id, + hash=evaluation_module.hash, + **init_kwargs, + ) + + if module_type and module_type != evaluation_instance.module_type: + raise TypeError( + f"No module of module type '{module_type}' not found for '{path}' locally, or on the Hugging Face Hub. Found module of module type '{evaluation_instance.module_type}' instead." + ) + + # Download and prepare resources for the metric + evaluation_instance.download_and_prepare(download_config=download_config) + + return evaluation_instance diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/module.py b/env-llmeval/lib/python3.10/site-packages/evaluate/module.py new file mode 100644 index 0000000000000000000000000000000000000000..3652ad1b6ea0691afba35b70b3197b307a66a428 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/module.py @@ -0,0 +1,1029 @@ +# Copyright 2020 The HuggingFace Datasets Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" EvaluationModule base class.""" +import collections +import itertools +import os +import types +import uuid +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import pyarrow as pa +from datasets import DatasetInfo, DownloadConfig, DownloadManager +from datasets.arrow_dataset import Dataset +from datasets.arrow_reader import ArrowReader +from datasets.arrow_writer import ArrowWriter +from datasets.features import Features, Sequence, Value +from datasets.features.features import _check_non_null_non_empty_recursive +from datasets.utils.filelock import BaseFileLock, FileLock, Timeout +from datasets.utils.py_utils import copyfunc, temp_seed, zip_dict + +from . import config +from .info import EvaluationModuleInfo +from .naming import camelcase_to_snakecase +from .utils.logging import get_logger + + +logger = get_logger(__name__) + + +class FileFreeLock(BaseFileLock): + """Thread lock until a file **cannot** be locked""" + + def __init__(self, lock_file, *args, **kwargs): + self.filelock = FileLock(lock_file) + super().__init__(lock_file, *args, **kwargs) + + def _acquire(self): + try: + self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once + except Timeout: + # We couldn't acquire the lock, the file is locked! + self._lock_file_fd = self.filelock.lock_file + else: + # We were able to acquire the lock, the file is not yet locked! 
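+            # Release it immediately and report failure so that acquire() keeps polling:
+            # a FileFreeLock only counts as "acquired" once some other process holds the
+            # real FileLock on this path.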
+ self.filelock.release() + self._lock_file_fd = None + + def _release(self): + self._lock_file_fd = None + + +# lists - summarize long lists similarly to NumPy +# arrays/tensors - let the frameworks control formatting +def summarize_if_long_list(obj): + if not type(obj) == list or len(obj) <= 6: + return f"{obj}" + + def format_chunk(chunk): + return ", ".join(repr(x) for x in chunk) + + return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]" + + +class EvaluationModuleInfoMixin: + """This base class exposes some attributes of EvaluationModuleInfo + at the base level of the EvaluationModule for easy access. + """ + + def __init__(self, info: EvaluationModuleInfo): + self._module_info = info + + @property + def info(self): + """:class:`evaluate.EvaluationModuleInfo` object containing all the metadata in the evaluation module.""" + return self._module_info + + @property + def name(self) -> str: + return self._module_info.module_name + + @property + def experiment_id(self) -> Optional[str]: + return self._module_info.experiment_id + + @property + def description(self) -> str: + return self._module_info.description + + @property + def citation(self) -> str: + return self._module_info.citation + + @property + def features(self) -> Features: + return self._module_info.features + + @property + def inputs_description(self) -> str: + return self._module_info.inputs_description + + @property + def homepage(self) -> Optional[str]: + return self._module_info.homepage + + @property + def license(self) -> str: + return self._module_info.license + + @property + def codebase_urls(self) -> Optional[List[str]]: + return self._module_info.codebase_urls + + @property + def reference_urls(self) -> Optional[List[str]]: + return self._module_info.reference_urls + + @property + def streamable(self) -> bool: + return self._module_info.streamable + + @property + def format(self) -> Optional[str]: + return self._module_info.format + + @property + def module_type(self) -> str: + return self._module_info.module_type + + +class EvaluationModule(EvaluationModuleInfoMixin): + """A `EvaluationModule` is the base class and common API for metrics, comparisons, and measurements. + + Args: + config_name (`str`): + This is used to define a hash specific to a module computation script and prevents the module's data + to be overridden when the module loading script is modified. + keep_in_memory (`bool`): + Keep all predictions and references in memory. Not possible in distributed settings. + cache_dir (`str`): + Path to a directory in which temporary prediction/references data will be stored. + The data directory should be located on a shared file-system in distributed setups. + num_process (`int`): + Specify the total number of nodes in a distributed settings. + This is useful to compute module in distributed setups (in particular non-additive modules like F1). + process_id (`int`): + Specify the id of the current process in a distributed setup (between 0 and num_process-1) + This is useful to compute module in distributed setups (in particular non-additive metrics like F1). + seed (`int`, optional): + If specified, this will temporarily set numpy's random seed when [`~evaluate.EvaluationModule.compute`] is run. + experiment_id (`str`): + A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute module in distributed setups (in particular non-additive metrics like F1). 
+ hash (`str`): + Used to identify the evaluation module according to the hashed file contents. + max_concurrent_cache_files (`int`): + Max number of concurrent module cache files (default `10000`). + timeout (`Union[int, float]`): + Timeout in second for distributed setting synchronization. + """ + + def __init__( + self, + config_name: Optional[str] = None, + keep_in_memory: bool = False, + cache_dir: Optional[str] = None, + num_process: int = 1, + process_id: int = 0, + seed: Optional[int] = None, + experiment_id: Optional[str] = None, + hash: str = None, + max_concurrent_cache_files: int = 10000, + timeout: Union[int, float] = 100, + **kwargs, + ): + # prepare info + self.config_name = config_name or "default" + info = self._info() + info.module_name = camelcase_to_snakecase(self.__class__.__name__) + info.config_name = self.config_name + info.experiment_id = experiment_id or "default_experiment" + EvaluationModuleInfoMixin.__init__(self, info) # For easy access on low level + + # Safety checks on num_process and process_id + if not isinstance(process_id, int) or process_id < 0: + raise ValueError("'process_id' should be a number greater than 0") + if not isinstance(num_process, int) or num_process <= process_id: + raise ValueError("'num_process' should be a number greater than process_id") + if keep_in_memory and num_process != 1: + raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).") + + self.num_process = num_process + self.process_id = process_id + self.max_concurrent_cache_files = max_concurrent_cache_files + + self.keep_in_memory = keep_in_memory + self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE) + self.data_dir = self._build_data_dir() + if seed is None: + _, seed, pos, *_ = np.random.get_state() + self.seed: int = seed[pos] if pos < 624 else seed[0] + else: + self.seed: int = seed + self.timeout: Union[int, float] = timeout + + # Update 'compute' and 'add' docstring + # methods need to be copied otherwise it changes the docstrings of every instance + self.compute = types.MethodType(copyfunc(self.compute), self) + self.add_batch = types.MethodType(copyfunc(self.add_batch), self) + self.add = types.MethodType(copyfunc(self.add), self) + self.compute.__func__.__doc__ += self.info.inputs_description + self.add_batch.__func__.__doc__ += self.info.inputs_description + self.add.__func__.__doc__ += self.info.inputs_description + + # self.arrow_schema = pa.schema(field for field in self.info.features.type) + self.selected_feature_format = None + self.buf_writer = None + self.writer = None + self.writer_batch_size = None + self.data = None + + # This is the cache file we store our predictions/references in + # Keep it None for now so we can (cloud)pickle the object + self.cache_file_name = None + self.filelock = None + self.rendez_vous_lock = None + + # This is all the cache files on which we have a lock when we are in a distributed setting + self.file_paths = None + self.filelocks = None + + # This fingerprints the evaluation module according to the hashed contents of the module code + self._hash = hash + + def __len__(self): + """Return the number of examples (predictions or predictions/references pair) + currently stored in the evaluation module's cache. 
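The constructor arguments documented above map onto a small usage pattern; here is a hedged sketch (module name and values illustrative) of the incremental `add_batch`/`compute` flow, plus the distributed keywords described above:

```python
# Sketch: incremental evaluation with an EvaluationModule; "accuracy" is illustrative.
import evaluate

accuracy = evaluate.load("accuracy")
for refs, preds in [([0, 1], [1, 0]), ([0, 1], [0, 1])]:
    accuracy.add_batch(references=refs, predictions=preds)
print(accuracy.compute())  # e.g. {'accuracy': 0.5}

# In a multi-process job each rank would load the module with its own process id and a
# shared experiment_id, so rank 0 can gather the per-process cache files on compute():
# accuracy = evaluate.load("accuracy", num_process=world_size, process_id=rank,
#                          experiment_id="my-eval-run")
```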
+ """ + return 0 if self.writer is None else len(self.writer) + + def __repr__(self): + return ( + f'EvaluationModule(name: "{self.name}", module_type: "{self.module_type}", ' + f'features: {self.features}, usage: """{self.inputs_description}""", ' + f"stored examples: {len(self)})" + ) + + def _build_data_dir(self): + """Path of this evaluation module in cache_dir: + Will be: + self._data_dir_root/self.name/self.config_name/self.hash (if not none)/ + If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped. + """ + builder_data_dir = self._data_dir_root + builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name) + os.makedirs(builder_data_dir, exist_ok=True) + return builder_data_dir + + def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]: + """Create a new cache file. If the default cache file is used, we generated a new hash.""" + file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow") + filelock = None + for i in range(self.max_concurrent_cache_files): + filelock = FileLock(file_path + ".lock") + try: + filelock.acquire(timeout=timeout) + except Timeout: + # If we have reached the max number of attempts or we are not allow to find a free name (distributed setup) + # We raise an error + if self.num_process != 1: + raise ValueError( + f"Error in _create_cache_file: another evaluation module instance is already using the local cache file at {file_path}. " + f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision " + f"between distributed evaluation module instances." + ) from None + if i == self.max_concurrent_cache_files - 1: + raise ValueError( + f"Cannot acquire lock, too many evaluation module instance are operating concurrently on this file system." + f"You should set a larger value of max_concurrent_cache_files when creating the evaluation module " + f"(current value is {self.max_concurrent_cache_files})." + ) from None + # In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name. + file_uuid = str(uuid.uuid4()) + file_path = os.path.join( + self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow" + ) + else: + break + + return file_path, filelock + + def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]: + """Get a lock on all the cache files in a distributed setup. + We wait for timeout second to let all the distributed node finish their tasks (default is 100 seconds). + """ + if self.num_process == 1: + if self.cache_file_name is None: + raise ValueError( + "Evaluation module cache file doesn't exist. Please make sure that you call `add` or `add_batch` " + "at least once before calling `compute`." + ) + file_paths = [self.cache_file_name] + else: + file_paths = [ + os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow") + for process_id in range(self.num_process) + ] + + # Let's acquire a lock on each process files to be sure they are finished writing + filelocks = [] + for process_id, file_path in enumerate(file_paths): + if process_id == 0: # process 0 already has its lock file + filelocks.append(self.filelock) + else: + filelock = FileLock(file_path + ".lock") + try: + filelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Cannot acquire lock on cached file {file_path} for process {process_id}." 
+ ) from None + else: + filelocks.append(filelock) + + return file_paths, filelocks + + def _check_all_processes_locks(self): + expected_lock_file_names = [ + os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock") + for process_id in range(self.num_process) + ] + for expected_lock_file_name in expected_lock_file_names: + nofilelock = FileFreeLock(expected_lock_file_name) + try: + nofilelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." + ) from None + else: + nofilelock.release() + + def _check_rendez_vous(self): + expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock") + nofilelock = FileFreeLock(expected_lock_file_name) + try: + nofilelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." + ) from None + else: + nofilelock.release() + lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock") + rendez_vous_lock = FileLock(lock_file_name) + try: + rendez_vous_lock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None + else: + rendez_vous_lock.release() + + def _finalize(self): + """Close all the writing process and load/gather the data + from all the nodes if main node or all_process is True. + """ + if self.writer is not None: + self.writer.finalize() + self.writer = None + # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data + if self.filelock is not None and self.process_id > 0: + self.filelock.release() + + if self.keep_in_memory: + # Read the predictions and references + reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.selected_feature_format)) + self.data = Dataset.from_buffer(self.buf_writer.getvalue()) + + elif self.process_id == 0: + # Let's acquire a lock on each node files to be sure they are finished writing + file_paths, filelocks = self._get_all_cache_files() + + # Read the predictions and references + try: + reader = ArrowReader(path="", info=DatasetInfo(features=self.selected_feature_format)) + self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths])) + except FileNotFoundError: + raise ValueError( + "Error in finalize: another evaluation module instance is already using the local cache file. " + "Please specify an experiment_id to avoid collision between distributed evaluation module instances." + ) from None + + # Store file paths and locks and we will release/delete them after the computation. + self.file_paths = file_paths + self.filelocks = filelocks + + def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]: + """Compute the evaluation module. + + Usage of positional arguments is not allowed to prevent mistakes. + + Args: + predictions (`list/array/tensor`, *optional*): + Predictions. + references (`list/array/tensor`, *optional*): + References. + **kwargs (optional): + Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`] + method (see details in the docstring). + + Return: + `dict` or `None` + + - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`). 
+ - `None` if the evaluation module is not run on the main process (`process_id != 0`). + + ```py + >>> import evaluate + >>> accuracy = evaluate.load("accuracy") + >>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1]) + ``` + """ + all_kwargs = {"predictions": predictions, "references": references, **kwargs} + if predictions is None and references is None: + missing_kwargs = {k: None for k in self._feature_names() if k not in all_kwargs} + all_kwargs.update(missing_kwargs) + else: + missing_inputs = [k for k in self._feature_names() if k not in all_kwargs] + if missing_inputs: + raise ValueError( + f"Evaluation module inputs are missing: {missing_inputs}. All required inputs are {list(self._feature_names())}" + ) + inputs = {input_name: all_kwargs[input_name] for input_name in self._feature_names()} + compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self._feature_names()} + + if any(v is not None for v in inputs.values()): + self.add_batch(**inputs) + self._finalize() + + self.cache_file_name = None + self.filelock = None + self.selected_feature_format = None + + if self.process_id == 0: + self.data.set_format(type=self.info.format) + + inputs = {input_name: self.data[input_name] for input_name in self._feature_names()} + with temp_seed(self.seed): + output = self._compute(**inputs, **compute_kwargs) + + if self.buf_writer is not None: + self.buf_writer = None + del self.data + self.data = None + else: + # Release locks and delete all the cache files. Process 0 is released last. + for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))): + logger.info(f"Removing {file_path}") + del self.data + self.data = None + del self.writer + self.writer = None + os.remove(file_path) + filelock.release() + + return output + else: + return None + + def add_batch(self, *, predictions=None, references=None, **kwargs): + """Add a batch of predictions and references for the evaluation module's stack. + + Args: + predictions (`list/array/tensor`, *optional*): + Predictions. + references (`list/array/tensor`, *optional*): + References. + + Example: + + ```py + >>> import evaluate + >>> accuracy = evaluate.load("accuracy") + >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]): + ... accuracy.add_batch(references=refs, predictions=preds) + ``` + """ + bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()] + if bad_inputs: + raise ValueError( + f"Bad inputs for evaluation module: {bad_inputs}. 
All required inputs are {list(self._feature_names())}" + ) + batch = {"predictions": predictions, "references": references, **kwargs} + batch = {input_name: batch[input_name] for input_name in self._feature_names()} + if self.writer is None: + self.selected_feature_format = self._infer_feature_from_batch(batch) + self._init_writer() + try: + for key, column in batch.items(): + if len(column) > 0: + self._enforce_nested_string_type(self.selected_feature_format[key], column[0]) + batch = self.selected_feature_format.encode_batch(batch) + self.writer.write_batch(batch) + except (pa.ArrowInvalid, TypeError): + if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch): + col0 = next(iter(batch)) + bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0] + error_msg = ( + f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})" + ) + elif set(self.selected_feature_format) != {"references", "predictions"}: + error_msg = ( + f"Module inputs don't match the expected format.\n" + f"Expected format: {self.selected_feature_format },\n" + ) + error_msg_inputs = ",\n".join( + f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" + for input_name in self.selected_feature_format + ) + error_msg += error_msg_inputs + else: + error_msg = ( + f"Predictions and/or references don't match the expected format.\n" + f"Expected format: {self.selected_feature_format },\n" + f"Input predictions: {summarize_if_long_list(predictions)},\n" + f"Input references: {summarize_if_long_list(references)}" + ) + raise ValueError(error_msg) from None + + def add(self, *, prediction=None, reference=None, **kwargs): + """Add one prediction and reference for the evaluation module's stack. + + Args: + prediction (`list/array/tensor`, *optional*): + Predictions. + reference (`list/array/tensor`, *optional*): + References. + + Example: + + ```py + >>> import evaluate + >>> accuracy = evaluate.load("accuracy") + >>> accuracy.add(references=[0,1], predictions=[1,0]) + ``` + """ + bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()] + if bad_inputs: + raise ValueError( + f"Bad inputs for evaluation module: {bad_inputs}. 
All required inputs are {list(self._feature_names())}" + ) + example = {"predictions": prediction, "references": reference, **kwargs} + example = {input_name: example[input_name] for input_name in self._feature_names()} + if self.writer is None: + self.selected_feature_format = self._infer_feature_from_example(example) + self._init_writer() + try: + self._enforce_nested_string_type(self.selected_feature_format, example) + example = self.selected_feature_format.encode_example(example) + self.writer.write(example) + except (pa.ArrowInvalid, TypeError): + error_msg = ( + f"Evaluation module inputs don't match the expected format.\n" + f"Expected format: {self.selected_feature_format},\n" + ) + error_msg_inputs = ",\n".join( + f"Input {input_name}: {summarize_if_long_list(example[input_name])}" + for input_name in self.selected_feature_format + ) + error_msg += error_msg_inputs + raise ValueError(error_msg) from None + + def _infer_feature_from_batch(self, batch): + if isinstance(self.features, Features): + return self.features + else: + example = dict([(k, v[0]) for k, v in batch.items()]) + return self._infer_feature_from_example(example) + + def _infer_feature_from_example(self, example): + if isinstance(self.features, Features): + return self.features + else: + for features in self.features: + try: + self._enforce_nested_string_type(features, example) + features.encode_example(example) + return features + except (ValueError, TypeError): + continue + feature_strings = "\n".join([f"Feature option {i}: {feature}" for i, feature in enumerate(self.features)]) + error_msg = ( + f"Predictions and/or references don't match the expected format.\n" + f"Expected format:\n{feature_strings},\n" + f"Input predictions: {summarize_if_long_list(example['predictions'])},\n" + f"Input references: {summarize_if_long_list(example['references'])}" + ) + raise ValueError(error_msg) from None + + def _feature_names(self): + if isinstance(self.features, list): + feature_names = list(self.features[0].keys()) + else: + feature_names = list(self.features.keys()) + return feature_names + + def _init_writer(self, timeout=1): + if self.num_process > 1: + if self.process_id == 0: + file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock") + self.rendez_vous_lock = FileLock(file_path) + try: + self.rendez_vous_lock.acquire(timeout=timeout) + except TimeoutError: + raise ValueError( + f"Error in _init_writer: another evalution module instance is already using the local cache file at {file_path}. " + f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision " + f"between distributed evaluation module instances." 
+ ) from None + + if self.keep_in_memory: + self.buf_writer = pa.BufferOutputStream() + self.writer = ArrowWriter( + features=self.selected_feature_format, stream=self.buf_writer, writer_batch_size=self.writer_batch_size + ) + else: + self.buf_writer = None + + # Get cache file name and lock it + if self.cache_file_name is None or self.filelock is None: + cache_file_name, filelock = self._create_cache_file() # get ready + self.cache_file_name = cache_file_name + self.filelock = filelock + + self.writer = ArrowWriter( + features=self.selected_feature_format, + path=self.cache_file_name, + writer_batch_size=self.writer_batch_size, + ) + # Setup rendez-vous here if + if self.num_process > 1: + if self.process_id == 0: + self._check_all_processes_locks() # wait for everyone to be ready + self.rendez_vous_lock.release() # let everyone go + else: + self._check_rendez_vous() # wait for master to be ready and to let everyone go + + def _info(self) -> EvaluationModuleInfo: + """Construct the EvaluationModuleInfo object. See `EvaluationModuleInfo` for details. + + Warning: This function is only called once and the result is cached for all + following .info() calls. + + Returns: + info: (EvaluationModuleInfo) The EvaluationModule information + """ + raise NotImplementedError + + def download_and_prepare( + self, + download_config: Optional[DownloadConfig] = None, + dl_manager: Optional[DownloadManager] = None, + ): + """Downloads and prepares evaluation module for reading. + + Args: + download_config ([`DownloadConfig`], *optional*): + Specific download configuration parameters. + dl_manager ([`DownloadManager`], *optional*): + Specific download manager to use. + + Example: + + ```py + >>> import evaluate + ``` + """ + if dl_manager is None: + if download_config is None: + download_config = DownloadConfig() + download_config.cache_dir = os.path.join(self.data_dir, "downloads") + download_config.force_download = False + + dl_manager = DownloadManager( + dataset_name=self.name, download_config=download_config, data_dir=self.data_dir + ) + + self._download_and_prepare(dl_manager) + + def _download_and_prepare(self, dl_manager): + """Downloads and prepares resources for the evaluation module. + + This is the internal implementation to overwrite called when user calls + `download_and_prepare`. It should download all required resources for the evaluation module. + + Args: + dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data. + """ + return None + + def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]: + """This method defines the common API for all the evaluation module in the library""" + raise NotImplementedError + + def __del__(self): + if hasattr(self, "filelock") and self.filelock is not None: + self.filelock.release() + if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None: + self.rendez_vous_lock.release() + if hasattr(self, "writer"): # in case it was already deleted + del self.writer + if hasattr(self, "data"): # in case it was already deleted + del self.data + + def _enforce_nested_string_type(self, schema, obj): + """ + Recursively checks if there is any Value feature of type string and throws TypeError if corresponding object is not a string. + Since any Python object can be cast to string this avoids implicitly casting wrong input types (e.g. lists) to string without error. 
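A minimal, hedged sketch of how the `_info` / `_compute` hooks described above are meant to be implemented by a concrete module. The class name, description, feature types and score key below are invented for illustration and are not part of the library:

```py
# Sketch only: a custom metric built on the EvaluationModule/Metric base class above.
import datasets
import evaluate


class ExactMatchCount(evaluate.Metric):  # hypothetical example module
    def _info(self):
        # _info() is called once and cached; it declares the expected input features.
        return evaluate.MetricInfo(
            description="Counts how many predictions equal their reference.",
            citation="",
            features=datasets.Features(
                {"predictions": datasets.Value("int64"), "references": datasets.Value("int64")}
            ),
        )

    def _compute(self, predictions, references):
        # Called by compute() with the gathered inputs after add/add_batch buffering.
        return {"exact_match_count": sum(int(p == r) for p, r in zip(predictions, references))}


metric = ExactMatchCount()
print(metric.compute(predictions=[0, 1, 1], references=[0, 1, 2]))  # {'exact_match_count': 2}
```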
+ """ + # Nested structures: we allow dict, list, tuples, sequences + if isinstance(schema, dict): + return [self._enforce_nested_string_type(sub_schema, o) for k, (sub_schema, o) in zip_dict(schema, obj)] + + elif isinstance(schema, (list, tuple)): + sub_schema = schema[0] + return [self._enforce_nested_string_type(sub_schema, o) for o in obj] + elif isinstance(schema, Sequence): + # We allow to reverse list of dict => dict of list for compatiblity with tfds + if isinstance(schema.feature, dict): + if isinstance(obj, (list, tuple)): + # obj is a list of dict + for k, dict_tuples in zip_dict(schema.feature, *obj): + for sub_obj in dict_tuples[1:]: + if _check_non_null_non_empty_recursive(sub_obj, dict_tuples[0]): + self._enforce_nested_string_type(dict_tuples[0], sub_obj) + break + return None + else: + # obj is a single dict + for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj): + for sub_obj in sub_objs: + if _check_non_null_non_empty_recursive(sub_obj, sub_schema): + self._enforce_nested_string_type(sub_schema, sub_obj) + break + return None + # schema.feature is not a dict + if isinstance(obj, str): # don't interpret a string as a list + raise ValueError(f"Got a string but expected a list instead: '{obj}'") + if obj is None: + return None + else: + if len(obj) > 0: + for first_elmt in obj: + if _check_non_null_non_empty_recursive(first_elmt, schema.feature): + break + if not isinstance(first_elmt, list): + return self._enforce_nested_string_type(schema.feature, first_elmt) + + elif isinstance(schema, Value): + if pa.types.is_string(schema.pa_type) and not isinstance(obj, str): + raise TypeError(f"Expected type str but got {type(obj)}.") + + +class Metric(EvaluationModule): + """A Metric is the base class and common API for all metrics. + + Args: + config_name (`str`): + This is used to define a hash specific to a metric computation script and prevents the metric's data + to be overridden when the metric loading script is modified. + keep_in_memory (`bool`): + Keep all predictions and references in memory. Not possible in distributed settings. + cache_dir (`str`): + Path to a directory in which temporary prediction/references data will be stored. + The data directory should be located on a shared file-system in distributed setups. + num_process (`int`): + Specify the total number of nodes in a distributed settings. + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). + process_id (`int`): + Specify the id of the current process in a distributed setup (between 0 and num_process-1) + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). + seed (`int`, *optional*): + If specified, this will temporarily set numpy's random seed when [`~evaluate.Metric.compute`] is run. + experiment_id (`str`): + A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). + max_concurrent_cache_files (`int`): + Max number of concurrent metric cache files (default `10000`). + timeout (`Union[int, float]`): + Timeout in second for distributed setting synchronization. + """ + + +class Comparison(EvaluationModule): + """A Comparison is the base class and common API for all comparisons. 
+ + Args: + config_name (`str`): + This is used to define a hash specific to a comparison computation script and prevents the comparison's data + to be overridden when the comparison loading script is modified. + keep_in_memory (`bool`): + Keep all predictions and references in memory. Not possible in distributed settings. + cache_dir (`str`): + Path to a directory in which temporary prediction/references data will be stored. + The data directory should be located on a shared file-system in distributed setups. + num_process (`int`): + Specify the total number of nodes in a distributed settings. + This is useful to compute comparisons in distributed setups (in particular non-additive comparisons). + process_id (`int`): + Specify the id of the current process in a distributed setup (between 0 and num_process-1) + This is useful to compute comparisons in distributed setups (in particular non-additive comparisons). + seed (`int`, *optional*): + If specified, this will temporarily set numpy's random seed when [`~evaluate.Comparison.compute`] is run. + experiment_id (`str`): + A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute comparisons in distributed setups (in particular non-additive comparisons). + max_concurrent_cache_files (`int`): + Max number of concurrent comparison cache files (default `10000`). + timeout (`Union[int, float]`): + Timeout in second for distributed setting synchronization. + """ + + +class Measurement(EvaluationModule): + """A Measurement is the base class and common API for all measurements. + + Args: + config_name (`str`): + This is used to define a hash specific to a measurement computation script and prevents the measurement's data + to be overridden when the measurement loading script is modified. + keep_in_memory (`bool`): + Keep all predictions and references in memory. Not possible in distributed settings. + cache_dir (`str`): + Path to a directory in which temporary prediction/references data will be stored. + The data directory should be located on a shared file-system in distributed setups. + num_process (`int`): + Specify the total number of nodes in a distributed settings. + This is useful to compute measurements in distributed setups (in particular non-additive measurements). + process_id (`int`): + Specify the id of the current process in a distributed setup (between 0 and num_process-1) + This is useful to compute measurements in distributed setups (in particular non-additive measurements). + seed (`int`, *optional*): + If specified, this will temporarily set numpy's random seed when [`~evaluate.Measurement.compute`] is run. + experiment_id (`str`): + A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute measurements in distributed setups (in particular non-additive measurements). + max_concurrent_cache_files (`int`): + Max number of concurrent measurement cache files (default `10000`). + timeout (`Union[int, float]`): + Timeout in second for distributed setting synchronization. 
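A hedged sketch of the distributed pattern that `num_process`, `process_id`, `experiment_id` and `cache_dir` describe. The rank, world size, paths and metric name are placeholders; every rank must run the same code (for example via `torchrun`), otherwise the rendez-vous times out:

```py
import evaluate

rank, world_size = 0, 2  # e.g. from torch.distributed.get_rank() / get_world_size()

metric = evaluate.load(
    "accuracy",
    num_process=world_size,
    process_id=rank,                    # 0 on the main node, 1..num_process-1 elsewhere
    experiment_id="my_experiment",      # shared by all ranks to avoid cache collisions
    cache_dir="/shared/metric_cache",   # placeholder path on a shared filesystem
)

metric.add_batch(predictions=[0, 1, 1], references=[0, 1, 0])  # each rank adds its own shard
result = metric.compute()  # dict on process 0, None on every other process
print(result)
```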
+ """ + + +class CombinedEvaluations: + def __init__(self, evaluation_modules, force_prefix=False): + from .loading import load # avoid circular imports + + self.evaluation_module_names = None + if isinstance(evaluation_modules, list): + self.evaluation_modules = evaluation_modules + elif isinstance(evaluation_modules, dict): + self.evaluation_modules = list(evaluation_modules.values()) + self.evaluation_module_names = list(evaluation_modules.keys()) + loaded_modules = [] + + for module in self.evaluation_modules: + if isinstance(module, str): + module = load(module) + loaded_modules.append(module) + self.evaluation_modules = loaded_modules + + if self.evaluation_module_names is None: + self.evaluation_module_names = [module.name for module in self.evaluation_modules] + + self.force_prefix = force_prefix + + def add(self, prediction=None, reference=None, **kwargs): + """Add one prediction and reference for each evaluation module's stack. + + Args: + predictions (`list/array/tensor`, *optional*): + Predictions. + references (`list/array/tensor`, *optional*): + References. + + Example: + + ```py + >>> import evaluate + >>> accuracy = evaluate.load("accuracy") + >>> f1 = evaluate.load("f1") + >>> clf_metrics = combine(["accuracy", "f1"]) + >>> for ref, pred in zip([0,1,0,1], [1,0,0,1]): + ... clf_metrics.add(references=ref, predictions=pred) + ``` + """ + for evaluation_module in self.evaluation_modules: + batch = {"predictions": prediction, "references": reference, **kwargs} + batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()} + evaluation_module.add(**batch) + + def add_batch(self, predictions=None, references=None, **kwargs): + """Add a batch of predictions and references for each evaluation module's stack. + + Args: + predictions (`list/array/tensor`, *optional*): + Predictions. + references (`list/array/tensor`, *optional*): + References. + + Example: + ```py + >>> import evaluate + >>> accuracy = evaluate.load("accuracy") + >>> f1 = evaluate.load("f1") + >>> clf_metrics = combine(["accuracy", "f1"]) + >>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]): + ... clf_metrics.add(references=refs, predictions=preds) + ``` + """ + for evaluation_module in self.evaluation_modules: + batch = {"predictions": predictions, "references": references, **kwargs} + batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()} + evaluation_module.add_batch(**batch) + + def compute(self, predictions=None, references=None, **kwargs): + """Compute each evaluation module. + + Usage of positional arguments is not allowed to prevent mistakes. + + Args: + predictions (`list/array/tensor`, *optional*): + Predictions. + references (`list/array/tensor`, *optional*): + References. + **kwargs (*optional*): + Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`] + method (see details in the docstring). + + Return: + `dict` or `None` + + - Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`). + - `None` if the evaluation module is not run on the main process (`process_id != 0`). 
+ + Example: + + ```py + >>> import evaluate + >>> accuracy = evaluate.load("accuracy") + >>> f1 = evaluate.load("f1") + >>> clf_metrics = combine(["accuracy", "f1"]) + >>> clf_metrics.compute(predictions=[0,1], references=[1,1]) + {'accuracy': 0.5, 'f1': 0.6666666666666666} + ``` + """ + results = [] + + for evaluation_module in self.evaluation_modules: + batch = {"predictions": predictions, "references": references, **kwargs} + results.append(evaluation_module.compute(**batch)) + + return self._merge_results(results) + + def _merge_results(self, results): + merged_results = {} + results_keys = list(itertools.chain.from_iterable([r.keys() for r in results])) + duplicate_keys = {item for item, count in collections.Counter(results_keys).items() if count > 1} + + duplicate_names = [ + item for item, count in collections.Counter(self.evaluation_module_names).items() if count > 1 + ] + duplicate_counter = {name: 0 for name in duplicate_names} + + for module_name, result in zip(self.evaluation_module_names, results): + for k, v in result.items(): + if k not in duplicate_keys and not self.force_prefix: + merged_results[f"{k}"] = v + elif module_name in duplicate_counter: + merged_results[f"{module_name}_{duplicate_counter[module_name]}_{k}"] = v + else: + merged_results[f"{module_name}_{k}"] = v + + if module_name in duplicate_counter: + duplicate_counter[module_name] += 1 + + return merged_results + + +def combine(evaluations, force_prefix=False): + """Combines several metrics, comparisons, or measurements into a single `CombinedEvaluations` object that + can be used like a single evaluation module. + + If two scores have the same name, then they are prefixed with their module names. + And if two modules have the same name, please use a dictionary to give them different names, otherwise an integer id is appended to the prefix. + + Args: + evaluations (`Union[list, dict]`): + A list or dictionary of evaluation modules. The modules can either be passed + as strings or loaded `EvaluationModule`s. If a dictionary is passed its keys are the names used and the values the modules. + The names are used as prefix in case there are name overlaps in the returned results of each module or if `force_prefix=True`. + force_prefix (`bool`, *optional*, defaults to `False`): + If `True` all scores from the modules are prefixed with their name. If + a dictionary is passed the keys are used as name otherwise the module's name. + + Examples: + + ```py + >>> import evaluate + >>> accuracy = evaluate.load("accuracy") + >>> f1 = evaluate.load("f1") + >>> clf_metrics = combine(["accuracy", "f1"]) + ``` + """ + + return CombinedEvaluations(evaluations, force_prefix=force_prefix) diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/naming.py b/env-llmeval/lib/python3.10/site-packages/evaluate/naming.py new file mode 100644 index 0000000000000000000000000000000000000000..6335cf1b0ff47f0f2a409d6641fd5c528a31e949 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/naming.py @@ -0,0 +1,82 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
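As described in the `combine` docstring above, result keys keep their plain names unless they collide or `force_prefix=True` is set, in which case they are prefixed with the module name. A small hedged sketch of the resulting key naming (scores shown as examples):

```py
import evaluate

# Keys stay unprefixed because "accuracy" and "f1" don't collide:
clf_metrics = evaluate.combine(["accuracy", "f1"])
print(clf_metrics.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1]))
# e.g. {'accuracy': 0.5, 'f1': 0.5}

# With force_prefix=True every key carries its module name (or the dict key, if a dict was passed):
prefixed = evaluate.combine(["accuracy", "f1"], force_prefix=True)
print(prefixed.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1]))
# e.g. {'accuracy_accuracy': 0.5, 'f1_f1': 0.5}
```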
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +"""Utilities for file names.""" + +import itertools +import os +import re + + +_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])") +_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])") + +_single_underscore_re = re.compile(r"(? str: + """ + Add hf_modules_cache to the python path. + By default hf_modules_cache='~/.cache/huggingface/modules'. + It can also be set with the environment variable HF_MODULES_CACHE. + This is used to add modules such as `datasets_modules` + """ + hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE + hf_modules_cache = str(hf_modules_cache) + if hf_modules_cache not in sys.path: + sys.path.append(hf_modules_cache) + + os.makedirs(hf_modules_cache, exist_ok=True) + if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")): + with open(os.path.join(hf_modules_cache, "__init__.py"), "w"): + pass + return hf_modules_cache + + +def is_remote_url(url_or_filename: str) -> bool: + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https", "s3", "gs", "hdfs", "ftp") + + +def is_local_path(url_or_filename: str) -> bool: + # On unix the scheme of a local path is empty (for both absolute and relative), + # while on windows the scheme is the drive name (ex: "c") for absolute paths. + # for details on the windows behavior, see https://bugs.python.org/issue42215 + return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/") + + +def is_relative_path(url_or_filename: str) -> bool: + return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename) + + +def relative_to_absolute_path(path: T) -> T: + """Convert relative path to absolute path.""" + abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path)))) + return Path(abs_path_str) if isinstance(path, Path) else abs_path_str + + +def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str: + if dataset: + endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX + else: + endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX + return "/".join((endpoint, identifier, filename)) + + +def head_hf_s3( + identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0 +) -> Union[requests.Response, Exception]: + return http_head( + hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset), + max_retries=max_retries, + ) + + +def hf_hub_url(path: str, name: str, revision: Optional[str] = None) -> str: + revision = revision or config.HUB_DEFAULT_VERSION + return config.HUB_EVALUATE_URL.format(path=path, name=name, revision=revision) + + +def url_or_path_join(base_name: str, *pathnames: str) -> str: + if is_remote_url(base_name): + return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)) + else: + return Path(base_name, *pathnames).as_posix() + + +def url_or_path_parent(url_or_path: str) -> str: + if is_remote_url(url_or_path): 
+ return url_or_path[: url_or_path.rindex("/")] + else: + return os.path.dirname(url_or_path) + + +def hash_url_to_filename(url, etag=None): + """ + Convert `url` into a hashed filename in a repeatable way. + If `etag` is specified, append its hash to the url's, delimited + by a period. + If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name + so that TF 2.0 can identify it as a HDF5 file + (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) + """ + url_bytes = url.encode("utf-8") + url_hash = sha256(url_bytes) + filename = url_hash.hexdigest() + + if etag: + etag_bytes = etag.encode("utf-8") + etag_hash = sha256(etag_bytes) + filename += "." + etag_hash.hexdigest() + + if url.endswith(".py"): + filename += ".py" + + return filename + + +def cached_path( + url_or_filename, + download_config=None, + **download_kwargs, +) -> str: + """ + Given something that might be a URL (or might be a local path), + determine which. If it's a URL, download the file and cache it, and + return the path to the cached file. If it's already a local path, + make sure the file exists and then return the path. + + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + ValueError: if it couldn't parse the url or filename correctly + requests.exceptions.ConnectionError: in case of internet connection issue + """ + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + + cache_dir = download_config.cache_dir or config.DOWNLOADED_EVALUATE_PATH + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + if isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + + if is_remote_url(url_or_filename): + # URL, so get it from the cache (downloading if necessary) + output_path = get_from_cache( + url_or_filename, + cache_dir=cache_dir, + force_download=download_config.force_download, + proxies=download_config.proxies, + resume_download=download_config.resume_download, + user_agent=download_config.user_agent, + local_files_only=download_config.local_files_only, + use_etag=download_config.use_etag, + max_retries=download_config.max_retries, + use_auth_token=download_config.use_auth_token, + ignore_url_params=download_config.ignore_url_params, + download_desc=download_config.download_desc, + ) + elif os.path.exists(url_or_filename): + # File, and it exists. + output_path = url_or_filename + elif is_local_path(url_or_filename): + # File, but it doesn't exist. 
+ raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") + else: + # Something unknown + raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") + + if output_path is None: + return output_path + + if download_config.extract_compressed_file: + output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( + output_path, force_extract=download_config.force_extract + ) + + return output_path + + +def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: + ua = f"datasets/{__version__}; python/{config.PY_VERSION}" + ua += f"; pyarrow/{config.PYARROW_VERSION}" + if config.TORCH_AVAILABLE: + ua += f"; torch/{config.TORCH_VERSION}" + if config.TF_AVAILABLE: + ua += f"; tensorflow/{config.TF_VERSION}" + if config.JAX_AVAILABLE: + ua += f"; jax/{config.JAX_VERSION}" + if isinstance(user_agent, dict): + ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" + elif isinstance(user_agent, str): + ua += "; " + user_agent + return ua + + +def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict: + """Handle the HF authentication""" + headers = {} + if url.startswith(config.HF_ENDPOINT): + token = None + if isinstance(use_auth_token, str): + token = use_auth_token + elif bool(use_auth_token): + from huggingface_hub import hf_api + + token = hf_api.HfFolder.get_token() + if token: + headers["authorization"] = f"Bearer {token}" + return headers + + +class OfflineModeIsEnabled(ConnectionError): + pass + + +def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None): + """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_EVALUATE_OFFLINE is True.""" + if config.HF_EVALUATE_OFFLINE: + raise OfflineModeIsEnabled( + "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg) + ) + + +def _retry( + func, + func_args: Optional[tuple] = None, + func_kwargs: Optional[dict] = None, + exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException, + status_codes: Optional[List[int]] = None, + max_retries: int = 0, + base_wait_time: float = 0.5, + max_wait_time: float = 2, +): + func_args = func_args or () + func_kwargs = func_kwargs or {} + retry = 0 + while True: + try: + return func(*func_args, **func_kwargs) + except exceptions as err: + if retry >= max_retries or (status_codes and err.response.status_code not in status_codes): + raise err + else: + sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff + logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry/max_retries}]") + time.sleep(sleep_time) + retry += 1 + + +def _request_with_retry( + method: str, + url: str, + max_retries: int = 0, + base_wait_time: float = 0.5, + max_wait_time: float = 2, + timeout: float = 10.0, + **params, +) -> requests.Response: + """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff. + + Note that if the environment variable HF_EVALUATE_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised. + + Args: + method (str): HTTP method, such as 'GET' or 'HEAD'. + url (str): The URL of the resource to fetch. + max_retries (int): Maximum number of retries, defaults to 0 (no retries). + base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between + retries then grows exponentially, capped by max_wait_time. 
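The retry helpers above sleep for `min(max_wait_time, base_wait_time * 2 ** retry)` between attempts. A tiny sketch of the wait schedule this produces with the default arguments (pure arithmetic, no network calls):

```py
# Backoff schedule used by _retry / _request_with_retry:
# sleep_time = min(max_wait_time, base_wait_time * 2 ** retry)
base_wait_time, max_wait_time, max_retries = 0.5, 2.0, 5

schedule = [min(max_wait_time, base_wait_time * 2**retry) for retry in range(max_retries)]
print(schedule)  # [0.5, 1.0, 2.0, 2.0, 2.0] -- exponential growth capped at max_wait_time
```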
+ max_wait_time (float): Maximum amount of time between two retries, in seconds. + **params: Params to pass to :obj:`requests.request`. + """ + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + tries, success = 0, False + while not success: + tries += 1 + try: + response = requests.request(method=method.upper(), url=url, timeout=timeout, **params) + success = True + except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err: + if tries > max_retries: + raise err + else: + logger.info(f"{method} request to {url} timed out, retrying... [{tries/max_retries}]") + sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff + time.sleep(sleep_time) + return response + + +def ftp_head(url, timeout=10.0): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + try: + with closing(urllib.request.urlopen(url, timeout=timeout)) as r: + r.read(1) + except Exception: + return False + return True + + +def ftp_get(url, temp_file, timeout=10.0): + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + try: + logger.info(f"Getting through FTP {url} into {temp_file.name}") + with closing(urllib.request.urlopen(url, timeout=timeout)) as r: + shutil.copyfileobj(r, temp_file) + except urllib.error.URLError as e: + raise ConnectionError(e) from None + + +def http_get( + url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None +): + headers = copy.deepcopy(headers) or {} + headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) + if resume_size > 0: + headers["Range"] = f"bytes={resume_size:d}-" + response = _request_with_retry( + method="GET", + url=url, + stream=True, + proxies=proxies, + headers=headers, + cookies=cookies, + max_retries=max_retries, + timeout=timeout, + ) + if response.status_code == 416: # Range not satisfiable + return + content_length = response.headers.get("Content-Length") + total = resume_size + int(content_length) if content_length is not None else None + with logging.tqdm( + unit="B", + unit_scale=True, + total=total, + initial=resume_size, + desc=desc or "Downloading", + disable=not logging.is_progress_bar_enabled(), + ) as progress: + for chunk in response.iter_content(chunk_size=1024): + progress.update(len(chunk)) + temp_file.write(chunk) + + +def http_head( + url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0 +) -> requests.Response: + headers = copy.deepcopy(headers) or {} + headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent")) + response = _request_with_retry( + method="HEAD", + url=url, + proxies=proxies, + headers=headers, + cookies=cookies, + allow_redirects=allow_redirects, + timeout=timeout, + max_retries=max_retries, + ) + return response + + +def request_etag(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> Optional[str]: + headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token) + response = http_head(url, headers=headers, max_retries=3) + response.raise_for_status() + etag = response.headers.get("ETag") if response.ok else None + return etag + + +def get_from_cache( + url, + cache_dir=None, + force_download=False, + proxies=None, + etag_timeout=100, + resume_download=False, + user_agent=None, + local_files_only=False, + use_etag=True, + max_retries=0, + use_auth_token=None, + ignore_url_params=False, + download_desc=None, +) -> str: + """ + Given a URL, look for the corresponding file in 
the local cache. + If it's not there, download it. Then return the path to the cached file. + + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + """ + if cache_dir is None: + cache_dir = config.HF_EVALUATE_CACHE + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + os.makedirs(cache_dir, exist_ok=True) + + if ignore_url_params: + # strip all query parameters and #fragments from the URL + cached_url = urljoin(url, urlparse(url).path) + else: + cached_url = url # additional parameters may be added to the given URL + + connected = False + response = None + cookies = None + etag = None + head_error = None + + # Try a first time to file the file on the local file system without eTag (None) + # if we don't ask for 'force_download' then we spare a request + filename = hash_url_to_filename(cached_url, etag=None) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download and not use_etag: + return cache_path + + # Prepare headers for authentication + headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token) + if user_agent is not None: + headers["user-agent"] = user_agent + + # We don't have the file locally or we need an eTag + if not local_files_only: + if url.startswith("ftp://"): + connected = ftp_head(url) + try: + response = http_head( + url, + allow_redirects=True, + proxies=proxies, + timeout=etag_timeout, + max_retries=max_retries, + headers=headers, + ) + if response.status_code == 200: # ok + etag = response.headers.get("ETag") if use_etag else None + for k, v in response.cookies.items(): + # In some edge cases, we need to get a confirmation token + if k.startswith("download_warning") and "drive.google.com" in url: + url += "&confirm=" + v + cookies = response.cookies + connected = True + # Fix Google Drive URL to avoid Virus scan warning + if "drive.google.com" in url and "confirm=" not in url: + url += "&confirm=t" + # In some edge cases, head request returns 400 but the connection is actually ok + elif ( + (response.status_code == 400 and "firebasestorage.googleapis.com" in url) + or (response.status_code == 405 and "drive.google.com" in url) + or ( + response.status_code == 403 + and ( + re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url) + or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url) + ) + ) + or (response.status_code == 403 and "ndownloader.figstatic.com" in url) + ): + connected = True + logger.info(f"Couldn't get ETag version for url {url}") + elif response.status_code == 401 and config.HF_ENDPOINT in url and use_auth_token is None: + raise ConnectionError( + f"Unauthorized for URL {url}. Please use the parameter ``use_auth_token=True`` after logging in with ``huggingface-cli login``" + ) + except (OSError, requests.exceptions.Timeout) as e: + # not connected + head_error = e + pass + + # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. + # try to get the last downloaded one + if not connected: + if os.path.exists(cache_path) and not force_download: + return cache_path + if local_files_only: + raise FileNotFoundError( + f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been" + " disabled. To enable file online look-ups, set 'local_files_only' to False." 
+ ) + elif response is not None and response.status_code == 404: + raise FileNotFoundError(f"Couldn't find file at {url}") + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + if head_error is not None: + raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") + elif response is not None: + raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})") + else: + raise ConnectionError(f"Couldn't reach {url}") + + # Try a second time + filename = hash_url_to_filename(cached_url, etag) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download: + return cache_path + + # From now on, connected is True. + # Prevent parallel downloads of the same file with a lock. + lock_path = cache_path + ".lock" + with FileLock(lock_path): + + if resume_download: + incomplete_path = cache_path + ".incomplete" + + @contextmanager + def _resumable_file_manager(): + with open(incomplete_path, "a+b") as f: + yield f + + temp_file_manager = _resumable_file_manager + if os.path.exists(incomplete_path): + resume_size = os.stat(incomplete_path).st_size + else: + resume_size = 0 + else: + temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) + resume_size = 0 + + # Download to temporary file, then copy to cache dir once finished. + # Otherwise you get corrupt cache entries if the download gets interrupted. + with temp_file_manager() as temp_file: + logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") + + # GET file object + if url.startswith("ftp://"): + ftp_get(url, temp_file) + else: + http_get( + url, + temp_file, + proxies=proxies, + resume_size=resume_size, + headers=headers, + cookies=cookies, + max_retries=max_retries, + desc=download_desc, + ) + + logger.info(f"storing {url} in cache at {cache_path}") + shutil.move(temp_file.name, cache_path) + + logger.info(f"creating metadata file for {cache_path}") + meta = {"url": url, "etag": etag} + meta_path = cache_path + ".json" + with open(meta_path, "w", encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + return cache_path + + +def add_start_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") + return fn + + return docstring_decorator + + +def add_end_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) + return fn + + return docstring_decorator + + +def estimate_dataset_size(paths): + return sum(path.stat().st_size for path in paths) + + +def readline(f: io.RawIOBase): + # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525 + res = bytearray() + while True: + b = f.read(1) + if not b: + break + res += b + if res.endswith(b"\n"): + break + return bytes(res) diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/utils/gradio.py b/env-llmeval/lib/python3.10/site-packages/evaluate/utils/gradio.py new file mode 100644 index 0000000000000000000000000000000000000000..3b73d9c67e711caad66edaf0f66808fff296aabd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/utils/gradio.py @@ -0,0 +1,131 @@ +import json +import os +import re +import sys +from pathlib import Path + +import numpy as np +from datasets import Value + +from .logging import get_logger + + +logger = get_logger(__name__) + +REGEX_YAML_BLOCK = 
re.compile(r"---[\n\r]+([\S\s]*?)[\n\r]+---[\n\r]") + + +def infer_gradio_input_types(feature_types): + """ + Maps metric feature types to input types for gradio Dataframes: + - float/int -> numbers + - string -> strings + - any other -> json + Note that json is not a native gradio type but will be treated as string that + is then parsed as a json. + """ + input_types = [] + for feature_type in feature_types: + input_type = "json" + if isinstance(feature_type, Value): + if feature_type.dtype.startswith("int") or feature_type.dtype.startswith("float"): + input_type = "number" + elif feature_type.dtype == "string": + input_type = "str" + input_types.append(input_type) + return input_types + + +def json_to_string_type(input_types): + """Maps json input type to str.""" + return ["str" if i == "json" else i for i in input_types] + + +def parse_readme(filepath): + """Parses a repositories README and removes""" + if not os.path.exists(filepath): + return "No README.md found." + with open(filepath, "r") as f: + text = f.read() + match = REGEX_YAML_BLOCK.search(text) + if match: + text = text[match.end() :] + return text + + +def parse_gradio_data(data, input_types): + """Parses data from gradio Dataframe for use in metric.""" + metric_inputs = {} + data.replace("", np.nan, inplace=True) + data.dropna(inplace=True) + for feature_name, input_type in zip(data, input_types): + if input_type == "json": + metric_inputs[feature_name] = [json.loads(d) for d in data[feature_name].to_list()] + elif input_type == "str": + metric_inputs[feature_name] = [d.strip('"') for d in data[feature_name].to_list()] + else: + metric_inputs[feature_name] = data[feature_name] + return metric_inputs + + +def parse_test_cases(test_cases, feature_names, input_types): + """ + Parses test cases to be used in gradio Dataframe. Note that an apostrophe is added + to strings to follow the format in json. + """ + if len(test_cases) == 0: + return None + examples = [] + for test_case in test_cases: + parsed_cases = [] + for feat, input_type in zip(feature_names, input_types): + if input_type == "json": + parsed_cases.append([str(element) for element in test_case[feat]]) + elif input_type == "str": + parsed_cases.append(['"' + element + '"' for element in test_case[feat]]) + else: + parsed_cases.append(test_case[feat]) + examples.append([list(i) for i in zip(*parsed_cases)]) + return examples + + +def launch_gradio_widget(metric): + """Launches `metric` widget with Gradio.""" + + try: + import gradio as gr + except ImportError as error: + logger.error("To create a metric widget with Gradio make sure gradio is installed.") + raise error + + local_path = Path(sys.path[0]) + # if there are several input types, use first as default. + if isinstance(metric.features, list): + (feature_names, feature_types) = zip(*metric.features[0].items()) + else: + (feature_names, feature_types) = zip(*metric.features.items()) + gradio_input_types = infer_gradio_input_types(feature_types) + + def compute(data): + return metric.compute(**parse_gradio_data(data, gradio_input_types)) + + iface = gr.Interface( + fn=compute, + inputs=gr.inputs.Dataframe( + headers=feature_names, + col_count=len(feature_names), + row_count=1, + datatype=json_to_string_type(gradio_input_types), + ), + outputs=gr.outputs.Textbox(label=metric.name), + description=( + metric.info.description + "\nIf this is a text-based metric, make sure to wrap you input in double quotes." + " Alternatively you can use a JSON-formatted list as input." 
+ ), + title=f"Metric: {metric.name}", + article=parse_readme(local_path / "README.md"), + # TODO: load test cases and use them to populate examples + # examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)] + ) + + iface.launch() diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/utils/logging.py b/env-llmeval/lib/python3.10/site-packages/evaluate/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..8df58d3dcfb4c8b903b78c244387561dc659e423 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/utils/logging.py @@ -0,0 +1,234 @@ +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Logging utilities. """ + +import logging +import os +from logging import CRITICAL # NOQA +from logging import DEBUG # NOQA +from logging import ERROR # NOQA +from logging import FATAL # NOQA +from logging import INFO # NOQA +from logging import NOTSET # NOQA +from logging import WARN # NOQA +from logging import WARNING # NOQA +from typing import Optional + +from tqdm import auto as tqdm_lib + + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + + +def _get_default_logging_level(): + """ + If EVALUATE_VERBOSITY env var is set to one of the valid choices return that as the new default level. + If it is not - fall back to ``_default_log_level`` + """ + env_level_str = os.getenv("EVALUATE_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option EVALUATE_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + # Apply our default configuration to the library root logger. + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(_get_default_logging_level()) + + +def _reset_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(logging.NOTSET) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """Return a logger with the specified name.""" + if name is None: + name = _get_library_name() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """Return the current level for the Hugging Face Evaluate library's root logger. + Returns: + Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`. 
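A hedged sketch of calling the widget helper added in `evaluate/utils/gradio.py` above. It assumes network access to load a module and a gradio release that still exposes `gr.inputs` / `gr.outputs`, which is what `launch_gradio_widget` uses:

```py
import evaluate
from evaluate.utils.gradio import launch_gradio_widget  # module path as added in this diff

metric = evaluate.load("accuracy")
launch_gradio_widget(metric)  # builds a gr.Interface from metric.features and launches it
```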
+ + + + Hugging Face Evaluate library has following logging levels: + - `evaluate.logging.CRITICAL`, `evaluate.logging.FATAL` + - `evaluate.logging.ERROR` + - `evaluate.logging.WARNING`, `evaluate.logging.WARN` + - `evaluate.logging.INFO` + - `evaluate.logging.DEBUG` + + + """ + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """Set the level for the Hugging Face Evaluate library's root logger. + Args: + verbosity: + Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`. + """ + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info(): + """Set the level for the Hugging Face Evaluate library's root logger to `INFO`. + + This will display most of the logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.INFO)`. + """ + return set_verbosity(INFO) + + +def set_verbosity_warning(): + """Set the level for the Hugging Face Evaluate library's root logger to `WARNING`. + + This will display only the warning and errors logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.WARNING)`. + """ + return set_verbosity(WARNING) + + +def set_verbosity_debug(): + """Set the level for the Hugging Face Evaluate library's root logger to `DEBUG`. + + This will display all the logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.DEBUG)`. + """ + return set_verbosity(DEBUG) + + +def set_verbosity_error(): + """Set the level for the Hugging Face Evaluate library's root logger to `ERROR`. + + This will display only the errors logging information and tqdm bars. + + Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.ERROR)`. + """ + return set_verbosity(ERROR) + + +def disable_propagation() -> None: + """Disable propagation of the library log outputs. + Note that log propagation is disabled by default. + """ + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """Enable propagation of the library log outputs. + Please disable the Hugging Face Evaluate library's default handler to prevent double logging if the root logger has + been configured. 
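A short sketch of controlling the library's log level with the helpers above. The `EVALUATE_VERBOSITY` environment variable is read once at import time; the setters can be called at any point afterwards:

```py
import os
os.environ["EVALUATE_VERBOSITY"] = "info"  # one of: debug, info, warning, error, critical

import evaluate

evaluate.logging.set_verbosity_debug()   # same as set_verbosity(evaluate.logging.DEBUG)
print(evaluate.logging.get_verbosity())  # 10 == logging.DEBUG
evaluate.logging.set_verbosity_error()   # silence everything below ERROR again
```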
+ """ + _get_library_root_logger().propagate = True + + +# Configure the library root logger at the module level (singleton-like) +_configure_library_root_logger() + + +class EmptyTqdm: + """Dummy tqdm which doesn't do anything.""" + + def __init__(self, *args, **kwargs): # pylint: disable=unused-argument + self._iterator = args[0] if args else None + + def __iter__(self): + return iter(self._iterator) + + def __getattr__(self, _): + """Return empty function.""" + + def empty_fn(*args, **kwargs): # pylint: disable=unused-argument + return + + return empty_fn + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + return + + +_tqdm_active = True + + +class _tqdm_cls: + def __call__(self, *args, **kwargs): + if _tqdm_active: + return tqdm_lib.tqdm(*args, **kwargs) + else: + return EmptyTqdm(*args, **kwargs) + + def set_lock(self, *args, **kwargs): + self._lock = None + if _tqdm_active: + return tqdm_lib.tqdm.set_lock(*args, **kwargs) + + def get_lock(self): + if _tqdm_active: + return tqdm_lib.tqdm.get_lock() + + +tqdm = _tqdm_cls() + + +def is_progress_bar_enabled() -> bool: + """Return a boolean indicating whether tqdm progress bars are enabled.""" + global _tqdm_active + return bool(_tqdm_active) + + +def enable_progress_bar(): + """Enable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = True + + +def disable_progress_bar(): + """Enable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = False diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/visualization.py b/env-llmeval/lib/python3.10/site-packages/evaluate/visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..b8be8605805e4d11ef93a2911ab32afe934a78bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/visualization.py @@ -0,0 +1,230 @@ +import textwrap + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + + +class ComplexRadar: + """Create a complex radar chart with different scales for each variable + Args: + fig (`matplotlib.figure`) : A matplotlib figure object to add the axes on. + variables (`list`) : a list of variables to. plot + ranges (`list` of `tuples`): A list of ranges (min, max) for each variable + n_ring_levels (`int): Number of ordinate or ring levels to draw. + Default: 5. + show_scales (`bool`): Indicates if we the ranges for each variable are plotted. + Default: True. + format_cfg (`dict`): A dictionary with formatting configurations. + Default: None. + Returns: + `matplotlib.figure.Figure`: a radar plot. 
+ """ + + def __init__(self, fig, variables, ranges, n_ring_levels=5, show_scales=True, format_cfg=None): + + self.format_cfg = format_cfg + + # Calculate angles and create for each variable an axes + # Consider here the trick with having the first axes element twice (len+1) + angles = np.arange(0, 360, 360.0 / len(variables)) + axes = [ + fig.add_axes([0.1, 0.1, 0.9, 0.9], polar=True, label="axes{}".format(i), **self.format_cfg["axes_args"]) + for i in range(len(variables) + 1) + ] + + # Ensure clockwise rotation (first variable at the top N) + for ax in axes: + ax.set_theta_zero_location("N") + ax.set_theta_direction(-1) + ax.set_axisbelow(True) + + # Writing the ranges on each axes + for i, ax in enumerate(axes): + + # Here we do the trick by repeating the first iteration + j = 0 if (i == 0 or i == 1) else i - 1 + ax.set_ylim(*ranges[j]) + # Set endpoint to True if you like to have values right before the last circle + grid = np.linspace(*ranges[j], num=n_ring_levels, endpoint=self.format_cfg["incl_endpoint"]) + gridlabel = ["{}".format(round(x, 2)) for x in grid] + gridlabel[0] = "" # remove values from the center + lines, labels = ax.set_rgrids( + grid, labels=gridlabel, angle=angles[j], **self.format_cfg["rgrid_tick_lbls_args"] + ) + + ax.set_ylim(*ranges[j]) + ax.spines["polar"].set_visible(False) + ax.grid(visible=False) + + if show_scales is False: + ax.set_yticklabels([]) + + # Set all axes except the first one unvisible + for ax in axes[1:]: + ax.patch.set_visible(False) + ax.xaxis.set_visible(False) + + # Setting the attributes + self.angle = np.deg2rad(np.r_[angles, angles[0]]) + self.ranges = ranges + self.ax = axes[0] + self.ax1 = axes[1] + self.plot_counter = 0 + + # Draw (inner) circles and lines + self.ax.yaxis.grid(**self.format_cfg["rad_ln_args"]) + # Draw outer circle + self.ax.spines["polar"].set(**self.format_cfg["outer_ring"]) + # Draw angle lines + self.ax.xaxis.grid(**self.format_cfg["angle_ln_args"]) + + # ax1 is the duplicate of axes[0] (self.ax) + # Remove everything from ax1 except the plot itself + self.ax1.axis("off") + self.ax1.set_zorder(9) + + # Create the outer labels for each variable + l, text = self.ax.set_thetagrids(angles, labels=variables) + + # Beautify them + labels = [t.get_text() for t in self.ax.get_xticklabels()] + labels = [ + "\n".join( + textwrap.wrap( + label, + self.format_cfg["theta_tick_lbls_txt_wrap"], + break_long_words=self.format_cfg["theta_tick_lbls_brk_lng_wrds"], + ) + ) + for label in labels + ] + self.ax.set_xticklabels(labels, **self.format_cfg["theta_tick_lbls"]) + + for t, a in zip(self.ax.get_xticklabels(), angles): + if a == 0: + t.set_ha("center") + elif a > 0 and a < 180: + t.set_ha("left") + elif a == 180: + t.set_ha("center") + else: + t.set_ha("right") + + self.ax.tick_params(axis="both", pad=self.format_cfg["theta_tick_lbls_pad"]) + + def _scale_data(self, data, ranges): + """Scales data[1:] to ranges[0]""" + for d, (y1, y2) in zip(data[1:], ranges[1:]): + assert (y1 <= d <= y2) or (y2 <= d <= y1) + x1, x2 = ranges[0] + d = data[0] + sdata = [d] + for d, (y1, y2) in zip(data[1:], ranges[1:]): + sdata.append((d - y1) / (y2 - y1) * (x2 - x1) + x1) + return sdata + + def plot(self, data, *args, **kwargs): + """Plots a line""" + sdata = self._scale_data(data, self.ranges) + self.ax1.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kwargs) + self.plot_counter = self.plot_counter + 1 + + def use_legend(self, *args, **kwargs): + """Shows a legend""" + self.ax1.legend(*args, **kwargs) + + +def radar_plot(data, model_names, 
invert_range=[], config=None, fig=None): + """Create a complex radar chart with different scales for each variable + Source: https://towardsdatascience.com/how-to-create-and-visualize-complex-radar-charts-f7764d0f3652 + + Args: + data (`List[dict]`): the results (list of metric + value pairs). + E.g. data = [{"accuracy": 0.9, "precision":0.8},{"accuracy": 0.7, "precision":0.6}] + model_names (`List[str]`): model names. + E.g. model_names = ["model1", "model 2", ...] + invert_range (`List[str]`, optional): the metrics to invert (in cases when smaller is better, e.g. speed) + E.g. invert_range=["latency_in_seconds"] + config (`dict`, optional): a specification of the formatting configurations, namely: + + - rad_ln_args (`dict`, default `{"visible": True}`): The visibility of the radial (circle) lines. + + - outer_ring (`dict`, default `{"visible": True}`): The visibility of the outer ring. + + - angle_ln_args (`dict`, default `{"visible": True}`): The visibility of the angle lines. + + - rgrid_tick_lbls_args (`dict`, default `{"fontsize": 12}`): The font size of the tick labels on the scales. + + - theta_tick_lbls (`dict`, default `{"fontsize": 12}`): The font size of the variable labels on the plot. + + - theta_tick_lbls_pad (`int`, default `3`): The padding of the variable labels on the plot. + + - theta_tick_lbls_brk_lng_wrds (`bool`, default `True`): Whether long words in the label are broken up or not. + + - theta_tick_lbls_txt_wrap (`int`, default `15`): Text wrap for tick labels. + + - incl_endpoint (`bool`, default `False`): Include value endpoints on the scale. + + - marker (`str`, default `"o"`): the shape of the marker used in the radar plot. + + - markersize (`int`, default `3`): the size of the marker used in the radar plot. + + - legend_loc (`str`, default `"upper right"`): the location of the legend in the radar plot. Must be one of: 'upper left', 'upper right', 'lower left', 'lower right'. + + - bbox_to_anchor (`tuple`, default `(2, 1)`): anchor for the legend. + fig (`matplotlib.figure.Figure`, optional): figure used to plot the radar plot. 
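A hedged usage sketch for `radar_plot`; the score values extend the docstring example, and `latency_in_seconds` is placed in `invert_range` because lower latency is better:

```py
import matplotlib.pyplot as plt
from evaluate.visualization import radar_plot

data = [
    {"accuracy": 0.9, "precision": 0.8, "latency_in_seconds": 48.1},
    {"accuracy": 0.7, "precision": 0.6, "latency_in_seconds": 51.4},
]
model_names = ["model 1", "model 2"]

fig = radar_plot(data, model_names, invert_range=["latency_in_seconds"])
plt.show()  # or fig.savefig("radar.png", bbox_inches="tight")
```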
+ + Returns: + `matplotlib.figure.Figure` + """ + data = pd.DataFrame(data) + data.index = model_names + variables = data.keys() + if all(x in variables for x in invert_range) is False: + raise ValueError("All of the metrics in `invert_range` should be in the data provided.") + min_max_per_variable = data.describe().T[["min", "max"]] + min_max_per_variable["min"] = min_max_per_variable["min"] - 0.1 * ( + min_max_per_variable["max"] - min_max_per_variable["min"] + ) + min_max_per_variable["max"] = min_max_per_variable["max"] + 0.1 * ( + min_max_per_variable["max"] - min_max_per_variable["min"] + ) + + ranges = list(min_max_per_variable.itertuples(index=False, name=None)) + ranges = [ + (max_value, min_value) if var in invert_range else (min_value, max_value) + for var, (min_value, max_value) in zip(variables, ranges) + ] + format_cfg = { + "axes_args": {}, + "rad_ln_args": {"visible": True}, + "outer_ring": {"visible": True}, + "angle_ln_args": {"visible": True}, + "rgrid_tick_lbls_args": {"fontsize": 12}, + "theta_tick_lbls": {"fontsize": 12}, + "theta_tick_lbls_pad": 3, + "theta_tick_lbls_brk_lng_wrds": True, + "theta_tick_lbls_txt_wrap": 15, + "incl_endpoint": False, + "marker": "o", + "markersize": 3, + "legend_loc": "upper right", + "bbox_to_anchor": (2, 1), + } + if config is not None: + format_cfg.update(config) + if fig is None: + fig = plt.figure() + radar = ComplexRadar( + fig, + variables, + ranges, + n_ring_levels=3, + show_scales=True, + format_cfg=format_cfg, + ) + for g in zip(data.index): + radar.plot(data.loc[g].values, label=g, marker=format_cfg["marker"], markersize=format_cfg["markersize"]) + radar.use_legend(**{"loc": format_cfg["legend_loc"], "bbox_to_anchor": format_cfg["bbox_to_anchor"]}) + return fig diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca2eba20432924304517be99d5113bc9f57614d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/__init__.py @@ -0,0 +1,367 @@ +from __future__ import annotations + +import os +import warnings + +__docformat__ = "restructuredtext" + +# Let users know if they're missing any of our hard dependencies +_hard_dependencies = ("numpy", "pytz", "dateutil") +_missing_dependencies = [] + +for _dependency in _hard_dependencies: + try: + __import__(_dependency) + except ImportError as _e: # pragma: no cover + _missing_dependencies.append(f"{_dependency}: {_e}") + +if _missing_dependencies: # pragma: no cover + raise ImportError( + "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies) + ) +del _hard_dependencies, _dependency, _missing_dependencies + +try: + # numpy compat + from pandas.compat import ( + is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401 + ) +except ImportError as _err: # pragma: no cover + _module = _err.name + raise ImportError( + f"C extension: {_module} not built. If you want to import " + "pandas from the source directory, you may need to run " + "'python setup.py build_ext' to build the C extensions first." 
+ ) from _err + +from pandas._config import ( + get_option, + set_option, + reset_option, + describe_option, + option_context, + options, +) + +# let init-time option registration happen +import pandas.core.config_init # pyright: ignore[reportUnusedImport] # noqa: F401 + +from pandas.core.api import ( + # dtype + ArrowDtype, + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + Float32Dtype, + Float64Dtype, + CategoricalDtype, + PeriodDtype, + IntervalDtype, + DatetimeTZDtype, + StringDtype, + BooleanDtype, + # missing + NA, + isna, + isnull, + notna, + notnull, + # indexes + Index, + CategoricalIndex, + RangeIndex, + MultiIndex, + IntervalIndex, + TimedeltaIndex, + DatetimeIndex, + PeriodIndex, + IndexSlice, + # tseries + NaT, + Period, + period_range, + Timedelta, + timedelta_range, + Timestamp, + date_range, + bdate_range, + Interval, + interval_range, + DateOffset, + # conversion + to_numeric, + to_datetime, + to_timedelta, + # misc + Flags, + Grouper, + factorize, + unique, + value_counts, + NamedAgg, + array, + Categorical, + set_eng_float_format, + Series, + DataFrame, +) + +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.tseries.api import infer_freq +from pandas.tseries import offsets + +from pandas.core.computation.api import eval + +from pandas.core.reshape.api import ( + concat, + lreshape, + melt, + wide_to_long, + merge, + merge_asof, + merge_ordered, + crosstab, + pivot, + pivot_table, + get_dummies, + from_dummies, + cut, + qcut, +) + +from pandas import api, arrays, errors, io, plotting, tseries +from pandas import testing +from pandas.util._print_versions import show_versions + +from pandas.io.api import ( + # excel + ExcelFile, + ExcelWriter, + read_excel, + # parsers + read_csv, + read_fwf, + read_table, + # pickle + read_pickle, + to_pickle, + # pytables + HDFStore, + read_hdf, + # sql + read_sql, + read_sql_query, + read_sql_table, + # misc + read_clipboard, + read_parquet, + read_orc, + read_feather, + read_gbq, + read_html, + read_xml, + read_json, + read_stata, + read_sas, + read_spss, +) + +from pandas.io.json._normalize import json_normalize + +from pandas.util._tester import test + +# use the closest tagged version if possible +_built_with_meson = False +try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __version__, + __git_version__, + ) + + _built_with_meson = True +except ImportError: + from pandas._version import get_versions + + v = get_versions() + __version__ = v.get("closest-tag", v["version"]) + __git_version__ = v.get("full-revisionid") + del get_versions, v + +# GH#55043 - deprecation of the data_manager option +if "PANDAS_DATA_MANAGER" in os.environ: + warnings.warn( + "The env variable PANDAS_DATA_MANAGER is set. The data_manager option is " + "deprecated and will be removed in a future version. Only the BlockManager " + "will be available. Unset this environment variable to silence this warning.", + FutureWarning, + stacklevel=2, + ) + +del warnings, os + +# module level doc-string +__doc__ = """ +pandas - a powerful data analysis and manipulation library for Python +===================================================================== + +**pandas** is a Python package providing fast, flexible, and expressive data +structures designed to make working with "relational" or "labeled" data both +easy and intuitive. It aims to be the fundamental high-level building block for +doing practical, **real world** data analysis in Python. 
Additionally, it has +the broader goal of becoming **the most powerful and flexible open source data +analysis / manipulation tool available in any language**. It is already well on +its way toward this goal. + +Main Features +------------- +Here are just a few of the things that pandas does well: + + - Easy handling of missing data in floating point as well as non-floating + point data. + - Size mutability: columns can be inserted and deleted from DataFrame and + higher dimensional objects + - Automatic and explicit data alignment: objects can be explicitly aligned + to a set of labels, or the user can simply ignore the labels and let + `Series`, `DataFrame`, etc. automatically align the data for you in + computations. + - Powerful, flexible group by functionality to perform split-apply-combine + operations on data sets, for both aggregating and transforming data. + - Make it easy to convert ragged, differently-indexed data in other Python + and NumPy data structures into DataFrame objects. + - Intelligent label-based slicing, fancy indexing, and subsetting of large + data sets. + - Intuitive merging and joining data sets. + - Flexible reshaping and pivoting of data sets. + - Hierarchical labeling of axes (possible to have multiple labels per tick). + - Robust IO tools for loading data from flat files (CSV and delimited), + Excel files, databases, and saving/loading data from the ultrafast HDF5 + format. + - Time series-specific functionality: date range generation and frequency + conversion, moving window statistics, date shifting and lagging. +""" + +# Use __all__ to let type checkers know what is part of the public API. +# Pandas is not (yet) a py.typed library: the public API is determined +# based on the documentation. +__all__ = [ + "ArrowDtype", + "BooleanDtype", + "Categorical", + "CategoricalDtype", + "CategoricalIndex", + "DataFrame", + "DateOffset", + "DatetimeIndex", + "DatetimeTZDtype", + "ExcelFile", + "ExcelWriter", + "Flags", + "Float32Dtype", + "Float64Dtype", + "Grouper", + "HDFStore", + "Index", + "IndexSlice", + "Int16Dtype", + "Int32Dtype", + "Int64Dtype", + "Int8Dtype", + "Interval", + "IntervalDtype", + "IntervalIndex", + "MultiIndex", + "NA", + "NaT", + "NamedAgg", + "Period", + "PeriodDtype", + "PeriodIndex", + "RangeIndex", + "Series", + "SparseDtype", + "StringDtype", + "Timedelta", + "TimedeltaIndex", + "Timestamp", + "UInt16Dtype", + "UInt32Dtype", + "UInt64Dtype", + "UInt8Dtype", + "api", + "array", + "arrays", + "bdate_range", + "concat", + "crosstab", + "cut", + "date_range", + "describe_option", + "errors", + "eval", + "factorize", + "get_dummies", + "from_dummies", + "get_option", + "infer_freq", + "interval_range", + "io", + "isna", + "isnull", + "json_normalize", + "lreshape", + "melt", + "merge", + "merge_asof", + "merge_ordered", + "notna", + "notnull", + "offsets", + "option_context", + "options", + "period_range", + "pivot", + "pivot_table", + "plotting", + "qcut", + "read_clipboard", + "read_csv", + "read_excel", + "read_feather", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_json", + "read_orc", + "read_parquet", + "read_pickle", + "read_sas", + "read_spss", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_xml", + "reset_option", + "set_eng_float_format", + "set_option", + "show_versions", + "test", + "testing", + "timedelta_range", + "to_datetime", + "to_numeric", + "to_pickle", + "to_timedelta", + "tseries", + "unique", + "value_counts", + "wide_to_long", +] diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/_typing.py b/env-llmeval/lib/python3.10/site-packages/pandas/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..3df9a47a35fca32547947560a8df1cea1d1863c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_typing.py @@ -0,0 +1,525 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, + Mapping, + MutableMapping, + Sequence, +) +from datetime import ( + date, + datetime, + timedelta, + tzinfo, +) +from os import PathLike +import sys +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Protocol, + Type as type_t, + TypeVar, + Union, + overload, +) + +import numpy as np + +# To prevent import cycles place any internal imports in the branch below +# and use a string literal forward reference to it in subsequent types +# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles +if TYPE_CHECKING: + import numpy.typing as npt + + from pandas._libs import ( + NaTType, + Period, + Timedelta, + Timestamp, + ) + from pandas._libs.tslibs import BaseOffset + + from pandas.core.dtypes.dtypes import ExtensionDtype + + from pandas import Interval + from pandas.arrays import ( + DatetimeArray, + TimedeltaArray, + ) + from pandas.core.arrays.base import ExtensionArray + from pandas.core.frame import DataFrame + from pandas.core.generic import NDFrame + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + GroupBy, + SeriesGroupBy, + ) + from pandas.core.indexes.base import Index + from pandas.core.internals import ( + ArrayManager, + BlockManager, + SingleArrayManager, + SingleBlockManager, + ) + from pandas.core.resample import Resampler + from pandas.core.series import Series + from pandas.core.window.rolling import BaseWindow + + from pandas.io.formats.format import EngFormatter + from pandas.tseries.holiday import AbstractHolidayCalendar + + ScalarLike_co = Union[ + int, + float, + complex, + str, + bytes, + np.generic, + ] + + # numpy compatible types + NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike] + # Name "npt._ArrayLikeInt_co" is not defined [name-defined] + NumpySorter = Optional[npt._ArrayLikeInt_co] # type: ignore[name-defined] + + from typing import SupportsIndex + + if sys.version_info >= (3, 10): + from typing import TypeGuard # pyright: ignore[reportUnusedImport] + else: + from typing_extensions import TypeGuard # pyright: ignore[reportUnusedImport] + + if sys.version_info >= (3, 11): + from typing import Self # pyright: ignore[reportUnusedImport] + else: + from typing_extensions import Self # pyright: ignore[reportUnusedImport] +else: + npt: Any = None + Self: Any = None + TypeGuard: Any = None + +HashableT = TypeVar("HashableT", bound=Hashable) +MutableMappingT = TypeVar("MutableMappingT", bound=MutableMapping) + +# array-like + +ArrayLike = Union["ExtensionArray", np.ndarray] +AnyArrayLike = Union[ArrayLike, "Index", "Series"] +TimeArrayLike = Union["DatetimeArray", "TimedeltaArray"] + +# list-like + +# from https://github.com/hauntsaninja/useful_types +# includes Sequence-like objects but excludes str and bytes +_T_co = TypeVar("_T_co", covariant=True) + + +class SequenceNotStr(Protocol[_T_co]): + @overload + def __getitem__(self, index: SupportsIndex, /) -> _T_co: + ... + + @overload + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: + ... + + def __contains__(self, value: object, /) -> bool: + ... + + def __len__(self) -> int: + ... 
+ + def __iter__(self) -> Iterator[_T_co]: + ... + + def index(self, value: Any, /, start: int = 0, stop: int = ...) -> int: + ... + + def count(self, value: Any, /) -> int: + ... + + def __reversed__(self) -> Iterator[_T_co]: + ... + + +ListLike = Union[AnyArrayLike, SequenceNotStr, range] + +# scalars + +PythonScalar = Union[str, float, bool] +DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"] +PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"] +Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, date] +IntStrT = TypeVar("IntStrT", bound=Union[int, str]) + + +# timestamp and timedelta convertible types + +TimestampConvertibleTypes = Union[ + "Timestamp", date, np.datetime64, np.int64, float, str +] +TimestampNonexistent = Union[ + Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta +] +TimedeltaConvertibleTypes = Union[ + "Timedelta", timedelta, np.timedelta64, np.int64, float, str +] +Timezone = Union[str, tzinfo] + +ToTimestampHow = Literal["s", "e", "start", "end"] + +# NDFrameT is stricter and ensures that the same subclass of NDFrame always is +# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a +# Series is passed into a function, a Series is always returned and if a DataFrame is +# passed in, a DataFrame is always returned. +NDFrameT = TypeVar("NDFrameT", bound="NDFrame") + +NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index") + +AxisInt = int +Axis = Union[AxisInt, Literal["index", "columns", "rows"]] +IndexLabel = Union[Hashable, Sequence[Hashable]] +Level = Hashable +Shape = tuple[int, ...] +Suffixes = tuple[Optional[str], Optional[str]] +Ordered = Optional[bool] +JSONSerializable = Optional[Union[PythonScalar, list, dict]] +Frequency = Union[str, "BaseOffset"] +Axes = ListLike + +RandomState = Union[ + int, + np.ndarray, + np.random.Generator, + np.random.BitGenerator, + np.random.RandomState, +] + +# dtypes +NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]] +Dtype = Union["ExtensionDtype", NpDtype] +AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"] +# DtypeArg specifies all allowable dtypes in a functions its dtype argument +DtypeArg = Union[Dtype, dict[Hashable, Dtype]] +DtypeObj = Union[np.dtype, "ExtensionDtype"] + +# converters +ConvertersArg = dict[Hashable, Callable[[Dtype], Dtype]] + +# parse_dates +ParseDatesArg = Union[ + bool, list[Hashable], list[list[Hashable]], dict[Hashable, list[Hashable]] +] + +# For functions like rename that convert one label to another +Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]] + +# to maintain type information across generic functions and parametrization +T = TypeVar("T") + +# used in decorators to preserve the signature of the function it decorates +# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators +FuncType = Callable[..., Any] +F = TypeVar("F", bound=FuncType) + +# types of vectorized key functions for DataFrame::sort_values and +# DataFrame::sort_index, among others +ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]] +IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]] + +# types of `func` kwarg for DataFrame.aggregate and Series.aggregate +AggFuncTypeBase = Union[Callable, str] +AggFuncTypeDict = MutableMapping[ + Hashable, Union[AggFuncTypeBase, list[AggFuncTypeBase]] +] +AggFuncType = Union[ + AggFuncTypeBase, + list[AggFuncTypeBase], + AggFuncTypeDict, +] +AggObjType = Union[ + "Series", + "DataFrame", + 
"GroupBy", + "SeriesGroupBy", + "DataFrameGroupBy", + "BaseWindow", + "Resampler", +] + +PythonFuncType = Callable[[Any], Any] + +# filenames and file-like-objects +AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True) +AnyStr_contra = TypeVar("AnyStr_contra", str, bytes, contravariant=True) + + +class BaseBuffer(Protocol): + @property + def mode(self) -> str: + # for _get_filepath_or_buffer + ... + + def seek(self, __offset: int, __whence: int = ...) -> int: + # with one argument: gzip.GzipFile, bz2.BZ2File + # with two arguments: zip.ZipFile, read_sas + ... + + def seekable(self) -> bool: + # for bz2.BZ2File + ... + + def tell(self) -> int: + # for zip.ZipFile, read_stata, to_stata + ... + + +class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): + def read(self, __n: int = ...) -> AnyStr_co: + # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File + ... + + +class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): + def write(self, __b: AnyStr_contra) -> Any: + # for gzip.GzipFile, bz2.BZ2File + ... + + def flush(self) -> Any: + # for gzip.GzipFile, bz2.BZ2File + ... + + +class ReadPickleBuffer(ReadBuffer[bytes], Protocol): + def readline(self) -> bytes: + ... + + +class WriteExcelBuffer(WriteBuffer[bytes], Protocol): + def truncate(self, size: int | None = ...) -> int: + ... + + +class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol): + def __iter__(self) -> Iterator[AnyStr_co]: + # for engine=python + ... + + def fileno(self) -> int: + # for _MMapWrapper + ... + + def readline(self) -> AnyStr_co: + # for engine=python + ... + + @property + def closed(self) -> bool: + # for enine=pyarrow + ... + + +FilePath = Union[str, "PathLike[str]"] + +# for arbitrary kwargs passed during reading/writing files +StorageOptions = Optional[dict[str, Any]] + + +# compression keywords and compression +CompressionDict = dict[str, Any] +CompressionOptions = Optional[ + Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] +] + +# types in DataFrameFormatter +FormattersType = Union[ + list[Callable], tuple[Callable, ...], Mapping[Union[str, int], Callable] +] +ColspaceType = Mapping[Hashable, Union[str, int]] +FloatFormatType = Union[str, Callable, "EngFormatter"] +ColspaceArgType = Union[ + str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]] +] + +# Arguments for fillna() +FillnaOptions = Literal["backfill", "bfill", "ffill", "pad"] +InterpolateOptions = Literal[ + "linear", + "time", + "index", + "values", + "nearest", + "zero", + "slinear", + "quadratic", + "cubic", + "barycentric", + "polynomial", + "krogh", + "piecewise_polynomial", + "spline", + "pchip", + "akima", + "cubicspline", + "from_derivatives", +] + +# internals +Manager = Union[ + "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager" +] +SingleManager = Union["SingleArrayManager", "SingleBlockManager"] +Manager2D = Union["ArrayManager", "BlockManager"] + +# indexing +# PositionalIndexer -> valid 1D positional indexer, e.g. can pass +# to ndarray.__getitem__ +# ScalarIndexer is for a single value as the index +# SequenceIndexer is for list like or slices (but not tuples) +# PositionalIndexerTuple is extends the PositionalIndexer for 2D arrays +# These are used in various __getitem__ overloads +# TODO(typing#684): add Ellipsis, see +# https://github.com/python/typing/issues/684#issuecomment-548203158 +# https://bugs.python.org/issue41810 +# Using List[int] here rather than Sequence[int] to disallow tuples. 
+ScalarIndexer = Union[int, np.integer] +SequenceIndexer = Union[slice, list[int], np.ndarray] +PositionalIndexer = Union[ScalarIndexer, SequenceIndexer] +PositionalIndexerTuple = tuple[PositionalIndexer, PositionalIndexer] +PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple] +if TYPE_CHECKING: + TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]] +else: + TakeIndexer = Any + +# Shared by functions such as drop and astype +IgnoreRaise = Literal["ignore", "raise"] + +# Windowing rank methods +WindowingRankType = Literal["average", "min", "max"] + +# read_csv engines +CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"] + +# read_json engines +JSONEngine = Literal["ujson", "pyarrow"] + +# read_xml parsers +XMLParsers = Literal["lxml", "etree"] + +# read_html flavors +HTMLFlavors = Literal["lxml", "html5lib", "bs4"] + +# Interval closed type +IntervalLeftRight = Literal["left", "right"] +IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]] + +# datetime and NaTType +DatetimeNaTType = Union[datetime, "NaTType"] +DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] + +# sort_index +SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"] +NaPosition = Literal["first", "last"] + +# Arguments for nsmalles and n_largest +NsmallestNlargestKeep = Literal["first", "last", "all"] + +# quantile interpolation +QuantileInterpolation = Literal["linear", "lower", "higher", "midpoint", "nearest"] + +# plotting +PlottingOrientation = Literal["horizontal", "vertical"] + +# dropna +AnyAll = Literal["any", "all"] + +# merge +MergeHow = Literal["left", "right", "inner", "outer", "cross"] +MergeValidate = Literal[ + "one_to_one", + "1:1", + "one_to_many", + "1:m", + "many_to_one", + "m:1", + "many_to_many", + "m:m", +] + +# join +JoinHow = Literal["left", "right", "inner", "outer"] +JoinValidate = Literal[ + "one_to_one", + "1:1", + "one_to_many", + "1:m", + "many_to_one", + "m:1", + "many_to_many", + "m:m", +] + +# reindex +ReindexMethod = Union[FillnaOptions, Literal["nearest"]] + +MatplotlibColor = Union[str, Sequence[float]] +TimeGrouperOrigin = Union[ + "Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"] +] +TimeAmbiguous = Union[Literal["infer", "NaT", "raise"], "npt.NDArray[np.bool_]"] +TimeNonexistent = Union[ + Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta +] +DropKeep = Literal["first", "last", False] +CorrelationMethod = Union[ + Literal["pearson", "kendall", "spearman"], Callable[[np.ndarray, np.ndarray], float] +] +AlignJoin = Literal["outer", "inner", "left", "right"] +DtypeBackend = Literal["pyarrow", "numpy_nullable"] + +TimeUnit = Literal["s", "ms", "us", "ns"] +OpenFileErrors = Literal[ + "strict", + "ignore", + "replace", + "surrogateescape", + "xmlcharrefreplace", + "backslashreplace", + "namereplace", +] + +# update +UpdateJoin = Literal["left"] + +# applymap +NaAction = Literal["ignore"] + +# from_dict +FromDictOrient = Literal["columns", "index", "tight"] + +# to_gbc +ToGbqIfexist = Literal["fail", "replace", "append"] + +# to_stata +ToStataByteorder = Literal[">", "<", "little", "big"] + +# ExcelWriter +ExcelWriterIfSheetExists = Literal["error", "new", "replace", "overlay"] + +# Offsets +OffsetCalendar = Union[np.busdaycalendar, "AbstractHolidayCalendar"] + +# read_csv: usecols +UsecolsArgType = Union[ + SequenceNotStr[Hashable], + range, + AnyArrayLike, + Callable[[HashableT], bool], + None, +] diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/_version.py b/env-llmeval/lib/python3.10/site-packages/pandas/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a960630126d021d86f685a160b98cb5eada197 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_version.py @@ -0,0 +1,692 @@ +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. +# Generated by versioneer-0.28 +# https://github.com/python-versioneer/python-versioneer + +"""Git implementation of _version.py.""" + +import errno +import functools +import os +import re +import subprocess +import sys +from typing import Callable + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). + git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + git_date = "$Format:%ci$" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "v" + cfg.parentdir_prefix = "pandas-" + cfg.versionfile_source = "pandas/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY: dict[str, str] = {} +HANDLERS: dict[str, dict[str, Callable]] = {} + + +def register_vcs_handler(vcs, method): # decorator + """Create decorator to mark a method as the handler of a VCS.""" + + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + process = None + + popen_kwargs = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: + dispcmd = str([command] + args) + try: + # remember shell=False, so use git.cmd on windows, not just git + process = subprocess.Popen( + [command] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + **popen_kwargs, + ) + break + except OSError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print(f"unable to run {dispcmd}") + print(e) + return None, None + else: + if verbose: + print(f"unable to find command, tried {commands}") + return None, None + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: + if verbose: + 
print(f"unable to run {dispcmd} (error)") + print(f"stdout was {stdout}") + return None, process.returncode + return stdout, process.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print( + f"Tried directories {str(rootdirs)} \ + but none started with prefix {parentdir_prefix}" + ) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + with open(versionfile_abs, encoding="utf-8") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. 
The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = {r for r in refs if re.search(r"\d", r)} + if verbose: + print(f"discarding '{','.join(refs - tags)}', no digits") + if verbose: + print(f"likely tags: {','.join(sorted(tags))}") + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix) :] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r"\d", r): + continue + if verbose: + print(f"picking {r}") + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) + if rc != 0: + if verbose: + print(f"Directory {root} not under git control") + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + f"{tag_prefix}[[:digit:]]*", + ], + cwd=root, + ) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. 
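# (For context: `git branch --contains` typically prints one line per branch, e.g.
# "* (HEAD detached at 1a2b3c4)" followed by lines like "  master" / "  2.1.x";
# the detached-HEAD line and the two-character "* "/"  " prefixes are what the
# parsing below handles.)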
+ branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[: git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces[ + "error" + ] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix) :] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces) -> str: + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces): + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver): + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces): + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += f".post{post_version + 1}.dev{pieces['distance']}" + else: + rendered += f".post0.dev{pieces['distance']}" + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = f"0.post0.dev{pieces['distance']}" + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + return rendered + + +def render_pep440_post_branch(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError(f"unknown style '{style}'") + + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
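# (For example, with versionfile_source == "pandas/_version.py" the loop below
# applies os.path.dirname() twice: .../pandas/_version.py -> .../pandas -> the
# directory containing the package, i.e. the source tree root in a git checkout.)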
+ for _ in cfg.versionfile_source.split("/"): + root = os.path.dirname(root) + except NameError: + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_version_meson.py b/env-llmeval/lib/python3.10/site-packages/pandas/_version_meson.py new file mode 100644 index 0000000000000000000000000000000000000000..360ff90b89f68a2b2188f060b314ee2373df71de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_version_meson.py @@ -0,0 +1,2 @@ +__version__="2.2.2" +__git_version__="d9cdd2ee5a58015ef6f4d15c7226110c9aab8140" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..7c35dfdde90ba75d277c7817e97df5506a237fac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/conftest.py @@ -0,0 +1,1965 @@ +""" +This file is very long and growing, but it was decided to not split it yet, as +it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989 + +Instead of splitting it was decided to define sections here: +- Configuration / Settings +- Autouse fixtures +- Common arguments +- Missing values & co. +- Classes +- Indices +- Series' +- DataFrames +- Operators & Operations +- Data sets/files +- Time zones +- Dtypes +- Misc +""" +from __future__ import annotations + +from collections import abc +from datetime import ( + date, + datetime, + time, + timedelta, + timezone, +) +from decimal import Decimal +import operator +import os +from typing import ( + TYPE_CHECKING, + Callable, +) + +from dateutil.tz import ( + tzlocal, + tzutc, +) +import hypothesis +from hypothesis import strategies as st +import numpy as np +import pytest +from pytz import ( + FixedOffset, + utc, +) + +from pandas._config.config import _get_option + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + IntervalDtype, +) + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + Interval, + IntervalIndex, + Period, + RangeIndex, + Series, + Timedelta, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.indexes.api import ( + Index, + MultiIndex, +) +from pandas.util.version import Version + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + +try: + import pyarrow as pa +except ImportError: + has_pyarrow = False +else: + del pa + has_pyarrow = True + +import zoneinfo + +try: + zoneinfo.ZoneInfo("UTC") +except zoneinfo.ZoneInfoNotFoundError: + zoneinfo = None # type: ignore[assignment] + + +# ---------------------------------------------------------------- +# Configuration / Settings +# ---------------------------------------------------------------- +# pytest + + +def pytest_addoption(parser) -> None: + parser.addoption( + "--no-strict-data-files", + action="store_false", + help="Don't fail if a test is skipped 
for missing data file.", + ) + + +def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None: + """Ignore doctest warning. + + Parameters + ---------- + item : pytest.Item + pytest test item. + path : str + Module path to Python object, e.g. "pandas.core.frame.DataFrame.append". A + warning will be filtered when item.name ends with in given path. So it is + sufficient to specify e.g. "DataFrame.append". + message : str + Message to be filtered. + """ + if item.name.endswith(path): + item.add_marker(pytest.mark.filterwarnings(f"ignore:{message}")) + + +def pytest_collection_modifyitems(items, config) -> None: + is_doctest = config.getoption("--doctest-modules") or config.getoption( + "--doctest-cython", default=False + ) + + # Warnings from doctests that can be ignored; place reason in comment above. + # Each entry specifies (path, message) - see the ignore_doctest_warning function + ignored_doctest_warnings = [ + ("is_int64_dtype", "is_int64_dtype is deprecated"), + ("is_interval_dtype", "is_interval_dtype is deprecated"), + ("is_period_dtype", "is_period_dtype is deprecated"), + ("is_datetime64tz_dtype", "is_datetime64tz_dtype is deprecated"), + ("is_categorical_dtype", "is_categorical_dtype is deprecated"), + ("is_sparse", "is_sparse is deprecated"), + ("DataFrameGroupBy.fillna", "DataFrameGroupBy.fillna is deprecated"), + ("NDFrame.replace", "The 'method' keyword"), + ("NDFrame.replace", "Series.replace without 'value'"), + ("NDFrame.clip", "Downcasting behavior in Series and DataFrame methods"), + ("Series.idxmin", "The behavior of Series.idxmin"), + ("Series.idxmax", "The behavior of Series.idxmax"), + ("SeriesGroupBy.fillna", "SeriesGroupBy.fillna is deprecated"), + ("SeriesGroupBy.idxmin", "The behavior of Series.idxmin"), + ("SeriesGroupBy.idxmax", "The behavior of Series.idxmax"), + # Docstring divides by zero to show behavior difference + ("missing.mask_zero_div_zero", "divide by zero encountered"), + ( + "to_pydatetime", + "The behavior of DatetimeProperties.to_pydatetime is deprecated", + ), + ( + "pandas.core.generic.NDFrame.bool", + "(Series|DataFrame).bool is now deprecated and will be removed " + "in future version of pandas", + ), + ( + "pandas.core.generic.NDFrame.first", + "first is deprecated and will be removed in a future version. " + "Please create a mask and filter using `.loc` instead", + ), + ( + "Resampler.fillna", + "DatetimeIndexResampler.fillna is deprecated", + ), + ( + "DataFrameGroupBy.fillna", + "DataFrameGroupBy.fillna with 'method' is deprecated", + ), + ( + "DataFrameGroupBy.fillna", + "DataFrame.fillna with 'method' is deprecated", + ), + ("read_parquet", "Passing a BlockManager to DataFrame is deprecated"), + ] + + if is_doctest: + for item in items: + for path, message in ignored_doctest_warnings: + ignore_doctest_warning(item, path, message) + + +hypothesis_health_checks = [hypothesis.HealthCheck.too_slow] +if Version(hypothesis.__version__) >= Version("6.83.2"): + hypothesis_health_checks.append(hypothesis.HealthCheck.differing_executors) + +# Hypothesis +hypothesis.settings.register_profile( + "ci", + # Hypothesis timing checks are tuned for scalars by default, so we bump + # them from 200ms to 500ms per test case as the global default. If this + # is too short for a specific test, (a) try to make it faster, and (b) + # if it really is slow add `@settings(deadline=...)` with a working value, + # or `deadline=None` to entirely disable timeouts for that test. + # 2022-02-09: Changed deadline from 500 -> None. 
Deadline leads to + # non-actionable, flaky CI failures (# GH 24641, 44969, 45118, 44969) + deadline=None, + suppress_health_check=tuple(hypothesis_health_checks), +) +hypothesis.settings.load_profile("ci") + +# Registering these strategies makes them globally available via st.from_type, +# which is use for offsets in tests/tseries/offsets/test_offsets_properties.py +for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans()) + ) + +for name in "YearBegin YearEnd BYearBegin BYearEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, + st.builds( + cls, + n=st.integers(-5, 5), + normalize=st.booleans(), + month=st.integers(min_value=1, max_value=12), + ), + ) + +for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, + st.builds( + cls, + n=st.integers(-24, 24), + normalize=st.booleans(), + startingMonth=st.integers(min_value=1, max_value=12), + ), + ) + + +# ---------------------------------------------------------------- +# Autouse fixtures +# ---------------------------------------------------------------- + + +# https://github.com/pytest-dev/pytest/issues/11873 +# Would like to avoid autouse=True, but cannot as of pytest 8.0.0 +@pytest.fixture(autouse=True) +def add_doctest_imports(doctest_namespace) -> None: + """ + Make `np` and `pd` names available for doctests. + """ + doctest_namespace["np"] = np + doctest_namespace["pd"] = pd + + +@pytest.fixture(autouse=True) +def configure_tests() -> None: + """ + Configure settings for all tests and test modules. + """ + pd.set_option("chained_assignment", "raise") + + +# ---------------------------------------------------------------- +# Common arguments +# ---------------------------------------------------------------- +@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={repr(x)}") +def axis(request): + """ + Fixture for returning the axis numbers of a DataFrame. + """ + return request.param + + +axis_frame = axis + + +@pytest.fixture(params=[1, "columns"], ids=lambda x: f"axis={repr(x)}") +def axis_1(request): + """ + Fixture for returning aliases of axis 1 of a DataFrame. + """ + return request.param + + +@pytest.fixture(params=[True, False, None]) +def observed(request): + """ + Pass in the observed keyword to groupby for [True, False] + This indicates whether categoricals should return values for + values which are not in the grouper [False / None], or only values which + appear in the grouper [True]. [None] is supported for future compatibility + if we decide to change the default (and would need to warn if this + parameter is not passed). + """ + return request.param + + +@pytest.fixture(params=[True, False, None]) +def ordered(request): + """ + Boolean 'ordered' parameter for Categorical. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def skipna(request): + """ + Boolean 'skipna' parameter. + """ + return request.param + + +@pytest.fixture(params=["first", "last", False]) +def keep(request): + """ + Valid values for the 'keep' parameter used in + .duplicated or .drop_duplicates + """ + return request.param + + +@pytest.fixture(params=["both", "neither", "left", "right"]) +def inclusive_endpoints_fixture(request): + """ + Fixture for trying all interval 'inclusive' parameters. 
+ """ + return request.param + + +@pytest.fixture(params=["left", "right", "both", "neither"]) +def closed(request): + """ + Fixture for trying all interval closed parameters. + """ + return request.param + + +@pytest.fixture(params=["left", "right", "both", "neither"]) +def other_closed(request): + """ + Secondary closed fixture to allow parametrizing over all pairs of closed. + """ + return request.param + + +@pytest.fixture( + params=[ + None, + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression(request): + """ + Fixture for trying common compression types in compression tests. + """ + return request.param + + +@pytest.fixture( + params=[ + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression_only(request): + """ + Fixture for trying common compression types in compression tests excluding + uncompressed case. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def writable(request): + """ + Fixture that an array is writable. + """ + return request.param + + +@pytest.fixture(params=["inner", "outer", "left", "right"]) +def join_type(request): + """ + Fixture for trying all types of join operations. + """ + return request.param + + +@pytest.fixture(params=["nlargest", "nsmallest"]) +def nselect_method(request): + """ + Fixture for trying all nselect methods. + """ + return request.param + + +# ---------------------------------------------------------------- +# Missing values & co. +# ---------------------------------------------------------------- +@pytest.fixture(params=tm.NULL_OBJECTS, ids=lambda x: type(x).__name__) +def nulls_fixture(request): + """ + Fixture for each null type in pandas. + """ + return request.param + + +nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture + + +@pytest.fixture(params=[None, np.nan, pd.NaT]) +def unique_nulls_fixture(request): + """ + Fixture for each null type in pandas, each null type exactly once. + """ + return request.param + + +# Generate cartesian product of unique_nulls_fixture: +unique_nulls_fixture2 = unique_nulls_fixture + + +@pytest.fixture(params=tm.NP_NAT_OBJECTS, ids=lambda x: type(x).__name__) +def np_nat_fixture(request): + """ + Fixture for each NaT type in numpy. + """ + return request.param + + +# Generate cartesian product of np_nat_fixture: +np_nat_fixture2 = np_nat_fixture + + +# ---------------------------------------------------------------- +# Classes +# ---------------------------------------------------------------- + + +@pytest.fixture(params=[DataFrame, Series]) +def frame_or_series(request): + """ + Fixture to parametrize over DataFrame and Series. 
+ """ + return request.param + + +@pytest.fixture(params=[Index, Series], ids=["index", "series"]) +def index_or_series(request): + """ + Fixture to parametrize over Index and Series, made necessary by a mypy + bug, giving an error: + + List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]" + + See GH#29725 + """ + return request.param + + +# Generate cartesian product of index_or_series fixture: +index_or_series2 = index_or_series + + +@pytest.fixture(params=[Index, Series, pd.array], ids=["index", "series", "array"]) +def index_or_series_or_array(request): + """ + Fixture to parametrize over Index, Series, and ExtensionArray + """ + return request.param + + +@pytest.fixture(params=[Index, Series, DataFrame, pd.array], ids=lambda x: x.__name__) +def box_with_array(request): + """ + Fixture to test behavior for Index, Series, DataFrame, and pandas Array + classes + """ + return request.param + + +box_with_array2 = box_with_array + + +@pytest.fixture +def dict_subclass() -> type[dict]: + """ + Fixture for a dictionary subclass. + """ + + class TestSubDict(dict): + def __init__(self, *args, **kwargs) -> None: + dict.__init__(self, *args, **kwargs) + + return TestSubDict + + +@pytest.fixture +def non_dict_mapping_subclass() -> type[abc.Mapping]: + """ + Fixture for a non-mapping dictionary subclass. + """ + + class TestNonDictMapping(abc.Mapping): + def __init__(self, underlying_dict) -> None: + self._data = underlying_dict + + def __getitem__(self, key): + return self._data.__getitem__(key) + + def __iter__(self) -> Iterator: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + return TestNonDictMapping + + +# ---------------------------------------------------------------- +# Indices +# ---------------------------------------------------------------- +@pytest.fixture +def multiindex_year_month_day_dataframe_random_data(): + """ + DataFrame with 3 level MultiIndex (year, month, day) covering + first 100 business days from 2000-01-01 with random data + """ + tdf = DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100, freq="B"), + ) + ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum() + # use int64 Index, to make sure things work + ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels]) + ymd.index.set_names(["year", "month", "day"], inplace=True) + return ymd + + +@pytest.fixture +def lexsorted_two_level_string_multiindex() -> MultiIndex: + """ + 2-level MultiIndex, lexsorted, with string names. 
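Short standalone illustration of the levels/codes construction the MultiIndex fixtures in this section rely on: each entry of codes is a positional lookup into the corresponding level.

from pandas import MultiIndex

mi = MultiIndex(
    levels=[["foo", "bar"], ["one", "two"]],
    codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
    names=["first", "second"],
)
# the codes expand to the explicit tuples
assert list(mi) == [("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")]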
+ """ + return MultiIndex( + levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", "second"], + ) + + +@pytest.fixture +def multiindex_dataframe_random_data( + lexsorted_two_level_string_multiindex, +) -> DataFrame: + """DataFrame with 2 level MultiIndex with random data""" + index = lexsorted_two_level_string_multiindex + return DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=Index(["A", "B", "C"], name="exp"), + ) + + +def _create_multiindex(): + """ + MultiIndex used to test the general functionality of this object + """ + + # See Also: tests.multi.conftest.idx + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) + + major_codes = np.array([0, 0, 1, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + index_names = ["first", "second"] + return MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=index_names, + verify_integrity=False, + ) + + +def _create_mi_with_dt64tz_level(): + """ + MultiIndex with a level that is a tzaware DatetimeIndex. + """ + # GH#8367 round trip with pickle + return MultiIndex.from_product( + [[1, 2], ["a", "b"], date_range("20130101", periods=3, tz="US/Eastern")], + names=["one", "two", "three"], + ) + + +indices_dict = { + "string": Index([f"pandas_{i}" for i in range(100)]), + "datetime": date_range("2020-01-01", periods=100), + "datetime-tz": date_range("2020-01-01", periods=100, tz="US/Pacific"), + "period": period_range("2020-01-01", periods=100, freq="D"), + "timedelta": timedelta_range(start="1 day", periods=100, freq="D"), + "range": RangeIndex(100), + "int8": Index(np.arange(100), dtype="int8"), + "int16": Index(np.arange(100), dtype="int16"), + "int32": Index(np.arange(100), dtype="int32"), + "int64": Index(np.arange(100), dtype="int64"), + "uint8": Index(np.arange(100), dtype="uint8"), + "uint16": Index(np.arange(100), dtype="uint16"), + "uint32": Index(np.arange(100), dtype="uint32"), + "uint64": Index(np.arange(100), dtype="uint64"), + "float32": Index(np.arange(100), dtype="float32"), + "float64": Index(np.arange(100), dtype="float64"), + "bool-object": Index([True, False] * 5, dtype=object), + "bool-dtype": Index([True, False] * 5, dtype=bool), + "complex64": Index( + np.arange(100, dtype="complex64") + 1.0j * np.arange(100, dtype="complex64") + ), + "complex128": Index( + np.arange(100, dtype="complex128") + 1.0j * np.arange(100, dtype="complex128") + ), + "categorical": CategoricalIndex(list("abcd") * 25), + "interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=101)), + "empty": Index([]), + "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), + "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(), + "multi": _create_multiindex(), + "repeats": Index([0, 0, 1, 1, 2, 2]), + "nullable_int": Index(np.arange(100), dtype="Int64"), + "nullable_uint": Index(np.arange(100), dtype="UInt16"), + "nullable_float": Index(np.arange(100), dtype="Float32"), + "nullable_bool": Index(np.arange(100).astype(bool), dtype="boolean"), + "string-python": Index( + pd.array([f"pandas_{i}" for i in range(100)], dtype="string[python]") + ), +} +if has_pyarrow: + idx = Index(pd.array([f"pandas_{i}" for i in range(100)], dtype="string[pyarrow]")) + indices_dict["string-pyarrow"] = idx + + +@pytest.fixture(params=indices_dict.keys()) +def index(request): + """ + Fixture for many "simple" kinds of indices. 
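Hypothetical consumer of the index fixture defined here: the test runs once per key in indices_dict and receives a fresh copy of that Index, so mutation in one parametrization cannot leak into another.

def test_index_copy_round_trip(index):
    result = index.copy()
    assert result.equals(index)
    assert result is not index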
+ + These indices are unlikely to cover corner cases, e.g. + - no names + - no NaTs/NaNs + - no values near implementation bounds + - ... + """ + # copy to avoid mutation, e.g. setting .name + return indices_dict[request.param].copy() + + +# Needed to generate cartesian product of indices +index_fixture2 = index + + +@pytest.fixture( + params=[ + key for key, value in indices_dict.items() if not isinstance(value, MultiIndex) + ] +) +def index_flat(request): + """ + index fixture, but excluding MultiIndex cases. + """ + key = request.param + return indices_dict[key].copy() + + +# Alias so we can test with cartesian product of index_flat +index_flat2 = index_flat + + +@pytest.fixture( + params=[ + key + for key, value in indices_dict.items() + if not ( + key.startswith(("int", "uint", "float")) + or key in ["range", "empty", "repeats", "bool-dtype"] + ) + and not isinstance(value, MultiIndex) + ] +) +def index_with_missing(request): + """ + Fixture for indices with missing values. + + Integer-dtype and empty cases are excluded because they cannot hold missing + values. + + MultiIndex is excluded because isna() is not defined for MultiIndex. + """ + + # GH 35538. Use deep copy to avoid illusive bug on np-dev + # GHA pipeline that writes into indices_dict despite copy + ind = indices_dict[request.param].copy(deep=True) + vals = ind.values.copy() + if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]: + # For setting missing values in the top level of MultiIndex + vals = ind.tolist() + vals[0] = (None,) + vals[0][1:] + vals[-1] = (None,) + vals[-1][1:] + return MultiIndex.from_tuples(vals) + else: + vals[0] = None + vals[-1] = None + return type(ind)(vals) + + +# ---------------------------------------------------------------- +# Series' +# ---------------------------------------------------------------- +@pytest.fixture +def string_series() -> Series: + """ + Fixture for Series of floats with Index of unique strings + """ + return Series( + np.arange(30, dtype=np.float64) * 1.1, + index=Index([f"i_{i}" for i in range(30)], dtype=object), + name="series", + ) + + +@pytest.fixture +def object_series() -> Series: + """ + Fixture for Series of dtype object with Index of unique strings + """ + data = [f"foo_{i}" for i in range(30)] + index = Index([f"bar_{i}" for i in range(30)], dtype=object) + return Series(data, index=index, name="objects", dtype=object) + + +@pytest.fixture +def datetime_series() -> Series: + """ + Fixture for Series of floats with DatetimeIndex + """ + return Series( + np.random.default_rng(2).standard_normal(30), + index=date_range("2000-01-01", periods=30, freq="B"), + name="ts", + ) + + +def _create_series(index): + """Helper for the _series dict""" + size = len(index) + data = np.random.default_rng(2).standard_normal(size) + return Series(data, index=index, name="a", copy=False) + + +_series = { + f"series-with-{index_id}-index": _create_series(index) + for index_id, index in indices_dict.items() +} + + +@pytest.fixture +def series_with_simple_index(index) -> Series: + """ + Fixture for tests on series with changing types of indices. 
+ """ + return _create_series(index) + + +_narrow_series = { + f"{dtype.__name__}-series": Series( + range(30), index=[f"i-{i}" for i in range(30)], name="a", dtype=dtype + ) + for dtype in tm.NARROW_NP_DTYPES +} + + +_index_or_series_objs = {**indices_dict, **_series, **_narrow_series} + + +@pytest.fixture(params=_index_or_series_objs.keys()) +def index_or_series_obj(request): + """ + Fixture for tests on indexes, series and series with a narrow dtype + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_objs[request.param].copy(deep=True) + + +_typ_objects_series = { + f"{dtype.__name__}-series": Series(dtype) for dtype in tm.PYTHON_DATA_TYPES +} + + +_index_or_series_memory_objs = { + **indices_dict, + **_series, + **_narrow_series, + **_typ_objects_series, +} + + +@pytest.fixture(params=_index_or_series_memory_objs.keys()) +def index_or_series_memory_obj(request): + """ + Fixture for tests on indexes, series, series with a narrow dtype and + series with empty objects type + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_memory_objs[request.param].copy(deep=True) + + +# ---------------------------------------------------------------- +# DataFrames +# ---------------------------------------------------------------- +@pytest.fixture +def int_frame() -> DataFrame: + """ + Fixture for DataFrame of ints with index of unique strings + + Columns are ['A', 'B', 'C', 'D'] + """ + return DataFrame( + np.ones((30, 4), dtype=np.int64), + index=Index([f"foo_{i}" for i in range(30)], dtype=object), + columns=Index(list("ABCD"), dtype=object), + ) + + +@pytest.fixture +def float_frame() -> DataFrame: + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. + """ + return DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + index=Index([f"foo_{i}" for i in range(30)]), + columns=Index(list("ABCD")), + ) + + +@pytest.fixture +def rand_series_with_duplicate_datetimeindex() -> Series: + """ + Fixture for Series with a DatetimeIndex that has duplicates. + """ + dates = [ + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ] + + return Series(np.random.default_rng(2).standard_normal(len(dates)), index=dates) + + +# ---------------------------------------------------------------- +# Scalars +# ---------------------------------------------------------------- +@pytest.fixture( + params=[ + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + (Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")), + (Period("2012-01", freq="M"), "period[M]"), + (Period("2012-02-01", freq="D"), "period[D]"), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), + ), + (Timedelta(seconds=500), "timedelta64[ns]"), + ] +) +def ea_scalar_and_dtype(request): + return request.param + + +# ---------------------------------------------------------------- +# Operators & Operations +# ---------------------------------------------------------------- + + +@pytest.fixture(params=tm.arithmetic_dunder_methods) +def all_arithmetic_operators(request): + """ + Fixture for dunder names for common arithmetic operations. 
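Hypothetical consumer of all_arithmetic_operators: the fixture yields dunder names such as "__add__" and "__radd__", which tests usually resolve with getattr on the object under test.

import pandas as pd


def test_series_implements_arithmetic_dunders(all_arithmetic_operators):
    ser = pd.Series([1.0, 2.0, 3.0])
    method = getattr(ser, all_arithmetic_operators)
    # the exact dunder list comes from pandas._testing; every entry should be
    # handled by Series rather than returning NotImplemented
    assert method(1) is not NotImplemented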
+ """ + return request.param + + +@pytest.fixture( + params=[ + operator.add, + ops.radd, + operator.sub, + ops.rsub, + operator.mul, + ops.rmul, + operator.truediv, + ops.rtruediv, + operator.floordiv, + ops.rfloordiv, + operator.mod, + ops.rmod, + operator.pow, + ops.rpow, + operator.eq, + operator.ne, + operator.lt, + operator.le, + operator.gt, + operator.ge, + operator.and_, + ops.rand_, + operator.xor, + ops.rxor, + operator.or_, + ops.ror_, + ] +) +def all_binary_operators(request): + """ + Fixture for operator and roperator arithmetic, comparison, and logical ops. + """ + return request.param + + +@pytest.fixture( + params=[ + operator.add, + ops.radd, + operator.sub, + ops.rsub, + operator.mul, + ops.rmul, + operator.truediv, + ops.rtruediv, + operator.floordiv, + ops.rfloordiv, + operator.mod, + ops.rmod, + operator.pow, + ops.rpow, + ] +) +def all_arithmetic_functions(request): + """ + Fixture for operator and roperator arithmetic functions. + + Notes + ----- + This includes divmod and rdivmod, whereas all_arithmetic_operators + does not. + """ + return request.param + + +_all_numeric_reductions = [ + "count", + "sum", + "max", + "min", + "mean", + "prod", + "std", + "var", + "median", + "kurt", + "skew", + "sem", +] + + +@pytest.fixture(params=_all_numeric_reductions) +def all_numeric_reductions(request): + """ + Fixture for numeric reduction names. + """ + return request.param + + +_all_boolean_reductions = ["all", "any"] + + +@pytest.fixture(params=_all_boolean_reductions) +def all_boolean_reductions(request): + """ + Fixture for boolean reduction names. + """ + return request.param + + +_all_reductions = _all_numeric_reductions + _all_boolean_reductions + + +@pytest.fixture(params=_all_reductions) +def all_reductions(request): + """ + Fixture for all (boolean + numeric) reduction names. + """ + return request.param + + +@pytest.fixture( + params=[ + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.lt, + operator.le, + ] +) +def comparison_op(request): + """ + Fixture for operator module comparison functions. + """ + return request.param + + +@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"]) +def compare_operators_no_eq_ne(request): + """ + Fixture for dunder names for compare operations except == and != + + * >= + * > + * < + * <= + """ + return request.param + + +@pytest.fixture( + params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"] +) +def all_logical_operators(request): + """ + Fixture for dunder names for common logical operations + + * | + * & + * ^ + """ + return request.param + + +_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"] + + +@pytest.fixture(params=_all_numeric_accumulations) +def all_numeric_accumulations(request): + """ + Fixture for numeric accumulation names + """ + return request.param + + +# ---------------------------------------------------------------- +# Data sets/files +# ---------------------------------------------------------------- +@pytest.fixture +def strict_data_files(pytestconfig): + """ + Returns the configuration for the test setting `--no-strict-data-files`. + """ + return pytestconfig.getoption("--no-strict-data-files") + + +@pytest.fixture +def datapath(strict_data_files: str) -> Callable[..., str]: + """ + Get the path to a data file. + + Parameters + ---------- + path : str + Path to the file, relative to ``pandas/tests/`` + + Returns + ------- + path including ``pandas/tests``. 
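Hedged sketch of how datapath is meant to be used: the returned callable joins its arguments under pandas/tests and either skips or raises when the file is missing. The path components below are only an example and may not exist in every checkout.

def test_read_bundled_file(datapath):
    path = datapath("io", "data", "csv", "iris.csv")  # example path, assumed present
    with open(path, encoding="utf-8") as handle:
        assert handle.readline()  # at least a header line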
+ + Raises + ------ + ValueError + If the path doesn't exist and the --no-strict-data-files option is not set. + """ + BASE_PATH = os.path.join(os.path.dirname(__file__), "tests") + + def deco(*args): + path = os.path.join(BASE_PATH, *args) + if not os.path.exists(path): + if strict_data_files: + raise ValueError( + f"Could not find file {path} and --no-strict-data-files is not set." + ) + pytest.skip(f"Could not find {path}.") + return path + + return deco + + +# ---------------------------------------------------------------- +# Time zones +# ---------------------------------------------------------------- +TIMEZONES = [ + None, + "UTC", + "US/Eastern", + "Asia/Tokyo", + "dateutil/US/Pacific", + "dateutil/Asia/Singapore", + "+01:15", + "-02:15", + "UTC+01:15", + "UTC-02:15", + tzutc(), + tzlocal(), + FixedOffset(300), + FixedOffset(0), + FixedOffset(-300), + timezone.utc, + timezone(timedelta(hours=1)), + timezone(timedelta(hours=-1), name="foo"), +] +if zoneinfo is not None: + TIMEZONES.extend( + [ + zoneinfo.ZoneInfo("US/Pacific"), # type: ignore[list-item] + zoneinfo.ZoneInfo("UTC"), # type: ignore[list-item] + ] + ) +TIMEZONE_IDS = [repr(i) for i in TIMEZONES] + + +@td.parametrize_fixture_doc(str(TIMEZONE_IDS)) +@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS) +def tz_naive_fixture(request): + """ + Fixture for trying timezones including default (None): {0} + """ + return request.param + + +@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:])) +@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:]) +def tz_aware_fixture(request): + """ + Fixture for trying explicit timezones: {0} + """ + return request.param + + +# Generate cartesian product of tz_aware_fixture: +tz_aware_fixture2 = tz_aware_fixture + + +_UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc] +if zoneinfo is not None: + _UTCS.append(zoneinfo.ZoneInfo("UTC")) + + +@pytest.fixture(params=_UTCS) +def utc_fixture(request): + """ + Fixture to provide variants of UTC timezone strings and tzinfo objects. + """ + return request.param + + +utc_fixture2 = utc_fixture + + +@pytest.fixture(params=["s", "ms", "us", "ns"]) +def unit(request): + """ + datetime64 units we support. + """ + return request.param + + +unit2 = unit + + +# ---------------------------------------------------------------- +# Dtypes +# ---------------------------------------------------------------- +@pytest.fixture(params=tm.STRING_DTYPES) +def string_dtype(request): + """ + Parametrized fixture for string dtypes. + + * str + * 'str' + * 'U' + """ + return request.param + + +@pytest.fixture( + params=[ + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ] +) +def nullable_string_dtype(request): + """ + Parametrized fixture for string dtypes. + + * 'string[python]' + * 'string[pyarrow]' + """ + return request.param + + +@pytest.fixture( + params=[ + "python", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + pytest.param("pyarrow_numpy", marks=td.skip_if_no("pyarrow")), + ] +) +def string_storage(request): + """ + Parametrized fixture for pd.options.mode.string_storage. + + * 'python' + * 'pyarrow' + * 'pyarrow_numpy' + """ + return request.param + + +@pytest.fixture( + params=[ + "numpy_nullable", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + ] +) +def dtype_backend(request): + """ + Parametrized fixture for pd.options.mode.string_storage. 
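Hypothetical consumer of tz_aware_fixture: each parametrization supplies a timezone in one of several representations (pytz and dateutil names, offset strings, tzinfo objects), all of which tz_localize accepts.

import pandas as pd


def test_localize_then_strip(tz_aware_fixture):
    idx = pd.date_range("2020-01-01", periods=3, freq="D")
    localized = idx.tz_localize(tz_aware_fixture)
    assert localized.tz is not None
    # dropping the zone again restores the original wall times
    assert localized.tz_localize(None).equals(idx)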
+ + * 'python' + * 'pyarrow' + """ + return request.param + + +# Alias so we can test with cartesian product of string_storage +string_storage2 = string_storage + + +@pytest.fixture(params=tm.BYTES_DTYPES) +def bytes_dtype(request): + """ + Parametrized fixture for bytes dtypes. + + * bytes + * 'bytes' + """ + return request.param + + +@pytest.fixture(params=tm.OBJECT_DTYPES) +def object_dtype(request): + """ + Parametrized fixture for object dtypes. + + * object + * 'object' + """ + return request.param + + +@pytest.fixture( + params=[ + "object", + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ] +) +def any_string_dtype(request): + """ + Parametrized fixture for string dtypes. + * 'object' + * 'string[python]' + * 'string[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.DATETIME64_DTYPES) +def datetime64_dtype(request): + """ + Parametrized fixture for datetime64 dtypes. + + * 'datetime64[ns]' + * 'M8[ns]' + """ + return request.param + + +@pytest.fixture(params=tm.TIMEDELTA64_DTYPES) +def timedelta64_dtype(request): + """ + Parametrized fixture for timedelta64 dtypes. + + * 'timedelta64[ns]' + * 'm8[ns]' + """ + return request.param + + +@pytest.fixture +def fixed_now_ts() -> Timestamp: + """ + Fixture emits fixed Timestamp.now() + """ + return Timestamp( # pyright: ignore[reportGeneralTypeIssues] + year=2021, month=1, day=1, hour=12, minute=4, second=13, microsecond=22 + ) + + +@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES) +def float_numpy_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.FLOAT_EA_DTYPES) +def float_ea_dtype(request): + """ + Parameterized fixture for float dtypes. + + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_FLOAT_DTYPES) +def any_float_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.COMPLEX_DTYPES) +def complex_dtype(request): + """ + Parameterized fixture for complex dtypes. + + * complex + * 'complex64' + * 'complex128' + """ + return request.param + + +@pytest.fixture(params=tm.SIGNED_INT_NUMPY_DTYPES) +def any_signed_int_numpy_dtype(request): + """ + Parameterized fixture for signed integer dtypes. + + * int + * 'int8' + * 'int16' + * 'int32' + * 'int64' + """ + return request.param + + +@pytest.fixture(params=tm.UNSIGNED_INT_NUMPY_DTYPES) +def any_unsigned_int_numpy_dtype(request): + """ + Parameterized fixture for unsigned integer dtypes. + + * 'uint8' + * 'uint16' + * 'uint32' + * 'uint64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_NUMPY_DTYPES) +def any_int_numpy_dtype(request): + """ + Parameterized fixture for any integer dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_EA_DTYPES) +def any_int_ea_dtype(request): + """ + Parameterized fixture for any nullable integer dtype. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_DTYPES) +def any_int_dtype(request): + """ + Parameterized fixture for any nullable integer dtype. 
+ + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES) +def any_numeric_ea_dtype(request): + """ + Parameterized fixture for any nullable integer dtype and + any float ea dtypes. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# Unsupported operand types for + ("List[Union[str, ExtensionDtype, dtype[Any], +# Type[object]]]" and "List[str]") +@pytest.fixture( + params=tm.ALL_INT_EA_DTYPES + + tm.FLOAT_EA_DTYPES + + tm.ALL_INT_PYARROW_DTYPES_STR_REPR + + tm.FLOAT_PYARROW_DTYPES_STR_REPR # type: ignore[operator] +) +def any_numeric_ea_and_arrow_dtype(request): + """ + Parameterized fixture for any nullable integer dtype and + any float ea dtypes. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + * 'uint8[pyarrow]' + * 'int8[pyarrow]' + * 'uint16[pyarrow]' + * 'int16[pyarrow]' + * 'uint32[pyarrow]' + * 'int32[pyarrow]' + * 'uint64[pyarrow]' + * 'int64[pyarrow]' + * 'float32[pyarrow]' + * 'float64[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.SIGNED_INT_EA_DTYPES) +def any_signed_int_ea_dtype(request): + """ + Parameterized fixture for any signed nullable integer dtype. + + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_NUMPY_DTYPES) +def any_real_numpy_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_DTYPES) +def any_real_numeric_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + + and associated ea dtypes. + """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMPY_DTYPES) +def any_numpy_dtype(request): + """ + Parameterized fixture for all numpy dtypes. + + * bool + * 'bool' + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * str + * 'str' + * 'U' + * bytes + * 'bytes' + * 'datetime64[ns]' + * 'M8[ns]' + * 'timedelta64[ns]' + * 'm8[ns]' + * object + * 'object' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_NULLABLE_DTYPES) +def any_real_nullable_dtype(request): + """ + Parameterized fixture for all real dtypes that can hold NA. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + * 'UInt8' + * 'UInt16' + * 'UInt32' + * 'UInt64' + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + * 'uint8[pyarrow]' + * 'uint16[pyarrow]' + * 'uint32[pyarrow]' + * 'uint64[pyarrow]' + * 'int8[pyarrow]' + * 'int16[pyarrow]' + * 'int32[pyarrow]' + * 'int64[pyarrow]' + * 'float[pyarrow]' + * 'double[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMERIC_DTYPES) +def any_numeric_dtype(request): + """ + Parameterized fixture for all numeric dtypes. 
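Hypothetical consumer of the dtype fixtures in this block: tests usually construct a small object with the parametrized dtype and check the resulting dtype.

import pandas as pd


def test_construct_with_any_numeric_dtype(any_numeric_dtype):
    ser = pd.Series([1, 2, 3], dtype=any_numeric_dtype)
    assert ser.dtype == any_numeric_dtype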
+ + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# categoricals are handled separately +_any_skipna_inferred_dtype = [ + ("string", ["a", np.nan, "c"]), + ("string", ["a", pd.NA, "c"]), + ("mixed", ["a", pd.NaT, "c"]), # pd.NaT not considered valid by is_string_array + ("bytes", [b"a", np.nan, b"c"]), + ("empty", [np.nan, np.nan, np.nan]), + ("empty", []), + ("mixed-integer", ["a", np.nan, 2]), + ("mixed", ["a", np.nan, 2.0]), + ("floating", [1.0, np.nan, 2.0]), + ("integer", [1, np.nan, 2]), + ("mixed-integer-float", [1, np.nan, 2.0]), + ("decimal", [Decimal(1), np.nan, Decimal(2)]), + ("boolean", [True, np.nan, False]), + ("boolean", [True, pd.NA, False]), + ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]), + ("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]), + ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]), + ("complex", [1 + 1j, np.nan, 2 + 2j]), + # The following dtype is commented out due to GH 23554 + # ('timedelta64', [np.timedelta64(1, 'D'), + # np.nan, np.timedelta64(2, 'D')]), + ("timedelta", [timedelta(1), np.nan, timedelta(2)]), + ("time", [time(1), np.nan, time(2)]), + ("period", [Period(2013), pd.NaT, Period(2018)]), + ("interval", [Interval(0, 1), np.nan, Interval(0, 2)]), +] +ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id + + +@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids) +def any_skipna_inferred_dtype(request): + """ + Fixture for all inferred dtypes from _libs.lib.infer_dtype + + The covered (inferred) types are: + * 'string' + * 'empty' + * 'bytes' + * 'mixed' + * 'mixed-integer' + * 'mixed-integer-float' + * 'floating' + * 'integer' + * 'decimal' + * 'boolean' + * 'datetime64' + * 'datetime' + * 'date' + * 'timedelta' + * 'time' + * 'period' + * 'interval' + + Returns + ------- + inferred_dtype : str + The string for the inferred dtype from _libs.lib.infer_dtype + values : np.ndarray + An array of object dtype that will be inferred to have + `inferred_dtype` + + Examples + -------- + >>> from pandas._libs import lib + >>> + >>> def test_something(any_skipna_inferred_dtype): + ... inferred_dtype, values = any_skipna_inferred_dtype + ... # will pass + ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype + """ + inferred_dtype, values = request.param + values = np.array(values, dtype=object) # object dtype to avoid casting + + # correctness of inference tested in tests/dtypes/test_inference.py + return inferred_dtype, values + + +# ---------------------------------------------------------------- +# Misc +# ---------------------------------------------------------------- +@pytest.fixture +def ip(): + """ + Get an instance of IPython.InteractiveShell. + + Will raise a skip if IPython is not installed. 
+ """ + pytest.importorskip("IPython", minversion="6.0.0") + from IPython.core.interactiveshell import InteractiveShell + + # GH#35711 make sure sqlite history file handle is not leaked + from traitlets.config import Config # isort:skip + + c = Config() + c.HistoryManager.hist_file = ":memory:" + + return InteractiveShell(config=c) + + +@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"]) +def spmatrix(request): + """ + Yields scipy sparse matrix classes. + """ + sparse = pytest.importorskip("scipy.sparse") + + return getattr(sparse, request.param + "_matrix") + + +@pytest.fixture( + params=[ + getattr(pd.offsets, o) + for o in pd.offsets.__all__ + if issubclass(getattr(pd.offsets, o), pd.offsets.Tick) and o != "Tick" + ] +) +def tick_classes(request): + """ + Fixture for Tick based datetime offsets available for a time series. + """ + return request.param + + +@pytest.fixture(params=[None, lambda x: x]) +def sort_by_key(request): + """ + Simple fixture for testing keys in sorting methods. + Tests None (no key) and the identity key. + """ + return request.param + + +@pytest.fixture( + params=[ + ("foo", None, None), + ("Egon", "Venkman", None), + ("NCC1701D", "NCC1701D", "NCC1701D"), + # possibly-matching NAs + (np.nan, np.nan, np.nan), + (np.nan, pd.NaT, None), + (np.nan, pd.NA, None), + (pd.NA, pd.NA, pd.NA), + ] +) +def names(request) -> tuple[Hashable, Hashable, Hashable]: + """ + A 3-tuple of names, the first two for operands, the last for a result. + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc]) +def indexer_sli(request): + """ + Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.loc, tm.iloc]) +def indexer_li(request): + """ + Parametrize over loc.__getitem__, iloc.__getitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.iloc]) +def indexer_si(request): + """ + Parametrize over __setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc]) +def indexer_sl(request): + """ + Parametrize over __setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.at, tm.loc]) +def indexer_al(request): + """ + Parametrize over at.__setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.iat, tm.iloc]) +def indexer_ial(request): + """ + Parametrize over iat.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture +def using_array_manager() -> bool: + """ + Fixture to check if the array manager is being used. + """ + return _get_option("mode.data_manager", silent=True) == "array" + + +@pytest.fixture +def using_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is enabled. + """ + return ( + pd.options.mode.copy_on_write is True + and _get_option("mode.data_manager", silent=True) == "block" + ) + + +@pytest.fixture +def warn_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is in warning mode. + """ + return ( + pd.options.mode.copy_on_write == "warn" + and _get_option("mode.data_manager", silent=True) == "block" + ) + + +@pytest.fixture +def using_infer_string() -> bool: + """ + Fixture to check if infer string option is enabled. 
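Hypothetical consumer of the indexer fixtures: each fixture value is a small helper from pandas._testing (tm.setitem, tm.loc, tm.iloc, ...) that exposes a single calling convention, indexer(obj)[key].

import pandas as pd


def test_indexer_sli_setitem(indexer_sli):
    ser = pd.Series([1, 2, 3])
    indexer_sli(ser)[0] = 10  # ser[0], ser.loc[0] or ser.iloc[0], per parameter
    assert ser.iloc[0] == 10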
+ """ + return pd.options.future.infer_string is True + + +warsaws = ["Europe/Warsaw", "dateutil/Europe/Warsaw"] +if zoneinfo is not None: + warsaws.append(zoneinfo.ZoneInfo("Europe/Warsaw")) # type: ignore[arg-type] + + +@pytest.fixture(params=warsaws) +def warsaw(request) -> str: + """ + tzinfo for Europe/Warsaw using pytz, dateutil, or zoneinfo. + """ + return request.param + + +@pytest.fixture() +def arrow_string_storage(): + return ("pyarrow", "pyarrow_numpy") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c804b81c49e7c8abb406f2132909df6036df1c09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/__init__.py @@ -0,0 +1,13 @@ +# ruff: noqa: TCH004 +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # import modules that have public classes/functions + from pandas.io import ( + formats, + json, + stata, + ) + + # mark only those modules as public + __all__ = ["formats", "json", "stata"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/api.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/api.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8b34a61dfc62992a37d9fab3263ee00a28d1fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/api.py @@ -0,0 +1,65 @@ +""" +Data IO api +""" + +from pandas.io.clipboards import read_clipboard +from pandas.io.excel import ( + ExcelFile, + ExcelWriter, + read_excel, +) +from pandas.io.feather_format import read_feather +from pandas.io.gbq import read_gbq +from pandas.io.html import read_html +from pandas.io.json import read_json +from pandas.io.orc import read_orc +from pandas.io.parquet import read_parquet +from pandas.io.parsers import ( + read_csv, + read_fwf, + read_table, +) +from pandas.io.pickle import ( + read_pickle, + to_pickle, +) +from pandas.io.pytables import ( + HDFStore, + read_hdf, +) +from pandas.io.sas import read_sas +from pandas.io.spss import read_spss +from pandas.io.sql import ( + read_sql, + read_sql_query, + read_sql_table, +) +from pandas.io.stata import read_stata +from pandas.io.xml import read_xml + +__all__ = [ + "ExcelFile", + "ExcelWriter", + "HDFStore", + "read_clipboard", + "read_csv", + "read_excel", + "read_feather", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_json", + "read_orc", + "read_parquet", + "read_pickle", + "read_sas", + "read_spss", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_xml", + "to_pickle", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboards.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboards.py new file mode 100644 index 0000000000000000000000000000000000000000..a15e37328e9fa95587d53b58b1af10e1e57fd60c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboards.py @@ -0,0 +1,197 @@ +""" io on the clipboard """ +from __future__ import annotations + +from io import StringIO +from typing import TYPE_CHECKING +import warnings + +from pandas._libs import lib +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.generic import ABCDataFrame + +from pandas import ( + get_option, + option_context, +) + +if TYPE_CHECKING: + from pandas._typing import DtypeBackend + + +def read_clipboard( + sep: str = r"\s+", + dtype_backend: DtypeBackend | 
lib.NoDefault = lib.no_default, + **kwargs, +): # pragma: no cover + r""" + Read text from clipboard and pass to :func:`~pandas.read_csv`. + + Parses clipboard contents similar to how CSV files are parsed + using :func:`~pandas.read_csv`. + + Parameters + ---------- + sep : str, default '\\s+' + A string or regex delimiter. The default of ``'\\s+'`` denotes + one or more whitespace characters. + + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + **kwargs + See :func:`~pandas.read_csv` for the full argument list. + + Returns + ------- + DataFrame + A parsed :class:`~pandas.DataFrame` object. + + See Also + -------- + DataFrame.to_clipboard : Copy object to the system clipboard. + read_csv : Read a comma-separated values (csv) file into DataFrame. + read_fwf : Read a table of fixed-width formatted lines into DataFrame. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) + >>> df.to_clipboard() # doctest: +SKIP + >>> pd.read_clipboard() # doctest: +SKIP + A B C + 0 1 2 3 + 1 4 5 6 + """ + encoding = kwargs.pop("encoding", "utf-8") + + # only utf-8 is valid for passed value because that's what clipboard + # supports + if encoding is not None and encoding.lower().replace("-", "") != "utf8": + raise NotImplementedError("reading from clipboard only supports utf-8 encoding") + + check_dtype_backend(dtype_backend) + + from pandas.io.clipboard import clipboard_get + from pandas.io.parsers import read_csv + + text = clipboard_get() + + # Try to decode (if needed, as "text" might already be a string here). + try: + text = text.decode(kwargs.get("encoding") or get_option("display.encoding")) + except AttributeError: + pass + + # Excel copies into clipboard with \t separation + # inspect no more then the 10 first lines, if they + # all contain an equal number (>0) of tabs, infer + # that this came from excel and set 'sep' accordingly + lines = text[:10000].split("\n")[:-1][:10] + + # Need to remove leading white space, since read_csv + # accepts: + # a b + # 0 1 2 + # 1 3 4 + + counts = {x.lstrip(" ").count("\t") for x in lines} + if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: + sep = "\t" + # check the number of leading tabs in the first line + # to account for index columns + index_length = len(lines[0]) - len(lines[0].lstrip(" \t")) + if index_length != 0: + kwargs.setdefault("index_col", list(range(index_length))) + + # Edge case where sep is specified to be None, return to default + if sep is None and kwargs.get("delim_whitespace") is None: + sep = r"\s+" + + # Regex separator currently only works with python engine. 
+ # Default to python if separator is multi-character (regex) + if len(sep) > 1 and kwargs.get("engine") is None: + kwargs["engine"] = "python" + elif len(sep) > 1 and kwargs.get("engine") == "c": + warnings.warn( + "read_clipboard with regex separator does not work properly with c engine.", + stacklevel=find_stack_level(), + ) + + return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs) + + +def to_clipboard( + obj, excel: bool | None = True, sep: str | None = None, **kwargs +) -> None: # pragma: no cover + """ + Attempt to write text representation of object to the system clipboard + The clipboard can be then pasted into Excel for example. + + Parameters + ---------- + obj : the object to write to the clipboard + excel : bool, defaults to True + if True, use the provided separator, writing in a csv + format for allowing easy pasting into excel. + if False, write a string representation of the object + to the clipboard + sep : optional, defaults to tab + other keywords are passed to to_csv + + Notes + ----- + Requirements for your platform + - Linux: xclip, or xsel (with PyQt4 modules) + - Windows: + - OS X: + """ + encoding = kwargs.pop("encoding", "utf-8") + + # testing if an invalid encoding is passed to clipboard + if encoding is not None and encoding.lower().replace("-", "") != "utf8": + raise ValueError("clipboard only supports utf-8 encoding") + + from pandas.io.clipboard import clipboard_set + + if excel is None: + excel = True + + if excel: + try: + if sep is None: + sep = "\t" + buf = StringIO() + + # clipboard_set (pyperclip) expects unicode + obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs) + text = buf.getvalue() + + clipboard_set(text) + return + except TypeError: + warnings.warn( + "to_clipboard in excel mode requires a single character separator.", + stacklevel=find_stack_level(), + ) + elif sep is not None: + warnings.warn( + "to_clipboard with excel=False ignores the sep argument.", + stacklevel=find_stack_level(), + ) + + if isinstance(obj, ABCDataFrame): + # str(df) has various unhelpful defaults, like truncation + with option_context("display.max_colwidth", None): + objstr = obj.to_string(**kwargs) + else: + objstr = str(obj) + clipboard_set(objstr) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/common.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/common.py new file mode 100644 index 0000000000000000000000000000000000000000..72c9deeb54fc7aaab781b2870171cf983a47da1f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/common.py @@ -0,0 +1,1267 @@ +"""Common IO api utilities""" +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +import codecs +from collections import defaultdict +from collections.abc import ( + Hashable, + Mapping, + Sequence, +) +import dataclasses +import functools +import gzip +from io import ( + BufferedIOBase, + BytesIO, + RawIOBase, + StringIO, + TextIOBase, + TextIOWrapper, +) +import mmap +import os +from pathlib import Path +import re +import tarfile +from typing import ( + IO, + TYPE_CHECKING, + Any, + AnyStr, + DefaultDict, + Generic, + Literal, + TypeVar, + cast, + overload, +) +from urllib.parse import ( + urljoin, + urlparse as parse_url, + uses_netloc, + uses_params, + uses_relative, +) +import warnings +import zipfile + +from pandas._typing import ( + BaseBuffer, + ReadCsvBuffer, +) +from pandas.compat import ( + get_bz2_file, + get_lzma_file, +) +from pandas.compat._optional import import_optional_dependency +from 
pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool, + is_file_like, + is_integer, + is_list_like, +) +from pandas.core.dtypes.generic import ABCMultiIndex + +from pandas.core.shared_docs import _shared_docs + +_VALID_URLS = set(uses_relative + uses_netloc + uses_params) +_VALID_URLS.discard("") +_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://") + +BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer) + + +if TYPE_CHECKING: + from types import TracebackType + + from pandas._typing import ( + CompressionDict, + CompressionOptions, + FilePath, + ReadBuffer, + StorageOptions, + WriteBuffer, + ) + + from pandas import MultiIndex + + +@dataclasses.dataclass +class IOArgs: + """ + Return value of io/common.py:_get_filepath_or_buffer. + """ + + filepath_or_buffer: str | BaseBuffer + encoding: str + mode: str + compression: CompressionDict + should_close: bool = False + + +@dataclasses.dataclass +class IOHandles(Generic[AnyStr]): + """ + Return value of io/common.py:get_handle + + Can be used as a context manager. + + This is used to easily close created buffers and to handle corner cases when + TextIOWrapper is inserted. + + handle: The file handle to be used. + created_handles: All file handles that are created by get_handle + is_wrapped: Whether a TextIOWrapper needs to be detached. + """ + + # handle might not implement the IO-interface + handle: IO[AnyStr] + compression: CompressionDict + created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list) + is_wrapped: bool = False + + def close(self) -> None: + """ + Close all created buffers. + + Note: If a TextIOWrapper was inserted, it is flushed and detached to + avoid closing the potentially user-created buffer. + """ + if self.is_wrapped: + assert isinstance(self.handle, TextIOWrapper) + self.handle.flush() + self.handle.detach() + self.created_handles.remove(self.handle) + for handle in self.created_handles: + handle.close() + self.created_handles = [] + self.is_wrapped = False + + def __enter__(self) -> IOHandles[AnyStr]: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +def is_url(url: object) -> bool: + """ + Check to see if a URL has a valid protocol. + + Parameters + ---------- + url : str or unicode + + Returns + ------- + isurl : bool + If `url` has a valid protocol return True otherwise False. + """ + if not isinstance(url, str): + return False + return parse_url(url).scheme in _VALID_URLS + + +@overload +def _expand_user(filepath_or_buffer: str) -> str: + ... + + +@overload +def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: + ... + + +def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT: + """ + Return the argument with an initial component of ~ or ~user + replaced by that user's home directory. 
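Small illustration of the URL check defined above, assuming an environment where this module is importable; is_url only reports True for strings whose scheme urllib recognizes.

from pandas.io.common import is_url

assert is_url("https://example.com/data.csv") is True
assert is_url("data.csv") is False  # no scheme
assert is_url(123) is False  # non-strings are never URLs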
+ + Parameters + ---------- + filepath_or_buffer : object to be converted if possible + + Returns + ------- + expanded_filepath_or_buffer : an expanded filepath or the + input if not expandable + """ + if isinstance(filepath_or_buffer, str): + return os.path.expanduser(filepath_or_buffer) + return filepath_or_buffer + + +def validate_header_arg(header: object) -> None: + if header is None: + return + if is_integer(header): + header = cast(int, header) + if header < 0: + # GH 27779 + raise ValueError( + "Passing negative integer to header is invalid. " + "For no header, use header=None instead" + ) + return + if is_list_like(header, allow_sets=False): + header = cast(Sequence, header) + if not all(map(is_integer, header)): + raise ValueError("header must be integer or list of integers") + if any(i < 0 for i in header): + raise ValueError("cannot specify multi-index header with negative integers") + return + if is_bool(header): + raise TypeError( + "Passing a bool to header is invalid. Use header=None for no header or " + "header=int or list-like of ints to specify " + "the row(s) making up the column names" + ) + # GH 16338 + raise ValueError("header must be integer or list of integers") + + +@overload +def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str: + ... + + +@overload +def stringify_path( + filepath_or_buffer: BaseBufferT, convert_file_like: bool = ... +) -> BaseBufferT: + ... + + +def stringify_path( + filepath_or_buffer: FilePath | BaseBufferT, + convert_file_like: bool = False, +) -> str | BaseBufferT: + """ + Attempt to convert a path-like object to a string. + + Parameters + ---------- + filepath_or_buffer : object to be converted + + Returns + ------- + str_filepath_or_buffer : maybe a string version of the object + + Notes + ----- + Objects supporting the fspath protocol are coerced + according to its __fspath__ method. + + Any other object is passed through unchanged, which includes bytes, + strings, buffers, or anything else that's not even path-like. + """ + if not convert_file_like and is_file_like(filepath_or_buffer): + # GH 38125: some fsspec objects implement os.PathLike but have already opened a + # file. This prevents opening the file a second time. infer_compression calls + # this function with convert_file_like=True to infer the compression. + return cast(BaseBufferT, filepath_or_buffer) + + if isinstance(filepath_or_buffer, os.PathLike): + filepath_or_buffer = filepath_or_buffer.__fspath__() + return _expand_user(filepath_or_buffer) + + +def urlopen(*args, **kwargs): + """ + Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of + the stdlib. + """ + import urllib.request + + return urllib.request.urlopen(*args, **kwargs) + + +def is_fsspec_url(url: FilePath | BaseBuffer) -> bool: + """ + Returns true if the given URL looks like + something fsspec can handle + """ + return ( + isinstance(url, str) + and bool(_RFC_3986_PATTERN.match(url)) + and not url.startswith(("http://", "https://")) + ) + + +@doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "filepath_or_buffer", +) +def _get_filepath_or_buffer( + filepath_or_buffer: FilePath | BaseBuffer, + encoding: str = "utf-8", + compression: CompressionOptions | None = None, + mode: str = "r", + storage_options: StorageOptions | None = None, +) -> IOArgs: + """ + If the filepath_or_buffer is a url, translate and return the buffer. + Otherwise passthrough. 
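Small illustration of the path helpers in this block: path-like objects are converted through __fspath__, and http(s) URLs are deliberately excluded from the fsspec branch because they are handled separately.

from pathlib import Path

from pandas.io.common import is_fsspec_url, stringify_path

assert stringify_path(Path("folder") / "file.csv") == str(Path("folder") / "file.csv")
assert is_fsspec_url("s3://bucket/key.parquet") is True
assert is_fsspec_url("https://example.com/key.parquet") is False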
+ + Parameters + ---------- + filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path), + or buffer + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + encoding : the encoding to use to decode bytes, default is 'utf-8' + mode : str, optional + + {storage_options} + + + Returns the dataclass IOArgs. + """ + filepath_or_buffer = stringify_path(filepath_or_buffer) + + # handle compression dict + compression_method, compression = get_compression_method(compression) + compression_method = infer_compression(filepath_or_buffer, compression_method) + + # GH21227 internal compression is not used for non-binary handles. + if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode: + warnings.warn( + "compression has no effect when passing a non-binary object as input.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + compression_method = None + + compression = dict(compression, method=compression_method) + + # bz2 and xz do not write the byte order mark for utf-16 and utf-32 + # print a warning when writing such files + if ( + "w" in mode + and compression_method in ["bz2", "xz"] + and encoding in ["utf-16", "utf-32"] + ): + warnings.warn( + f"{compression} will not write the byte order mark for {encoding}", + UnicodeWarning, + stacklevel=find_stack_level(), + ) + + # Use binary mode when converting path-like objects to file-like objects (fsspec) + # except when text mode is explicitly requested. The original mode is returned if + # fsspec is not used. + fsspec_mode = mode + if "t" not in fsspec_mode and "b" not in fsspec_mode: + fsspec_mode += "b" + + if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): + # TODO: fsspec can also handle HTTP via requests, but leaving this + # unchanged. 
using fsspec appears to break the ability to infer if the + # server responded with gzipped data + storage_options = storage_options or {} + + # waiting until now for importing to match intended lazy logic of + # urlopen function defined elsewhere in this module + import urllib.request + + # assuming storage_options is to be interpreted as headers + req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options) + with urlopen(req_info) as req: + content_encoding = req.headers.get("Content-Encoding", None) + if content_encoding == "gzip": + # Override compression based on Content-Encoding header + compression = {"method": "gzip"} + reader = BytesIO(req.read()) + return IOArgs( + filepath_or_buffer=reader, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) + + if is_fsspec_url(filepath_or_buffer): + assert isinstance( + filepath_or_buffer, str + ) # just to appease mypy for this branch + # two special-case s3-like protocols; these have special meaning in Hadoop, + # but are equivalent to just "s3" from fsspec's point of view + # cc #11071 + if filepath_or_buffer.startswith("s3a://"): + filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://") + if filepath_or_buffer.startswith("s3n://"): + filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://") + fsspec = import_optional_dependency("fsspec") + + # If botocore is installed we fallback to reading with anon=True + # to allow reads from public buckets + err_types_to_retry_with_anon: list[Any] = [] + try: + import_optional_dependency("botocore") + from botocore.exceptions import ( + ClientError, + NoCredentialsError, + ) + + err_types_to_retry_with_anon = [ + ClientError, + NoCredentialsError, + PermissionError, + ] + except ImportError: + pass + + try: + file_obj = fsspec.open( + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) + ).open() + # GH 34626 Reads from Public Buckets without Credentials needs anon=True + except tuple(err_types_to_retry_with_anon): + if storage_options is None: + storage_options = {"anon": True} + else: + # don't mutate user input. + storage_options = dict(storage_options) + storage_options["anon"] = True + file_obj = fsspec.open( + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) + ).open() + + return IOArgs( + filepath_or_buffer=file_obj, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) + elif storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) + + if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): + return IOArgs( + filepath_or_buffer=_expand_user(filepath_or_buffer), + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) + + # is_file_like requires (read | write) & __iter__ but __iter__ is only + # needed for read_csv(engine=python) + if not ( + hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write") + ): + msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}" + raise ValueError(msg) + + return IOArgs( + filepath_or_buffer=filepath_or_buffer, + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) + + +def file_path_to_url(path: str) -> str: + """ + converts an absolute native path to a FILE URL. 
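Minimal illustration of the file_path_to_url helper introduced here, using an example POSIX-style path; the urllib import happens lazily inside the function.

from pandas.io.common import file_path_to_url

url = file_path_to_url("/tmp/data.csv")  # example path
assert url.startswith("file:")
assert url.endswith("/tmp/data.csv")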
+ + Parameters + ---------- + path : a path in native format + + Returns + ------- + a valid FILE URL + """ + # lazify expensive import (~30ms) + from urllib.request import pathname2url + + return urljoin("file:", pathname2url(path)) + + +extension_to_compression = { + ".tar": "tar", + ".tar.gz": "tar", + ".tar.bz2": "tar", + ".tar.xz": "tar", + ".gz": "gzip", + ".bz2": "bz2", + ".zip": "zip", + ".xz": "xz", + ".zst": "zstd", +} +_supported_compressions = set(extension_to_compression.values()) + + +def get_compression_method( + compression: CompressionOptions, +) -> tuple[str | None, CompressionDict]: + """ + Simplifies a compression argument to a compression method string and + a mapping containing additional arguments. + + Parameters + ---------- + compression : str or mapping + If string, specifies the compression method. If mapping, value at key + 'method' specifies compression method. + + Returns + ------- + tuple of ({compression method}, Optional[str] + {compression arguments}, Dict[str, Any]) + + Raises + ------ + ValueError on mapping missing 'method' key + """ + compression_method: str | None + if isinstance(compression, Mapping): + compression_args = dict(compression) + try: + compression_method = compression_args.pop("method") + except KeyError as err: + raise ValueError("If mapping, compression must have key 'method'") from err + else: + compression_args = {} + compression_method = compression + return compression_method, compression_args + + +@doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer") +def infer_compression( + filepath_or_buffer: FilePath | BaseBuffer, compression: str | None +) -> str | None: + """ + Get the compression method for filepath_or_buffer. If compression='infer', + the inferred compression method is returned. Otherwise, the input + compression method is returned unchanged, unless it's invalid, in which + case an error is raised. + + Parameters + ---------- + filepath_or_buffer : str or file handle + File path or object. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + Returns + ------- + string or None + + Raises + ------ + ValueError on invalid compression specified. + """ + if compression is None: + return None + + # Infer compression + if compression == "infer": + # Convert all path types (e.g. pathlib.Path) to strings + filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True) + if not isinstance(filepath_or_buffer, str): + # Cannot infer compression of a buffer, assume no compression + return None + + # Infer compression from the filename/URL extension + for extension, compression in extension_to_compression.items(): + if filepath_or_buffer.lower().endswith(extension): + return compression + return None + + # Compression has been specified. 
Check that it's valid + if compression in _supported_compressions: + return compression + + valid = ["infer", None] + sorted(_supported_compressions) + msg = ( + f"Unrecognized compression type: {compression}\n" + f"Valid compression types are {valid}" + ) + raise ValueError(msg) + + +def check_parent_directory(path: Path | str) -> None: + """ + Check if parent directory of a file exists, raise OSError if it does not + + Parameters + ---------- + path: Path or str + Path to check parent directory of + """ + parent = Path(path).parent + if not parent.is_dir(): + raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'") + + +@overload +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = ..., + compression: CompressionOptions = ..., + memory_map: bool = ..., + is_text: Literal[False], + errors: str | None = ..., + storage_options: StorageOptions = ..., +) -> IOHandles[bytes]: + ... + + +@overload +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = ..., + compression: CompressionOptions = ..., + memory_map: bool = ..., + is_text: Literal[True] = ..., + errors: str | None = ..., + storage_options: StorageOptions = ..., +) -> IOHandles[str]: + ... + + +@overload +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = ..., + compression: CompressionOptions = ..., + memory_map: bool = ..., + is_text: bool = ..., + errors: str | None = ..., + storage_options: StorageOptions = ..., +) -> IOHandles[str] | IOHandles[bytes]: + ... + + +@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf") +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = None, + compression: CompressionOptions | None = None, + memory_map: bool = False, + is_text: bool = True, + errors: str | None = None, + storage_options: StorageOptions | None = None, +) -> IOHandles[str] | IOHandles[bytes]: + """ + Get file handle for given path/buffer and mode. + + Parameters + ---------- + path_or_buf : str or file handle + File path or object. + mode : str + Mode to open path_or_buf with. + encoding : str or None + Encoding to use. + {compression_options} + + May be a dict with key 'method' as compression mode + and other keys as compression options if compression + mode is 'zip'. + + Passing compression options as keys in dict is + supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'. + + .. versionchanged:: 1.4.0 Zstandard support. + + memory_map : bool, default False + See parsers._parser_params for more information. Only used by read_csv. + is_text : bool, default True + Whether the type of the content passed to the file/buffer is string or + bytes. This is not the same as `"b" not in mode`. If a string content is + passed to a binary file/buffer, a wrapper is inserted. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. + storage_options: StorageOptions = None + Passed to _get_filepath_or_buffer + + Returns the dataclass IOHandles + """ + # Windows does not default to utf-8. 
Set to utf-8 for a consistent behavior + encoding = encoding or "utf-8" + + errors = errors or "strict" + + # read_csv does not know whether the buffer is opened in binary/text mode + if _is_binary_mode(path_or_buf, mode) and "b" not in mode: + mode += "b" + + # validate encoding and errors + codecs.lookup(encoding) + if isinstance(errors, str): + codecs.lookup_error(errors) + + # open URLs + ioargs = _get_filepath_or_buffer( + path_or_buf, + encoding=encoding, + compression=compression, + mode=mode, + storage_options=storage_options, + ) + + handle = ioargs.filepath_or_buffer + handles: list[BaseBuffer] + + # memory mapping needs to be the first step + # only used for read_csv + handle, memory_map, handles = _maybe_memory_map(handle, memory_map) + + is_path = isinstance(handle, str) + compression_args = dict(ioargs.compression) + compression = compression_args.pop("method") + + # Only for write methods + if "r" not in mode and is_path: + check_parent_directory(str(handle)) + + if compression: + if compression != "zstd": + # compression libraries do not like an explicit text-mode + ioargs.mode = ioargs.mode.replace("t", "") + elif compression == "zstd" and "b" not in ioargs.mode: + # python-zstandard defaults to text mode, but we always expect + # compression libraries to use binary mode. + ioargs.mode += "b" + + # GZ Compression + if compression == "gzip": + if isinstance(handle, str): + # error: Incompatible types in assignment (expression has type + # "GzipFile", variable has type "Union[str, BaseBuffer]") + handle = gzip.GzipFile( # type: ignore[assignment] + filename=handle, + mode=ioargs.mode, + **compression_args, + ) + else: + handle = gzip.GzipFile( + # No overload variant of "GzipFile" matches argument types + # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" + fileobj=handle, # type: ignore[call-overload] + mode=ioargs.mode, + **compression_args, + ) + + # BZ Compression + elif compression == "bz2": + # Overload of "BZ2File" to handle pickle protocol 5 + # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" + handle = get_bz2_file()( # type: ignore[call-overload] + handle, + mode=ioargs.mode, + **compression_args, + ) + + # ZIP Compression + elif compression == "zip": + # error: Argument 1 to "_BytesZipFile" has incompatible type + # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]], + # ReadBuffer[bytes], WriteBuffer[bytes]]" + handle = _BytesZipFile( + handle, ioargs.mode, **compression_args # type: ignore[arg-type] + ) + if handle.buffer.mode == "r": + handles.append(handle) + zip_names = handle.buffer.namelist() + if len(zip_names) == 1: + handle = handle.buffer.open(zip_names.pop()) + elif not zip_names: + raise ValueError(f"Zero files found in ZIP file {path_or_buf}") + else: + raise ValueError( + "Multiple files found in ZIP file. 
" + f"Only one file per ZIP: {zip_names}" + ) + + # TAR Encoding + elif compression == "tar": + compression_args.setdefault("mode", ioargs.mode) + if isinstance(handle, str): + handle = _BytesTarFile(name=handle, **compression_args) + else: + # error: Argument "fileobj" to "_BytesTarFile" has incompatible + # type "BaseBuffer"; expected "Union[ReadBuffer[bytes], + # WriteBuffer[bytes], None]" + handle = _BytesTarFile( + fileobj=handle, **compression_args # type: ignore[arg-type] + ) + assert isinstance(handle, _BytesTarFile) + if "r" in handle.buffer.mode: + handles.append(handle) + files = handle.buffer.getnames() + if len(files) == 1: + file = handle.buffer.extractfile(files[0]) + assert file is not None + handle = file + elif not files: + raise ValueError(f"Zero files found in TAR archive {path_or_buf}") + else: + raise ValueError( + "Multiple files found in TAR archive. " + f"Only one file per TAR archive: {files}" + ) + + # XZ Compression + elif compression == "xz": + # error: Argument 1 to "LZMAFile" has incompatible type "Union[str, + # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str], + # PathLike[bytes]], IO[bytes]], None]" + handle = get_lzma_file()( + handle, ioargs.mode, **compression_args # type: ignore[arg-type] + ) + + # Zstd Compression + elif compression == "zstd": + zstd = import_optional_dependency("zstandard") + if "r" in ioargs.mode: + open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)} + else: + open_args = {"cctx": zstd.ZstdCompressor(**compression_args)} + handle = zstd.open( + handle, + mode=ioargs.mode, + **open_args, + ) + + # Unrecognized Compression + else: + msg = f"Unrecognized compression type: {compression}" + raise ValueError(msg) + + assert not isinstance(handle, str) + handles.append(handle) + + elif isinstance(handle, str): + # Check whether the filename is to be opened in binary mode. + # Binary mode does not support 'encoding' and 'newline'. 
+ if ioargs.encoding and "b" not in ioargs.mode: + # Encoding + handle = open( + handle, + ioargs.mode, + encoding=ioargs.encoding, + errors=errors, + newline="", + ) + else: + # Binary mode + handle = open(handle, ioargs.mode) + handles.append(handle) + + # Convert BytesIO or file objects passed with an encoding + is_wrapped = False + if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase): + # not added to handles as it does not open/buffer resources + handle = _BytesIOWrapper( + handle, + encoding=ioargs.encoding, + ) + elif is_text and ( + compression or memory_map or _is_binary_mode(handle, ioargs.mode) + ): + if ( + not hasattr(handle, "readable") + or not hasattr(handle, "writable") + or not hasattr(handle, "seekable") + ): + handle = _IOWrapper(handle) + # error: Argument 1 to "TextIOWrapper" has incompatible type + # "_IOWrapper"; expected "IO[bytes]" + handle = TextIOWrapper( + handle, # type: ignore[arg-type] + encoding=ioargs.encoding, + errors=errors, + newline="", + ) + handles.append(handle) + # only marked as wrapped when the caller provided a handle + is_wrapped = not ( + isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close + ) + + if "r" in ioargs.mode and not hasattr(handle, "read"): + raise TypeError( + "Expected file path name or file-like object, " + f"got {type(ioargs.filepath_or_buffer)} type" + ) + + handles.reverse() # close the most recently added buffer first + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + handles.append(ioargs.filepath_or_buffer) + + return IOHandles( + # error: Argument "handle" to "IOHandles" has incompatible type + # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], + # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" + handle=handle, # type: ignore[arg-type] + # error: Argument "created_handles" to "IOHandles" has incompatible type + # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" + created_handles=handles, # type: ignore[arg-type] + is_wrapped=is_wrapped, + compression=ioargs.compression, + ) + + +# error: Definition of "__enter__" in base class "IOBase" is incompatible +# with definition in base class "BinaryIO" +class _BufferedWriter(BytesIO, ABC): # type: ignore[misc] + """ + Some objects do not support multiple .write() calls (TarFile and ZipFile). + This wrapper writes to the underlying buffer on close. + """ + + buffer = BytesIO() + + @abstractmethod + def write_to_buffer(self) -> None: + ... 
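# Hedged sketch of calling get_handle() directly, assuming the internal
# module path `pandas.io.common` and that the returned IOHandles is usable
# as a context manager (it closes every handle it created on exit).
import pandas as pd
from pandas.io.common import get_handle

pd.DataFrame({"a": [1, 2]}).to_csv("demo.csv.gz", index=False)
with get_handle("demo.csv.gz", "r", compression="infer") as handles:
    # gzip was inferred from the ".gz" suffix; handles.handle is a text stream
    print(handles.handle.read())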
+ + def close(self) -> None: + if self.closed: + # already closed + return + if self.getbuffer().nbytes: + # write to buffer + self.seek(0) + with self.buffer: + self.write_to_buffer() + else: + self.buffer.close() + super().close() + + +class _BytesTarFile(_BufferedWriter): + def __init__( + self, + name: str | None = None, + mode: Literal["r", "a", "w", "x"] = "r", + fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None, + archive_name: str | None = None, + **kwargs, + ) -> None: + super().__init__() + self.archive_name = archive_name + self.name = name + # error: Incompatible types in assignment (expression has type "TarFile", + # base class "_BufferedWriter" defined the type as "BytesIO") + self.buffer: tarfile.TarFile = tarfile.TarFile.open( # type: ignore[assignment] + name=name, + mode=self.extend_mode(mode), + fileobj=fileobj, + **kwargs, + ) + + def extend_mode(self, mode: str) -> str: + mode = mode.replace("b", "") + if mode != "w": + return mode + if self.name is not None: + suffix = Path(self.name).suffix + if suffix in (".gz", ".xz", ".bz2"): + mode = f"{mode}:{suffix[1:]}" + return mode + + def infer_filename(self) -> str | None: + """ + If an explicit archive_name is not given, we still want the file inside the zip + file not to be named something.tar, because that causes confusion (GH39465). + """ + if self.name is None: + return None + + filename = Path(self.name) + if filename.suffix == ".tar": + return filename.with_suffix("").name + elif filename.suffix in (".tar.gz", ".tar.bz2", ".tar.xz"): + return filename.with_suffix("").with_suffix("").name + return filename.name + + def write_to_buffer(self) -> None: + # TarFile needs a non-empty string + archive_name = self.archive_name or self.infer_filename() or "tar" + tarinfo = tarfile.TarInfo(name=archive_name) + tarinfo.size = len(self.getvalue()) + self.buffer.addfile(tarinfo, self) + + +class _BytesZipFile(_BufferedWriter): + def __init__( + self, + file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], + mode: str, + archive_name: str | None = None, + **kwargs, + ) -> None: + super().__init__() + mode = mode.replace("b", "") + self.archive_name = archive_name + + kwargs.setdefault("compression", zipfile.ZIP_DEFLATED) + # error: Incompatible types in assignment (expression has type "ZipFile", + # base class "_BufferedWriter" defined the type as "BytesIO") + self.buffer: zipfile.ZipFile = zipfile.ZipFile( # type: ignore[assignment] + file, mode, **kwargs + ) + + def infer_filename(self) -> str | None: + """ + If an explicit archive_name is not given, we still want the file inside the zip + file not to be named something.zip, because that causes confusion (GH39465). + """ + if isinstance(self.buffer.filename, (os.PathLike, str)): + filename = Path(self.buffer.filename) + if filename.suffix == ".zip": + return filename.with_suffix("").name + return filename.name + return None + + def write_to_buffer(self) -> None: + # ZipFile needs a non-empty string + archive_name = self.archive_name or self.infer_filename() or "zip" + self.buffer.writestr(archive_name, self.getvalue()) + + +class _IOWrapper: + # TextIOWrapper is overly strict: it request that the buffer has seekable, readable, + # and writable. If we have a read-only buffer, we shouldn't need writable and vice + # versa. Some buffers, are seek/read/writ-able but they do not have the "-able" + # methods, e.g., tempfile.SpooledTemporaryFile. + # If a buffer does not have the above "-able" methods, we simple assume they are + # seek/read/writ-able. 
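# From the public API, _BytesTarFile surfaces as tar compression targets: a
# ".tar.gz" suffix maps to the "tar" method, the CSV bytes are buffered, and
# a single archive member is written on close. The file name is illustrative.
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
df.to_csv("demo.csv.tar.gz", index=False)
print(pd.read_csv("demo.csv.tar.gz"))  # the lone member is extracted on read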
+ def __init__(self, buffer: BaseBuffer) -> None: + self.buffer = buffer + + def __getattr__(self, name: str): + return getattr(self.buffer, name) + + def readable(self) -> bool: + if hasattr(self.buffer, "readable"): + return self.buffer.readable() + return True + + def seekable(self) -> bool: + if hasattr(self.buffer, "seekable"): + return self.buffer.seekable() + return True + + def writable(self) -> bool: + if hasattr(self.buffer, "writable"): + return self.buffer.writable() + return True + + +class _BytesIOWrapper: + # Wrapper that wraps a StringIO buffer and reads bytes from it + # Created for compat with pyarrow read_csv + def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None: + self.buffer = buffer + self.encoding = encoding + # Because a character can be represented by more than 1 byte, + # it is possible that reading will produce more bytes than n + # We store the extra bytes in this overflow variable, and append the + # overflow to the front of the bytestring the next time reading is performed + self.overflow = b"" + + def __getattr__(self, attr: str): + return getattr(self.buffer, attr) + + def read(self, n: int | None = -1) -> bytes: + assert self.buffer is not None + bytestring = self.buffer.read(n).encode(self.encoding) + # When n=-1/n greater than remaining bytes: Read entire file/rest of file + combined_bytestring = self.overflow + bytestring + if n is None or n < 0 or n >= len(combined_bytestring): + self.overflow = b"" + return combined_bytestring + else: + to_return = combined_bytestring[:n] + self.overflow = combined_bytestring[n:] + return to_return + + +def _maybe_memory_map( + handle: str | BaseBuffer, memory_map: bool +) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]: + """Try to memory map file/buffer.""" + handles: list[BaseBuffer] = [] + memory_map &= hasattr(handle, "fileno") or isinstance(handle, str) + if not memory_map: + return handle, memory_map, handles + + # mmap used by only read_csv + handle = cast(ReadCsvBuffer, handle) + + # need to open the file first + if isinstance(handle, str): + handle = open(handle, "rb") + handles.append(handle) + + try: + # open mmap and adds *-able + # error: Argument 1 to "_IOWrapper" has incompatible type "mmap"; + # expected "BaseBuffer" + wrapped = _IOWrapper( + mmap.mmap( + handle.fileno(), 0, access=mmap.ACCESS_READ # type: ignore[arg-type] + ) + ) + finally: + for handle in reversed(handles): + # error: "BaseBuffer" has no attribute "close" + handle.close() # type: ignore[attr-defined] + + return wrapped, memory_map, [wrapped] + + +def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool: + """Test whether file exists.""" + exists = False + filepath_or_buffer = stringify_path(filepath_or_buffer) + if not isinstance(filepath_or_buffer, str): + return exists + try: + exists = os.path.exists(filepath_or_buffer) + # gh-5874: if the filepath is too long will raise here + except (TypeError, ValueError): + pass + return exists + + +def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool: + """Whether the handle is opened in binary mode""" + # specified by user + if "t" in mode or "b" in mode: + return "b" in mode + + # exceptions + text_classes = ( + # classes that expect string but have 'b' in mode + codecs.StreamWriter, + codecs.StreamReader, + codecs.StreamReaderWriter, + ) + if issubclass(type(handle), text_classes): + return False + + return isinstance(handle, _get_binary_io_classes()) or "b" in getattr( + handle, "mode", mode + ) + + +@functools.lru_cache +def 
_get_binary_io_classes() -> tuple[type, ...]: + """IO classes that that expect bytes""" + binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase) + + # python-zstandard doesn't use any of the builtin base classes; instead we + # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks. + # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard + # so we have to get it from a `zstd.ZstdDecompressor` instance. + # See also https://github.com/indygreg/python-zstandard/pull/165. + zstd = import_optional_dependency("zstandard", errors="ignore") + if zstd is not None: + with zstd.ZstdDecompressor().stream_reader(b"") as reader: + binary_classes += (type(reader),) + + return binary_classes + + +def is_potential_multi_index( + columns: Sequence[Hashable] | MultiIndex, + index_col: bool | Sequence[int] | None = None, +) -> bool: + """ + Check whether or not the `columns` parameter + could be converted into a MultiIndex. + + Parameters + ---------- + columns : array-like + Object which may or may not be convertible into a MultiIndex + index_col : None, bool or list, optional + Column or columns to use as the (possibly hierarchical) index + + Returns + ------- + bool : Whether or not columns could become a MultiIndex + """ + if index_col is None or isinstance(index_col, bool): + index_col = [] + + return bool( + len(columns) + and not isinstance(columns, ABCMultiIndex) + and all(isinstance(c, tuple) for c in columns if c not in list(index_col)) + ) + + +def dedup_names( + names: Sequence[Hashable], is_potential_multiindex: bool +) -> Sequence[Hashable]: + """ + Rename column names if duplicates exist. + + Currently the renaming is done by appending a period and an autonumeric, + but a custom pattern may be supported in the future. 
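# Small sketch of is_potential_multi_index(); importing it from
# `pandas.io.common` mirrors the vendored module above but is an internal
# detail, not a public API.
from pandas.io.common import is_potential_multi_index

print(is_potential_multi_index([("a", "x"), ("a", "y")]))              # True
print(is_potential_multi_index(["a", ("a", "y")]))                     # False
print(is_potential_multi_index(["id", ("a", "y")], index_col=["id"]))  # True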
+ + Examples + -------- + >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False) + ['x', 'y', 'x.1', 'x.2'] + """ + names = list(names) # so we can index + counts: DefaultDict[Hashable, int] = defaultdict(int) + + for i, col in enumerate(names): + cur_count = counts[col] + + while cur_count > 0: + counts[col] = cur_count + 1 + + if is_potential_multiindex: + # for mypy + assert isinstance(col, tuple) + col = col[:-1] + (f"{col[-1]}.{cur_count}",) + else: + col = f"{col}.{cur_count}" + cur_count = counts[col] + + names[i] = col + counts[col] = cur_count + 1 + + return names diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/feather_format.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/feather_format.py new file mode 100644 index 0000000000000000000000000000000000000000..d0aaf83b84cb241ebdd872c1c8b7982fadc9acdb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/feather_format.py @@ -0,0 +1,143 @@ +""" feather-format compat """ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc +from pandas.util._validators import check_dtype_backend + +import pandas as pd +from pandas.core.api import DataFrame +from pandas.core.shared_docs import _shared_docs + +from pandas.io._util import arrow_string_types_mapper +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas._typing import ( + DtypeBackend, + FilePath, + ReadBuffer, + StorageOptions, + WriteBuffer, + ) + + +@doc(storage_options=_shared_docs["storage_options"]) +def to_feather( + df: DataFrame, + path: FilePath | WriteBuffer[bytes], + storage_options: StorageOptions | None = None, + **kwargs: Any, +) -> None: + """ + Write a DataFrame to the binary Feather format. + + Parameters + ---------- + df : DataFrame + path : str, path object, or file-like object + {storage_options} + **kwargs : + Additional keywords passed to `pyarrow.feather.write_feather`. + + """ + import_optional_dependency("pyarrow") + from pyarrow import feather + + if not isinstance(df, DataFrame): + raise ValueError("feather only support IO with DataFrames") + + with get_handle( + path, "wb", storage_options=storage_options, is_text=False + ) as handles: + feather.write_feather(df, handles.handle, **kwargs) + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_feather( + path: FilePath | ReadBuffer[bytes], + columns: Sequence[Hashable] | None = None, + use_threads: bool = True, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame: + """ + Load a feather-format object from the file path. + + Parameters + ---------- + path : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: ``file://localhost/path/to/table.feather``. + columns : sequence, default None + If not provided, all columns are read. + use_threads : bool, default True + Whether to parallelize reading using multiple threads. 
+ {storage_options} + + dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + type of object stored in file + + Examples + -------- + >>> df = pd.read_feather("path/to/file.feather") # doctest: +SKIP + """ + import_optional_dependency("pyarrow") + from pyarrow import feather + + # import utils to register the pyarrow extension types + import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401 + + check_dtype_backend(dtype_backend) + + with get_handle( + path, "rb", storage_options=storage_options, is_text=False + ) as handles: + if dtype_backend is lib.no_default and not using_pyarrow_string_dtype(): + return feather.read_feather( + handles.handle, columns=columns, use_threads=bool(use_threads) + ) + + pa_table = feather.read_table( + handles.handle, columns=columns, use_threads=bool(use_threads) + ) + + if dtype_backend == "numpy_nullable": + from pandas.io._util import _arrow_dtype_mapping + + return pa_table.to_pandas(types_mapper=_arrow_dtype_mapping().get) + + elif dtype_backend == "pyarrow": + return pa_table.to_pandas(types_mapper=pd.ArrowDtype) + + elif using_pyarrow_string_dtype(): + return pa_table.to_pandas(types_mapper=arrow_string_types_mapper()) + else: + raise NotImplementedError diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/gbq.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/gbq.py new file mode 100644 index 0000000000000000000000000000000000000000..350002bf461ff91f477371c1570e8cbf2ee090bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/gbq.py @@ -0,0 +1,255 @@ +""" Google BigQuery support """ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) +import warnings + +from pandas.compat._optional import import_optional_dependency +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + import google.auth + + from pandas import DataFrame + + +def _try_import(): + # since pandas is a dependency of pandas-gbq + # we need to import on first use + msg = ( + "pandas-gbq is required to load data from Google BigQuery. " + "See the docs: https://pandas-gbq.readthedocs.io." + ) + pandas_gbq = import_optional_dependency("pandas_gbq", extra=msg) + return pandas_gbq + + +def read_gbq( + query: str, + project_id: str | None = None, + index_col: str | None = None, + col_order: list[str] | None = None, + reauth: bool = False, + auth_local_webserver: bool = True, + dialect: str | None = None, + location: str | None = None, + configuration: dict[str, Any] | None = None, + credentials: google.auth.credentials.Credentials | None = None, + use_bqstorage_api: bool | None = None, + max_results: int | None = None, + progress_bar_type: str | None = None, +) -> DataFrame: + """ + Load data from Google BigQuery. + + .. deprecated:: 2.2.0 + + Please use ``pandas_gbq.read_gbq`` instead. + + This function requires the `pandas-gbq package + `__. + + See the `How to authenticate with Google BigQuery + `__ + guide for authentication instructions. + + Parameters + ---------- + query : str + SQL-Like Query to return data values. + project_id : str, optional + Google BigQuery Account project ID. 
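# Feather round trip through the helpers above; requires pyarrow. The file
# name is arbitrary.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df.to_feather("demo.feather")                      # delegates to to_feather()
print(pd.read_feather("demo.feather", columns=["a"]))
print(pd.read_feather("demo.feather", dtype_backend="pyarrow").dtypes)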
Optional when available from + the environment. + index_col : str, optional + Name of result column to use for index in results DataFrame. + col_order : list(str), optional + List of BigQuery column names in the desired order for results + DataFrame. + reauth : bool, default False + Force Google BigQuery to re-authenticate the user. This is useful + if multiple accounts are used. + auth_local_webserver : bool, default True + Use the `local webserver flow`_ instead of the `console flow`_ + when getting user credentials. + + .. _local webserver flow: + https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server + .. _console flow: + https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console + + *New in version 0.2.0 of pandas-gbq*. + + .. versionchanged:: 1.5.0 + Default value is changed to ``True``. Google has deprecated the + ``auth_local_webserver = False`` `"out of band" (copy-paste) + flow + `_. + dialect : str, default 'legacy' + Note: The default value is changing to 'standard' in a future version. + + SQL syntax dialect to use. Value can be one of: + + ``'legacy'`` + Use BigQuery's legacy SQL dialect. For more information see + `BigQuery Legacy SQL Reference + `__. + ``'standard'`` + Use BigQuery's standard SQL, which is + compliant with the SQL 2011 standard. For more information + see `BigQuery Standard SQL Reference + `__. + location : str, optional + Location where the query job should run. See the `BigQuery locations + documentation + `__ for a + list of available locations. The location must match that of any + datasets used in the query. + + *New in version 0.5.0 of pandas-gbq*. + configuration : dict, optional + Query config parameters for job processing. + For example: + + configuration = {'query': {'useQueryCache': False}} + + For more information see `BigQuery REST API Reference + `__. + credentials : google.auth.credentials.Credentials, optional + Credentials for accessing Google APIs. Use this parameter to override + default credentials, such as to use Compute Engine + :class:`google.auth.compute_engine.Credentials` or Service Account + :class:`google.oauth2.service_account.Credentials` directly. + + *New in version 0.8.0 of pandas-gbq*. + use_bqstorage_api : bool, default False + Use the `BigQuery Storage API + `__ to + download query results quickly, but at an increased cost. To use this + API, first `enable it in the Cloud Console + `__. + You must also have the `bigquery.readsessions.create + `__ + permission on the project you are billing queries to. + + This feature requires version 0.10.0 or later of the ``pandas-gbq`` + package. It also requires the ``google-cloud-bigquery-storage`` and + ``fastavro`` packages. + + max_results : int, optional + If set, limit the maximum number of rows to fetch from the query + results. + + progress_bar_type : Optional, str + If set, use the `tqdm `__ library to + display a progress bar while the data downloads. Install the + ``tqdm`` package to use this feature. + + Possible values of ``progress_bar_type`` include: + + ``None`` + No progress bar. + ``'tqdm'`` + Use the :func:`tqdm.tqdm` function to print a progress bar + to :data:`sys.stderr`. + ``'tqdm_notebook'`` + Use the :func:`tqdm.tqdm_notebook` function to display a + progress bar as a Jupyter notebook widget. 
+ ``'tqdm_gui'`` + Use the :func:`tqdm.tqdm_gui` function to display a + progress bar as a graphical dialog box. + + Returns + ------- + df: DataFrame + DataFrame representing results of query. + + See Also + -------- + pandas_gbq.read_gbq : This function in the pandas-gbq library. + DataFrame.to_gbq : Write a DataFrame to Google BigQuery. + + Examples + -------- + Example taken from `Google BigQuery documentation + `_ + + >>> sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;" + >>> df = pd.read_gbq(sql, dialect="standard") # doctest: +SKIP + >>> project_id = "your-project-id" # doctest: +SKIP + >>> df = pd.read_gbq(sql, + ... project_id=project_id, + ... dialect="standard" + ... ) # doctest: +SKIP + """ + warnings.warn( + "read_gbq is deprecated and will be removed in a future version. " + "Please use pandas_gbq.read_gbq instead: " + "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.read_gbq", + FutureWarning, + stacklevel=find_stack_level(), + ) + pandas_gbq = _try_import() + + kwargs: dict[str, str | bool | int | None] = {} + + # START: new kwargs. Don't populate unless explicitly set. + if use_bqstorage_api is not None: + kwargs["use_bqstorage_api"] = use_bqstorage_api + if max_results is not None: + kwargs["max_results"] = max_results + + kwargs["progress_bar_type"] = progress_bar_type + # END: new kwargs + + return pandas_gbq.read_gbq( + query, + project_id=project_id, + index_col=index_col, + col_order=col_order, + reauth=reauth, + auth_local_webserver=auth_local_webserver, + dialect=dialect, + location=location, + configuration=configuration, + credentials=credentials, + **kwargs, + ) + + +def to_gbq( + dataframe: DataFrame, + destination_table: str, + project_id: str | None = None, + chunksize: int | None = None, + reauth: bool = False, + if_exists: str = "fail", + auth_local_webserver: bool = True, + table_schema: list[dict[str, str]] | None = None, + location: str | None = None, + progress_bar: bool = True, + credentials: google.auth.credentials.Credentials | None = None, +) -> None: + warnings.warn( + "to_gbq is deprecated and will be removed in a future version. 
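# Deprecated-path sketch: pd.read_gbq forwards to pandas_gbq.read_gbq and
# emits a FutureWarning. The project id is a placeholder and real Google
# credentials are required, so this is not runnable as-is.
import pandas as pd

sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;"
df = pd.read_gbq(sql, project_id="your-project-id", dialect="standard")  # doctest: +SKIP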
" + "Please use pandas_gbq.to_gbq instead: " + "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq", + FutureWarning, + stacklevel=find_stack_level(), + ) + pandas_gbq = _try_import() + pandas_gbq.to_gbq( + dataframe, + destination_table, + project_id=project_id, + chunksize=chunksize, + reauth=reauth, + if_exists=if_exists, + auth_local_webserver=auth_local_webserver, + table_schema=table_schema, + location=location, + progress_bar=progress_bar, + credentials=credentials, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/orc.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/orc.py new file mode 100644 index 0000000000000000000000000000000000000000..fed9463c38d5deb907cb7df0adee03152380d7a0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/orc.py @@ -0,0 +1,245 @@ +""" orc compat """ +from __future__ import annotations + +import io +from types import ModuleType +from typing import ( + TYPE_CHECKING, + Any, + Literal, +) + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.util._validators import check_dtype_backend + +import pandas as pd +from pandas.core.indexes.api import default_index + +from pandas.io._util import arrow_string_types_mapper +from pandas.io.common import ( + get_handle, + is_fsspec_url, +) + +if TYPE_CHECKING: + import fsspec + import pyarrow.fs + + from pandas._typing import ( + DtypeBackend, + FilePath, + ReadBuffer, + WriteBuffer, + ) + + from pandas.core.frame import DataFrame + + +def read_orc( + path: FilePath | ReadBuffer[bytes], + columns: list[str] | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem | None = None, + **kwargs: Any, +) -> DataFrame: + """ + Load an ORC object from the file path, returning a DataFrame. + + Parameters + ---------- + path : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.orc``. + columns : list, default None + If not None, only these columns will be read from the file. + Output always follows the ordering of the file and not the columns list. + This mirrors the original behaviour of + :external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + filesystem : fsspec or pyarrow filesystem, default None + Filesystem object to use when reading the parquet file. + + .. versionadded:: 2.1.0 + + **kwargs + Any additional kwargs are passed to pyarrow. + + Returns + ------- + DataFrame + + Notes + ----- + Before using this function you should read the :ref:`user guide about ORC ` + and :ref:`install optional dependencies `. + + If ``path`` is a URI scheme pointing to a local or remote file (e.g. "s3://"), + a ``pyarrow.fs`` filesystem will be attempted to read the file. 
You can also pass a + pyarrow or fsspec filesystem object into the filesystem keyword to override this + behavior. + + Examples + -------- + >>> result = pd.read_orc("example_pa.orc") # doctest: +SKIP + """ + # we require a newer version of pyarrow than we support for parquet + + orc = import_optional_dependency("pyarrow.orc") + + check_dtype_backend(dtype_backend) + + with get_handle(path, "rb", is_text=False) as handles: + source = handles.handle + if is_fsspec_url(path) and filesystem is None: + pa = import_optional_dependency("pyarrow") + pa_fs = import_optional_dependency("pyarrow.fs") + try: + filesystem, source = pa_fs.FileSystem.from_uri(path) + except (TypeError, pa.ArrowInvalid): + pass + + pa_table = orc.read_table( + source=source, columns=columns, filesystem=filesystem, **kwargs + ) + if dtype_backend is not lib.no_default: + if dtype_backend == "pyarrow": + df = pa_table.to_pandas(types_mapper=pd.ArrowDtype) + else: + from pandas.io._util import _arrow_dtype_mapping + + mapping = _arrow_dtype_mapping() + df = pa_table.to_pandas(types_mapper=mapping.get) + return df + else: + if using_pyarrow_string_dtype(): + types_mapper = arrow_string_types_mapper() + else: + types_mapper = None + return pa_table.to_pandas(types_mapper=types_mapper) + + +def to_orc( + df: DataFrame, + path: FilePath | WriteBuffer[bytes] | None = None, + *, + engine: Literal["pyarrow"] = "pyarrow", + index: bool | None = None, + engine_kwargs: dict[str, Any] | None = None, +) -> bytes | None: + """ + Write a DataFrame to the ORC format. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + df : DataFrame + The dataframe to be written to ORC. Raises NotImplementedError + if dtype of one or more columns is category, unsigned integers, + intervals, periods or sparse. + path : str, file-like object or None, default None + If a string, it will be used as Root Directory path + when writing a partitioned dataset. By file-like object, + we refer to objects with a write() method, such as a file handle + (e.g. via builtin open function). If path is None, + a bytes object is returned. + engine : str, default 'pyarrow' + ORC library to use. + index : bool, optional + If ``True``, include the dataframe's index(es) in the file output. If + ``False``, they will not be written to the file. + If ``None``, similar to ``infer`` the dataframe's index(es) + will be saved. However, instead of being saved as values, + the RangeIndex will be stored as a range in the metadata so it + doesn't require much space and is faster. Other indexes will + be included as columns in the file output. + engine_kwargs : dict[str, Any] or None, default None + Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. + + Returns + ------- + bytes if no path argument is provided else None + + Raises + ------ + NotImplementedError + Dtype of one or more columns is category, unsigned integers, interval, + period or sparse. + ValueError + engine is not pyarrow. + + Notes + ----- + * Before using this function you should read the + :ref:`user guide about ORC ` and + :ref:`install optional dependencies `. + * This function requires `pyarrow `_ + library. + * For supported dtypes please refer to `supported ORC features in Arrow + `__. + * Currently timezones in datetime columns are not preserved when a + dataframe is converted into ORC files. 
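# ORC round trip: to_orc() (whose implementation follows) returns bytes when
# no path is given, and read_orc() accepts any binary file-like object.
# Requires pyarrow with ORC support.
import io

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
orc_bytes = df.to_orc()  # path=None -> serialized bytes are returned
print(pd.read_orc(io.BytesIO(orc_bytes), columns=["a"]))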
+ """ + if index is None: + index = df.index.names[0] is not None + if engine_kwargs is None: + engine_kwargs = {} + + # validate index + # -------------- + + # validate that we have only a default index + # raise on anything else as we don't serialize the index + + if not df.index.equals(default_index(len(df))): + raise ValueError( + "orc does not support serializing a non-default index for the index; " + "you can .reset_index() to make the index into column(s)" + ) + + if df.index.name is not None: + raise ValueError("orc does not serialize index meta-data on a default index") + + if engine != "pyarrow": + raise ValueError("engine must be 'pyarrow'") + engine = import_optional_dependency(engine, min_version="10.0.1") + pa = import_optional_dependency("pyarrow") + orc = import_optional_dependency("pyarrow.orc") + + was_none = path is None + if was_none: + path = io.BytesIO() + assert path is not None # For mypy + with get_handle(path, "wb", is_text=False) as handles: + assert isinstance(engine, ModuleType) # For mypy + try: + orc.write_table( + engine.Table.from_pandas(df, preserve_index=index), + handles.handle, + **engine_kwargs, + ) + except (TypeError, pa.ArrowNotImplementedError) as e: + raise NotImplementedError( + "The dtype of one or more columns is not supported yet." + ) from e + + if was_none: + assert isinstance(path, io.BytesIO) # For mypy + return path.getvalue() + return None diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/pytables.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/pytables.py new file mode 100644 index 0000000000000000000000000000000000000000..13c2f1078512442c836beff51cdf42beb3c861d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/pytables.py @@ -0,0 +1,5421 @@ +""" +High level interface to PyTables for reading and writing pandas data structures +to disk +""" +from __future__ import annotations + +from contextlib import suppress +import copy +from datetime import ( + date, + tzinfo, +) +import itertools +import os +import re +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Final, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import ( + config, + get_option, + using_copy_on_write, + using_pyarrow_string_dtype, +) + +from pandas._libs import ( + lib, + writers as libwriters, +) +from pandas._libs.lib import is_string_array +from pandas._libs.tslibs import timezones +from pandas.compat._optional import import_optional_dependency +from pandas.compat.pickle_compat import patch_pickle +from pandas.errors import ( + AttributeConflictWarning, + ClosedFileError, + IncompatibilityWarning, + PerformanceWarning, + PossibleDataLossError, +) +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_complex_dtype, + is_list_like, + is_string_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import array_equivalent + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + concat, + isna, +) +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + PeriodArray, +) +import pandas.core.common as com +from pandas.core.computation.pytables import ( + PyTablesExpr, + maybe_expression, +) +from 
pandas.core.construction import extract_array +from pandas.core.indexes.api import ensure_index +from pandas.core.internals import ( + ArrayManager, + BlockManager, +) + +from pandas.io.common import stringify_path +from pandas.io.formats.printing import ( + adjoin, + pprint_thing, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sequence, + ) + from types import TracebackType + + from tables import ( + Col, + File, + Node, + ) + + from pandas._typing import ( + AnyArrayLike, + ArrayLike, + AxisInt, + DtypeArg, + FilePath, + Self, + Shape, + npt, + ) + + from pandas.core.internals import Block + +# versioning attribute +_version = "0.15.2" + +# encoding +_default_encoding = "UTF-8" + + +def _ensure_decoded(s): + """if we have bytes, decode them to unicode""" + if isinstance(s, np.bytes_): + s = s.decode("UTF-8") + return s + + +def _ensure_encoding(encoding: str | None) -> str: + # set the encoding if we need + if encoding is None: + encoding = _default_encoding + + return encoding + + +def _ensure_str(name): + """ + Ensure that an index / column name is a str (python 3); otherwise they + may be np.string dtype. Non-string dtypes are passed through unchanged. + + https://github.com/pandas-dev/pandas/issues/13492 + """ + if isinstance(name, str): + name = str(name) + return name + + +Term = PyTablesExpr + + +def _ensure_term(where, scope_level: int): + """ + Ensure that the where is a Term or a list of Term. + + This makes sure that we are capturing the scope of variables that are + passed create the terms here with a frame_level=2 (we are 2 levels down) + """ + # only consider list/tuple here as an ndarray is automatically a coordinate + # list + level = scope_level + 1 + if isinstance(where, (list, tuple)): + where = [ + Term(term, scope_level=level + 1) if maybe_expression(term) else term + for term in where + if term is not None + ] + elif maybe_expression(where): + where = Term(where, scope_level=level) + return where if where is None or len(where) else None + + +incompatibility_doc: Final = """ +where criteria is being ignored as this version [%s] is too old (or +not-defined), read the file in and write it out to a new file to upgrade (with +the copy_to method) +""" + +attribute_conflict_doc: Final = """ +the [%s] attribute of the existing index is [%s] which conflicts with the new +[%s], resetting the attribute to None +""" + +performance_doc: Final = """ +your performance may suffer as PyTables will pickle object types that it cannot +map directly to c-types [inferred_type->%s,key->%s] [items->%s] +""" + +# formats +_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"} + +# axes map +_AXES_MAP = {DataFrame: [0]} + +# register our configuration options +dropna_doc: Final = """ +: boolean + drop ALL nan rows when appending to a table +""" +format_doc: Final = """ +: format + default format writing format, if None, then + put will default to 'fixed' and append will default to 'table' +""" + +with config.config_prefix("io.hdf"): + config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool) + config.register_option( + "default_format", + None, + format_doc, + validator=config.is_one_of_factory(["fixed", "table", None]), + ) + +# oh the troubles to reduce import time +_table_mod = None +_table_file_open_policy_is_strict = False + + +def _tables(): + global _table_mod + global _table_file_open_policy_is_strict + if _table_mod is None: + import tables + + _table_mod = tables + + # set the file open policy + # 
return the file open policy; this changes as of pytables 3.1 + # depending on the HDF5 version + with suppress(AttributeError): + _table_file_open_policy_is_strict = ( + tables.file._FILE_OPEN_POLICY == "strict" + ) + + return _table_mod + + +# interface to/from ### + + +def to_hdf( + path_or_buf: FilePath | HDFStore, + key: str, + value: DataFrame | Series, + mode: str = "a", + complevel: int | None = None, + complib: str | None = None, + append: bool = False, + format: str | None = None, + index: bool = True, + min_itemsize: int | dict[str, int] | None = None, + nan_rep=None, + dropna: bool | None = None, + data_columns: Literal[True] | list[str] | None = None, + errors: str = "strict", + encoding: str = "UTF-8", +) -> None: + """store this object, close it if we opened it""" + if append: + f = lambda store: store.append( + key, + value, + format=format, + index=index, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + dropna=dropna, + data_columns=data_columns, + errors=errors, + encoding=encoding, + ) + else: + # NB: dropna is not passed to `put` + f = lambda store: store.put( + key, + value, + format=format, + index=index, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + data_columns=data_columns, + errors=errors, + encoding=encoding, + dropna=dropna, + ) + + path_or_buf = stringify_path(path_or_buf) + if isinstance(path_or_buf, str): + with HDFStore( + path_or_buf, mode=mode, complevel=complevel, complib=complib + ) as store: + f(store) + else: + f(path_or_buf) + + +def read_hdf( + path_or_buf: FilePath | HDFStore, + key=None, + mode: str = "r", + errors: str = "strict", + where: str | list | None = None, + start: int | None = None, + stop: int | None = None, + columns: list[str] | None = None, + iterator: bool = False, + chunksize: int | None = None, + **kwargs, +): + """ + Read from the store, close it if we opened it. + + Retrieve pandas object stored in file, optionally based on where + criteria. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + path_or_buf : str, path object, pandas.HDFStore + Any valid string path is acceptable. Only supports the local file system, + remote URLs and file-like objects are not supported. + + If you want to pass in a path object, pandas accepts any + ``os.PathLike``. + + Alternatively, pandas accepts an open :class:`pandas.HDFStore` object. + + key : object, optional + The group identifier in the store. Can be omitted if the HDF file + contains a single pandas object. + mode : {'r', 'r+', 'a'}, default 'r' + Mode to use when opening the file. Ignored if path_or_buf is a + :class:`pandas.HDFStore`. Default is 'r'. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. + where : list, optional + A list of Term (or convertible) objects. + start : int, optional + Row number to start selection. + stop : int, optional + Row number to stop selection. + columns : list, optional + A list of columns names to return. + iterator : bool, optional + Return an iterator object. + chunksize : int, optional + Number of rows to include in an iteration when using an iterator. + **kwargs + Additional keyword arguments passed to HDFStore. 
+ + Returns + ------- + object + The selected object. Return type depends on the object stored. + + See Also + -------- + DataFrame.to_hdf : Write a HDF file from a DataFrame. + HDFStore : Low-level access to HDF files. + + Examples + -------- + >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP + >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP + >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP + """ + if mode not in ["r", "r+", "a"]: + raise ValueError( + f"mode {mode} is not allowed while performing a read. " + f"Allowed modes are r, r+ and a." + ) + # grab the scope + if where is not None: + where = _ensure_term(where, scope_level=1) + + if isinstance(path_or_buf, HDFStore): + if not path_or_buf.is_open: + raise OSError("The HDFStore must be open for reading.") + + store = path_or_buf + auto_close = False + else: + path_or_buf = stringify_path(path_or_buf) + if not isinstance(path_or_buf, str): + raise NotImplementedError( + "Support for generic buffers has not been implemented." + ) + try: + exists = os.path.exists(path_or_buf) + + # if filepath is too long + except (TypeError, ValueError): + exists = False + + if not exists: + raise FileNotFoundError(f"File {path_or_buf} does not exist") + + store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs) + # can't auto open/close if we are using an iterator + # so delegate to the iterator + auto_close = True + + try: + if key is None: + groups = store.groups() + if len(groups) == 0: + raise ValueError( + "Dataset(s) incompatible with Pandas data types, " + "not table, or no datasets found in HDF5 file." + ) + candidate_only_group = groups[0] + + # For the HDF file to have only one dataset, all other groups + # should then be metadata groups for that candidate group. (This + # assumes that the groups() method enumerates parent groups + # before their children.) + for group_to_check in groups[1:]: + if not _is_metadata_of(group_to_check, candidate_only_group): + raise ValueError( + "key must be provided when HDF5 " + "file contains multiple datasets." + ) + key = candidate_only_group._v_pathname + return store.select( + key, + where=where, + start=start, + stop=stop, + columns=columns, + iterator=iterator, + chunksize=chunksize, + auto_close=auto_close, + ) + except (ValueError, TypeError, LookupError): + if not isinstance(path_or_buf, HDFStore): + # if there is an error, close the store if we opened it. + with suppress(AttributeError): + store.close() + + raise + + +def _is_metadata_of(group: Node, parent_group: Node) -> bool: + """Check if a given group is a metadata group for a given parent_group.""" + if group._v_depth <= parent_group._v_depth: + return False + + current = group + while current._v_depth > 1: + parent = current._v_parent + if parent == parent_group and current._v_name == "meta": + return True + current = current._v_parent + return False + + +class HDFStore: + """ + Dict-like IO interface for storing pandas objects in PyTables. + + Either Fixed or Table format. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + path : str + File path to HDF5 file. + mode : {'a', 'w', 'r', 'r+'}, default 'a' + + ``'r'`` + Read-only; no data can be modified. 
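# HDF5 round trip through to_hdf()/read_hdf() above; requires the optional
# PyTables dependency. format="table" plus data_columns makes 'a' queryable.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
df.to_hdf("demo.h5", key="data", mode="w", format="table", data_columns=["a"])
print(pd.read_hdf("demo.h5"))                       # key optional: one dataset
print(pd.read_hdf("demo.h5", key="data", where="a > 1"))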
+ ``'w'`` + Write; a new file is created (an existing file with the same + name would be deleted). + ``'a'`` + Append; an existing file is opened for reading and writing, + and if the file does not exist it is created. + ``'r+'`` + It is similar to ``'a'``, but the file must already exist. + complevel : int, 0-9, default None + Specifies a compression level for data. + A value of 0 or None disables compression. + complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' + Specifies the compression library to be used. + These additional compressors for Blosc are supported + (default if no compressor specified: 'blosc:blosclz'): + {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', + 'blosc:zlib', 'blosc:zstd'}. + Specifying a compression library which is not available issues + a ValueError. + fletcher32 : bool, default False + If applying compression use the fletcher32 checksum. + **kwargs + These parameters will be passed to the PyTables open_file method. + + Examples + -------- + >>> bar = pd.DataFrame(np.random.randn(10, 4)) + >>> store = pd.HDFStore('test.h5') + >>> store['foo'] = bar # write to HDF5 + >>> bar = store['foo'] # retrieve + >>> store.close() + + **Create or load HDF5 file in-memory** + + When passing the `driver` option to the PyTables open_file method through + **kwargs, the HDF5 file is loaded or created in-memory and will only be + written when closed: + + >>> bar = pd.DataFrame(np.random.randn(10, 4)) + >>> store = pd.HDFStore('test.h5', driver='H5FD_CORE') + >>> store['foo'] = bar + >>> store.close() # only now, data is written to disk + """ + + _handle: File | None + _mode: str + + def __init__( + self, + path, + mode: str = "a", + complevel: int | None = None, + complib=None, + fletcher32: bool = False, + **kwargs, + ) -> None: + if "format" in kwargs: + raise ValueError("format is not a defined argument for HDFStore") + + tables = import_optional_dependency("tables") + + if complib is not None and complib not in tables.filters.all_complibs: + raise ValueError( + f"complib only supports {tables.filters.all_complibs} compression." 
+ ) + + if complib is None and complevel is not None: + complib = tables.filters.default_complib + + self._path = stringify_path(path) + if mode is None: + mode = "a" + self._mode = mode + self._handle = None + self._complevel = complevel if complevel else 0 + self._complib = complib + self._fletcher32 = fletcher32 + self._filters = None + self.open(mode=mode, **kwargs) + + def __fspath__(self) -> str: + return self._path + + @property + def root(self): + """return the root node""" + self._check_if_open() + assert self._handle is not None # for mypy + return self._handle.root + + @property + def filename(self) -> str: + return self._path + + def __getitem__(self, key: str): + return self.get(key) + + def __setitem__(self, key: str, value) -> None: + self.put(key, value) + + def __delitem__(self, key: str) -> None: + return self.remove(key) + + def __getattr__(self, name: str): + """allow attribute access to get stores""" + try: + return self.get(name) + except (KeyError, ClosedFileError): + pass + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __contains__(self, key: str) -> bool: + """ + check for existence of this key + can match the exact pathname or the pathnm w/o the leading '/' + """ + node = self.get_node(key) + if node is not None: + name = node._v_pathname + if key in (name, name[1:]): + return True + return False + + def __len__(self) -> int: + return len(self.groups()) + + def __repr__(self) -> str: + pstr = pprint_thing(self._path) + return f"{type(self)}\nFile path: {pstr}\n" + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + def keys(self, include: str = "pandas") -> list[str]: + """ + Return a list of keys corresponding to objects stored in HDFStore. + + Parameters + ---------- + + include : str, default 'pandas' + When kind equals 'pandas' return pandas objects. + When kind equals 'native' return native HDF5 Table objects. + + Returns + ------- + list + List of ABSOLUTE path-names (e.g. have the leading '/'). + + Raises + ------ + raises ValueError if kind has an illegal value + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> store.get('data') # doctest: +SKIP + >>> print(store.keys()) # doctest: +SKIP + ['/data1', '/data2'] + >>> store.close() # doctest: +SKIP + """ + if include == "pandas": + return [n._v_pathname for n in self.groups()] + + elif include == "native": + assert self._handle is not None # mypy + return [ + n._v_pathname for n in self._handle.walk_nodes("/", classname="Table") + ] + raise ValueError( + f"`include` should be either 'pandas' or 'native' but is '{include}'" + ) + + def __iter__(self) -> Iterator[str]: + return iter(self.keys()) + + def items(self) -> Iterator[tuple[str, list]]: + """ + iterate on key->group + """ + for g in self.groups(): + yield g._v_pathname, g + + def open(self, mode: str = "a", **kwargs) -> None: + """ + Open the file in the specified mode + + Parameters + ---------- + mode : {'a', 'w', 'r', 'r+'}, default 'a' + See HDFStore docstring or tables.open_file for info about modes + **kwargs + These parameters will be passed to the PyTables open_file method. 
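# Dict-like HDFStore usage mirroring the class docstring above; requires
# PyTables, and assumes the bundled blosc compressor is available.
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
with pd.HDFStore("store_demo.h5", mode="w", complevel=9, complib="blosc") as store:
    store.put("frames/data", df, format="table", data_columns=True)
    print(store.keys())                             # ['/frames/data']
    print(store.select("frames/data", where="a == 2"))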
+ """ + tables = _tables() + + if self._mode != mode: + # if we are changing a write mode to read, ok + if self._mode in ["a", "w"] and mode in ["r", "r+"]: + pass + elif mode in ["w"]: + # this would truncate, raise here + if self.is_open: + raise PossibleDataLossError( + f"Re-opening the file [{self._path}] with mode [{self._mode}] " + "will delete the current file!" + ) + + self._mode = mode + + # close and reopen the handle + if self.is_open: + self.close() + + if self._complevel and self._complevel > 0: + self._filters = _tables().Filters( + self._complevel, self._complib, fletcher32=self._fletcher32 + ) + + if _table_file_open_policy_is_strict and self.is_open: + msg = ( + "Cannot open HDF5 file, which is already opened, " + "even in read-only mode." + ) + raise ValueError(msg) + + self._handle = tables.open_file(self._path, self._mode, **kwargs) + + def close(self) -> None: + """ + Close the PyTables file handle + """ + if self._handle is not None: + self._handle.close() + self._handle = None + + @property + def is_open(self) -> bool: + """ + return a boolean indicating whether the file is open + """ + if self._handle is None: + return False + return bool(self._handle.isopen) + + def flush(self, fsync: bool = False) -> None: + """ + Force all buffered modifications to be written to disk. + + Parameters + ---------- + fsync : bool (default False) + call ``os.fsync()`` on the file handle to force writing to disk. + + Notes + ----- + Without ``fsync=True``, flushing may not guarantee that the OS writes + to disk. With fsync, the operation will block until the OS claims the + file has been written; however, other caching layers may still + interfere. + """ + if self._handle is not None: + self._handle.flush() + if fsync: + with suppress(OSError): + os.fsync(self._handle.fileno()) + + def get(self, key: str): + """ + Retrieve pandas object stored in file. + + Parameters + ---------- + key : str + + Returns + ------- + object + Same type as object stored in file. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> store.get('data') # doctest: +SKIP + >>> store.close() # doctest: +SKIP + """ + with patch_pickle(): + # GH#31167 Without this patch, pickle doesn't know how to unpickle + # old DateOffset objects now that they are cdef classes. + group = self.get_node(key) + if group is None: + raise KeyError(f"No object named {key} in the file") + return self._read_group(group) + + def select( + self, + key: str, + where=None, + start=None, + stop=None, + columns=None, + iterator: bool = False, + chunksize: int | None = None, + auto_close: bool = False, + ): + """ + Retrieve pandas object stored in file, optionally based on where criteria. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + key : str + Object being retrieved from file. + where : list or None + List of Term (or convertible) objects, optional. + start : int or None + Row number to start selection. + stop : int, default None + Row number to stop selection. + columns : list or None + A list of columns that if not None, will limit the return columns. + iterator : bool or False + Returns an iterator. 
+ chunksize : int or None + Number or rows to include in iteration, return an iterator. + auto_close : bool or False + Should automatically close the store when finished. + + Returns + ------- + object + Retrieved object from file. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> store.get('data') # doctest: +SKIP + >>> print(store.keys()) # doctest: +SKIP + ['/data1', '/data2'] + >>> store.select('/data1') # doctest: +SKIP + A B + 0 1 2 + 1 3 4 + >>> store.select('/data1', where='columns == A') # doctest: +SKIP + A + 0 1 + 1 3 + >>> store.close() # doctest: +SKIP + """ + group = self.get_node(key) + if group is None: + raise KeyError(f"No object named {key} in the file") + + # create the storer and axes + where = _ensure_term(where, scope_level=1) + s = self._create_storer(group) + s.infer_axes() + + # function to call on iteration + def func(_start, _stop, _where): + return s.read(start=_start, stop=_stop, where=_where, columns=columns) + + # create the iterator + it = TableIterator( + self, + s, + func, + where=where, + nrows=s.nrows, + start=start, + stop=stop, + iterator=iterator, + chunksize=chunksize, + auto_close=auto_close, + ) + + return it.get_result() + + def select_as_coordinates( + self, + key: str, + where=None, + start: int | None = None, + stop: int | None = None, + ): + """ + return the selection as an Index + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + + Parameters + ---------- + key : str + where : list of Term (or convertible) objects, optional + start : integer (defaults to None), row number to start selection + stop : integer (defaults to None), row number to stop selection + """ + where = _ensure_term(where, scope_level=1) + tbl = self.get_storer(key) + if not isinstance(tbl, Table): + raise TypeError("can only read_coordinates with a table") + return tbl.read_coordinates(where=where, start=start, stop=stop) + + def select_column( + self, + key: str, + column: str, + start: int | None = None, + stop: int | None = None, + ): + """ + return a single column from the table. This is generally only useful to + select an indexable + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + key : str + column : str + The column of interest. 
+ start : int or None, default None + stop : int or None, default None + + Raises + ------ + raises KeyError if the column is not found (or key is not a valid + store) + raises ValueError if the column can not be extracted individually (it + is part of a data block) + + """ + tbl = self.get_storer(key) + if not isinstance(tbl, Table): + raise TypeError("can only read_column with a table") + return tbl.read_column(column=column, start=start, stop=stop) + + def select_as_multiple( + self, + keys, + where=None, + selector=None, + columns=None, + start=None, + stop=None, + iterator: bool = False, + chunksize: int | None = None, + auto_close: bool = False, + ): + """ + Retrieve pandas objects from multiple tables. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + keys : a list of the tables + selector : the table to apply the where criteria (defaults to keys[0] + if not supplied) + columns : the columns I want back + start : integer (defaults to None), row number to start selection + stop : integer (defaults to None), row number to stop selection + iterator : bool, return an iterator, default False + chunksize : nrows to include in iteration, return an iterator + auto_close : bool, default False + Should automatically close the store when finished. + + Raises + ------ + raises KeyError if keys or selector is not found or keys is empty + raises TypeError if keys is not a list or tuple + raises ValueError if the tables are not ALL THE SAME DIMENSIONS + """ + # default to single select + where = _ensure_term(where, scope_level=1) + if isinstance(keys, (list, tuple)) and len(keys) == 1: + keys = keys[0] + if isinstance(keys, str): + return self.select( + key=keys, + where=where, + columns=columns, + start=start, + stop=stop, + iterator=iterator, + chunksize=chunksize, + auto_close=auto_close, + ) + + if not isinstance(keys, (list, tuple)): + raise TypeError("keys must be a list/tuple") + + if not len(keys): + raise ValueError("keys must have a non-zero length") + + if selector is None: + selector = keys[0] + + # collect the tables + tbls = [self.get_storer(k) for k in keys] + s = self.get_storer(selector) + + # validate rows + nrows = None + for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): + if t is None: + raise KeyError(f"Invalid table [{k}]") + if not t.is_table: + raise TypeError( + f"object [{t.pathname}] is not a table, and cannot be used in all " + "select as multiple" + ) + + if nrows is None: + nrows = t.nrows + elif t.nrows != nrows: + raise ValueError("all tables must have exactly the same nrows!") + + # The isinstance checks here are redundant with the check above, + # but necessary for mypy; see GH#29757 + _tbls = [x for x in tbls if isinstance(x, Table)] + + # axis is the concentration axes + axis = {t.non_index_axes[0][0] for t in _tbls}.pop() + + def func(_start, _stop, _where): + # retrieve the objs, _where is always passed as a set of + # coordinates here + objs = [ + t.read(where=_where, columns=columns, start=_start, stop=_stop) + for t in tbls + ] + + # concat and return + return concat(objs, axis=axis, verify_integrity=False)._consolidate() + + # create the iterator + it = TableIterator( + self, + s, + func, + where=where, + nrows=nrows, + start=start, + stop=stop, + iterator=iterator, + 
chunksize=chunksize, + auto_close=auto_close, + ) + + return it.get_result(coordinates=True) + + def put( + self, + key: str, + value: DataFrame | Series, + format=None, + index: bool = True, + append: bool = False, + complib=None, + complevel: int | None = None, + min_itemsize: int | dict[str, int] | None = None, + nan_rep=None, + data_columns: Literal[True] | list[str] | None = None, + encoding=None, + errors: str = "strict", + track_times: bool = True, + dropna: bool = False, + ) -> None: + """ + Store object in HDFStore. + + Parameters + ---------- + key : str + value : {Series, DataFrame} + format : 'fixed(f)|table(t)', default is 'fixed' + Format to use when storing object in HDFStore. Value can be one of: + + ``'fixed'`` + Fixed format. Fast writing/reading. Not-appendable, nor searchable. + ``'table'`` + Table format. Write as a PyTables Table structure which may perform + worse but allow more flexible operations like searching / selecting + subsets of the data. + index : bool, default True + Write DataFrame index as a column. + append : bool, default False + This will force Table format, append the input data to the existing. + data_columns : list of columns or True, default None + List of columns to create as data columns, or True to use all columns. + See `here + `__. + encoding : str, default None + Provide an encoding for strings. + track_times : bool, default True + Parameter is propagated to 'create_table' method of 'PyTables'. + If set to False it enables to have the same h5 files (same hashes) + independent on creation time. + dropna : bool, default False, optional + Remove missing values. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + """ + if format is None: + format = get_option("io.hdf.default_format") or "fixed" + format = self._validate_format(format) + self._write_to_group( + key, + value, + format=format, + index=index, + append=append, + complib=complib, + complevel=complevel, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + data_columns=data_columns, + encoding=encoding, + errors=errors, + track_times=track_times, + dropna=dropna, + ) + + def remove(self, key: str, where=None, start=None, stop=None) -> None: + """ + Remove pandas object partially by specifying the where condition + + Parameters + ---------- + key : str + Node to remove or delete rows from + where : list of Term (or convertible) objects, optional + start : integer (defaults to None), row number to start selection + stop : integer (defaults to None), row number to stop selection + + Returns + ------- + number of rows removed (or None if not a Table) + + Raises + ------ + raises KeyError if key is not a valid store + + """ + where = _ensure_term(where, scope_level=1) + try: + s = self.get_storer(key) + except KeyError: + # the key is not a valid store, re-raising KeyError + raise + except AssertionError: + # surface any assertion errors for e.g. debugging + raise + except Exception as err: + # In tests we get here with ClosedFileError, TypeError, and + # _table_mod.NoSuchNodeError. TODO: Catch only these? + + if where is not None: + raise ValueError( + "trying to remove a node with a non-None where clause!" 
+ ) from err + + # we are actually trying to remove a node (with children) + node = self.get_node(key) + if node is not None: + node._f_remove(recursive=True) + return None + + # remove the node + if com.all_none(where, start, stop): + s.group._f_remove(recursive=True) + + # delete from the table + else: + if not s.is_table: + raise ValueError( + "can only remove with where on objects written as tables" + ) + return s.delete(where=where, start=start, stop=stop) + + def append( + self, + key: str, + value: DataFrame | Series, + format=None, + axes=None, + index: bool | list[str] = True, + append: bool = True, + complib=None, + complevel: int | None = None, + columns=None, + min_itemsize: int | dict[str, int] | None = None, + nan_rep=None, + chunksize: int | None = None, + expectedrows=None, + dropna: bool | None = None, + data_columns: Literal[True] | list[str] | None = None, + encoding=None, + errors: str = "strict", + ) -> None: + """ + Append to Table in file. + + Node must already exist and be Table format. + + Parameters + ---------- + key : str + value : {Series, DataFrame} + format : 'table' is the default + Format to use when storing object in HDFStore. Value can be one of: + + ``'table'`` + Table format. Write as a PyTables Table structure which may perform + worse but allow more flexible operations like searching / selecting + subsets of the data. + index : bool, default True + Write DataFrame index as a column. + append : bool, default True + Append the input data to the existing. + data_columns : list of columns, or True, default None + List of columns to create as indexed data columns for on-disk + queries, or True to use all columns. By default only the axes + of the object are indexed. See `here + `__. + min_itemsize : dict of columns that specify minimum str sizes + nan_rep : str to use as str nan representation + chunksize : size to chunk the writing + expectedrows : expected TOTAL row size of this table + encoding : default None, provide an encoding for str + dropna : bool, default False, optional + Do not write an ALL nan row to the store settable + by the option 'io.hdf.dropna_table'. 
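+ errors : str, default 'strict'
+ Specifies how encoding and decoding errors are to be handled; passed
+ through to the underlying writer.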
+
+ Notes
+ -----
+ Does *not* check if data being appended overlaps with existing
+ data in the table, so be careful.
+
+ Examples
+ --------
+ >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df1, format='table') # doctest: +SKIP
+ >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
+ >>> store.append('data', df2) # doctest: +SKIP
+ >>> store.select('data') # doctest: +SKIP
+ A B
+ 0 1 2
+ 1 3 4
+ 0 5 6
+ 1 7 8
+ >>> store.close() # doctest: +SKIP
+ """
+ if columns is not None:
+ raise TypeError(
+ "columns is not a supported keyword in append, try data_columns"
+ )
+
+ if dropna is None:
+ dropna = get_option("io.hdf.dropna_table")
+ if format is None:
+ format = get_option("io.hdf.default_format") or "table"
+ format = self._validate_format(format)
+ self._write_to_group(
+ key,
+ value,
+ format=format,
+ axes=axes,
+ index=index,
+ append=append,
+ complib=complib,
+ complevel=complevel,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ chunksize=chunksize,
+ expectedrows=expectedrows,
+ dropna=dropna,
+ data_columns=data_columns,
+ encoding=encoding,
+ errors=errors,
+ )
+
+ def append_to_multiple(
+ self,
+ d: dict,
+ value,
+ selector,
+ data_columns=None,
+ axes=None,
+ dropna: bool = False,
+ **kwargs,
+ ) -> None:
+ """
+ Append to multiple tables.
+
+ Parameters
+ ----------
+ d : a dict of table_name to table_columns, None is acceptable as the
+ values of one node (this will get all the remaining columns)
+ value : a pandas object
+ selector : a string that designates the indexable table; all of its
+ columns will be designated as data_columns, unless data_columns is
+ passed, in which case these are used
+ data_columns : list of columns to create as data columns, or True to
+ use all columns
+ dropna : if evaluates to True, drop rows from all tables if any single
+ row in each table has all NaN. Default False.
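+ **kwargs
+ Remaining keyword arguments are passed through to ``HDFStore.append`` for
+ each of the target tables; ``min_itemsize`` is split so that each table
+ only receives the entries for its own columns.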
+ + Notes + ----- + axes parameter is currently not accepted + + """ + if axes is not None: + raise TypeError( + "axes is currently not accepted as a parameter to append_to_multiple; " + "you can create the tables independently instead" + ) + + if not isinstance(d, dict): + raise ValueError( + "append_to_multiple must have a dictionary specified as the " + "way to split the value" + ) + + if selector not in d: + raise ValueError( + "append_to_multiple requires a selector that is in passed dict" + ) + + # figure out the splitting axis (the non_index_axis) + axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))) + + # figure out how to split the value + remain_key = None + remain_values: list = [] + for k, v in d.items(): + if v is None: + if remain_key is not None: + raise ValueError( + "append_to_multiple can only have one value in d that is None" + ) + remain_key = k + else: + remain_values.extend(v) + if remain_key is not None: + ordered = value.axes[axis] + ordd = ordered.difference(Index(remain_values)) + ordd = sorted(ordered.get_indexer(ordd)) + d[remain_key] = ordered.take(ordd) + + # data_columns + if data_columns is None: + data_columns = d[selector] + + # ensure rows are synchronized across the tables + if dropna: + idxs = (value[cols].dropna(how="all").index for cols in d.values()) + valid_index = next(idxs) + for index in idxs: + valid_index = valid_index.intersection(index) + value = value.loc[valid_index] + + min_itemsize = kwargs.pop("min_itemsize", None) + + # append + for k, v in d.items(): + dc = data_columns if k == selector else None + + # compute the val + val = value.reindex(v, axis=axis) + + filtered = ( + {key: value for (key, value) in min_itemsize.items() if key in v} + if min_itemsize is not None + else None + ) + self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs) + + def create_table_index( + self, + key: str, + columns=None, + optlevel: int | None = None, + kind: str | None = None, + ) -> None: + """ + Create a pytables index on the table. + + Parameters + ---------- + key : str + columns : None, bool, or listlike[str] + Indicate which columns to create an index on. + + * False : Do not create any indexes. + * True : Create indexes on all columns. + * None : Create indexes on all columns. + * listlike : Create indexes on the given columns. + + optlevel : int or None, default None + Optimization level, if None, pytables defaults to 6. + kind : str or None, default None + Kind of index, if None, pytables defaults to "medium". + + Raises + ------ + TypeError: raises if the node is not a table + """ + # version requirements + _tables() + s = self.get_storer(key) + if s is None: + return + + if not isinstance(s, Table): + raise TypeError("cannot create table index on a Fixed format store") + s.create_index(columns=columns, optlevel=optlevel, kind=kind) + + def groups(self) -> list: + """ + Return a list of all the top-level nodes. + + Each node returned is not a pandas storage object. + + Returns + ------- + list + List of objects. 
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df) # doctest: +SKIP
+ >>> print(store.groups()) # doctest: +SKIP
+ [/data (Group) ''
+ children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
+ 'block0_items' (Array)]]
+ >>> store.close() # doctest: +SKIP
+ """
+ _tables()
+ self._check_if_open()
+ assert self._handle is not None # for mypy
+ assert _table_mod is not None # for mypy
+ return [
+ g
+ for g in self._handle.walk_groups()
+ if (
+ not isinstance(g, _table_mod.link.Link)
+ and (
+ getattr(g._v_attrs, "pandas_type", None)
+ or getattr(g, "table", None)
+ or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
+ )
+ )
+ ]
+
+ def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
+ """
+ Walk the pytables group hierarchy for pandas objects.
+
+ This generator will yield the group path, subgroups and pandas object
+ names for each group.
+
+ Any non-pandas PyTables objects that are not a group will be ignored.
+
+ The `where` group itself is listed first (preorder), then each of its
+ child groups (following an alphanumerical order) is also traversed,
+ following the same procedure.
+
+ Parameters
+ ----------
+ where : str, default "/"
+ Group where to start walking.
+
+ Yields
+ ------
+ path : str
+ Full path to a group (without trailing '/').
+ groups : list
+ Names (strings) of the groups contained in `path`.
+ leaves : list
+ Names (strings) of the pandas objects contained in `path`.
+
+ Examples
+ --------
+ >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df1, format='table') # doctest: +SKIP
+ >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
+ >>> store.append('data', df2) # doctest: +SKIP
+ >>> for group in store.walk(): # doctest: +SKIP
+ ... 
print(group) # doctest: +SKIP + >>> store.close() # doctest: +SKIP + """ + _tables() + self._check_if_open() + assert self._handle is not None # for mypy + assert _table_mod is not None # for mypy + + for g in self._handle.walk_groups(where): + if getattr(g._v_attrs, "pandas_type", None) is not None: + continue + + groups = [] + leaves = [] + for child in g._v_children.values(): + pandas_type = getattr(child._v_attrs, "pandas_type", None) + if pandas_type is None: + if isinstance(child, _table_mod.group.Group): + groups.append(child._v_name) + else: + leaves.append(child._v_name) + + yield (g._v_pathname.rstrip("/"), groups, leaves) + + def get_node(self, key: str) -> Node | None: + """return the node with the key or None if it does not exist""" + self._check_if_open() + if not key.startswith("/"): + key = "/" + key + + assert self._handle is not None + assert _table_mod is not None # for mypy + try: + node = self._handle.get_node(self.root, key) + except _table_mod.exceptions.NoSuchNodeError: + return None + + assert isinstance(node, _table_mod.Node), type(node) + return node + + def get_storer(self, key: str) -> GenericFixed | Table: + """return the storer object for a key, raise if not in the file""" + group = self.get_node(key) + if group is None: + raise KeyError(f"No object named {key} in the file") + + s = self._create_storer(group) + s.infer_axes() + return s + + def copy( + self, + file, + mode: str = "w", + propindexes: bool = True, + keys=None, + complib=None, + complevel: int | None = None, + fletcher32: bool = False, + overwrite: bool = True, + ) -> HDFStore: + """ + Copy the existing store to a new file, updating in place. + + Parameters + ---------- + propindexes : bool, default True + Restore indexes in copied file. + keys : list, optional + List of keys to include in the copy (defaults to all). + overwrite : bool, default True + Whether to overwrite (remove and replace) existing nodes in the new store. + mode, complib, complevel, fletcher32 same as in HDFStore.__init__ + + Returns + ------- + open file handle of the new store + """ + new_store = HDFStore( + file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32 + ) + if keys is None: + keys = list(self.keys()) + if not isinstance(keys, (tuple, list)): + keys = [keys] + for k in keys: + s = self.get_storer(k) + if s is not None: + if k in new_store: + if overwrite: + new_store.remove(k) + + data = self.select(k) + if isinstance(s, Table): + index: bool | list[str] = False + if propindexes: + index = [a.name for a in s.axes if a.is_indexed] + new_store.append( + k, + data, + index=index, + data_columns=getattr(s, "data_columns", None), + encoding=s.encoding, + ) + else: + new_store.put(k, data, encoding=s.encoding) + + return new_store + + def info(self) -> str: + """ + Print detailed information on the store. 
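+
+ The listing shows the file path and, for each stored key, the type and
+ shape of the stored object (or an error marker if a node cannot be read).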
+ + Returns + ------- + str + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> print(store.info()) # doctest: +SKIP + >>> store.close() # doctest: +SKIP + + File path: store.h5 + /data frame (shape->[2,2]) + """ + path = pprint_thing(self._path) + output = f"{type(self)}\nFile path: {path}\n" + + if self.is_open: + lkeys = sorted(self.keys()) + if len(lkeys): + keys = [] + values = [] + + for k in lkeys: + try: + s = self.get_storer(k) + if s is not None: + keys.append(pprint_thing(s.pathname or k)) + values.append(pprint_thing(s or "invalid_HDFStore node")) + except AssertionError: + # surface any assertion errors for e.g. debugging + raise + except Exception as detail: + keys.append(k) + dstr = pprint_thing(detail) + values.append(f"[invalid_HDFStore node: {dstr}]") + + output += adjoin(12, keys, values) + else: + output += "Empty" + else: + output += "File is CLOSED" + + return output + + # ------------------------------------------------------------------------ + # private methods + + def _check_if_open(self) -> None: + if not self.is_open: + raise ClosedFileError(f"{self._path} file is not open!") + + def _validate_format(self, format: str) -> str: + """validate / deprecate formats""" + # validate + try: + format = _FORMAT_MAP[format.lower()] + except KeyError as err: + raise TypeError(f"invalid HDFStore format specified [{format}]") from err + + return format + + def _create_storer( + self, + group, + format=None, + value: DataFrame | Series | None = None, + encoding: str = "UTF-8", + errors: str = "strict", + ) -> GenericFixed | Table: + """return a suitable class to operate""" + cls: type[GenericFixed | Table] + + if value is not None and not isinstance(value, (Series, DataFrame)): + raise TypeError("value must be None, Series, or DataFrame") + + pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None)) + tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None)) + + # infer the pt from the passed value + if pt is None: + if value is None: + _tables() + assert _table_mod is not None # for mypy + if getattr(group, "table", None) or isinstance( + group, _table_mod.table.Table + ): + pt = "frame_table" + tt = "generic_table" + else: + raise TypeError( + "cannot create a storer if the object is not existing " + "nor a value are passed" + ) + else: + if isinstance(value, Series): + pt = "series" + else: + pt = "frame" + + # we are actually a table + if format == "table": + pt += "_table" + + # a storer node + if "table" not in pt: + _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed} + try: + cls = _STORER_MAP[pt] + except KeyError as err: + raise TypeError( + f"cannot properly create the storer for: [_STORER_MAP] [group->" + f"{group},value->{type(value)},format->{format}" + ) from err + return cls(self, group, encoding=encoding, errors=errors) + + # existing node (and must be a table) + if tt is None: + # if we are a writer, determine the tt + if value is not None: + if pt == "series_table": + index = getattr(value, "index", None) + if index is not None: + if index.nlevels == 1: + tt = "appendable_series" + elif index.nlevels > 1: + tt = "appendable_multiseries" + elif pt == "frame_table": + index = getattr(value, "index", None) + if index is not None: + if index.nlevels == 1: + tt = "appendable_frame" + elif index.nlevels > 1: + tt = "appendable_multiframe" + + _TABLE_MAP = { + "generic_table": GenericTable, + 
"appendable_series": AppendableSeriesTable, + "appendable_multiseries": AppendableMultiSeriesTable, + "appendable_frame": AppendableFrameTable, + "appendable_multiframe": AppendableMultiFrameTable, + "worm": WORMTable, + } + try: + cls = _TABLE_MAP[tt] + except KeyError as err: + raise TypeError( + f"cannot properly create the storer for: [_TABLE_MAP] [group->" + f"{group},value->{type(value)},format->{format}" + ) from err + + return cls(self, group, encoding=encoding, errors=errors) + + def _write_to_group( + self, + key: str, + value: DataFrame | Series, + format, + axes=None, + index: bool | list[str] = True, + append: bool = False, + complib=None, + complevel: int | None = None, + fletcher32=None, + min_itemsize: int | dict[str, int] | None = None, + chunksize: int | None = None, + expectedrows=None, + dropna: bool = False, + nan_rep=None, + data_columns=None, + encoding=None, + errors: str = "strict", + track_times: bool = True, + ) -> None: + # we don't want to store a table node at all if our object is 0-len + # as there are not dtypes + if getattr(value, "empty", None) and (format == "table" or append): + return + + group = self._identify_group(key, append) + + s = self._create_storer(group, format, value, encoding=encoding, errors=errors) + if append: + # raise if we are trying to append to a Fixed format, + # or a table that exists (and we are putting) + if not s.is_table or (s.is_table and format == "fixed" and s.is_exists): + raise ValueError("Can only append to Tables") + if not s.is_exists: + s.set_object_info() + else: + s.set_object_info() + + if not s.is_table and complib: + raise ValueError("Compression not supported on Fixed format stores") + + # write the object + s.write( + obj=value, + axes=axes, + append=append, + complib=complib, + complevel=complevel, + fletcher32=fletcher32, + min_itemsize=min_itemsize, + chunksize=chunksize, + expectedrows=expectedrows, + dropna=dropna, + nan_rep=nan_rep, + data_columns=data_columns, + track_times=track_times, + ) + + if isinstance(s, Table) and index: + s.create_index(columns=index) + + def _read_group(self, group: Node): + s = self._create_storer(group) + s.infer_axes() + return s.read() + + def _identify_group(self, key: str, append: bool) -> Node: + """Identify HDF5 group based on key, delete/create group if needed.""" + group = self.get_node(key) + + # we make this assertion for mypy; the get_node call will already + # have raised if this is incorrect + assert self._handle is not None + + # remove the node if we are not appending + if group is not None and not append: + self._handle.remove_node(group, recursive=True) + group = None + + if group is None: + group = self._create_nodes_and_group(key) + + return group + + def _create_nodes_and_group(self, key: str) -> Node: + """Create nodes from key and return group name.""" + # assertion for mypy + assert self._handle is not None + + paths = key.split("/") + # recursively create the groups + path = "/" + for p in paths: + if not len(p): + continue + new_path = path + if not path.endswith("/"): + new_path += "/" + new_path += p + group = self.get_node(new_path) + if group is None: + group = self._handle.create_group(path, p) + path = new_path + return group + + +class TableIterator: + """ + Define the iteration interface on a table + + Parameters + ---------- + store : HDFStore + s : the referred storer + func : the function to execute the query + where : the where of the query + nrows : the rows to iterate on + start : the passed start value (default is None) + stop : the 
passed stop value (default is None) + iterator : bool, default False + Whether to use the default iterator. + chunksize : the passed chunking value (default is 100000) + auto_close : bool, default False + Whether to automatically close the store at the end of iteration. + """ + + chunksize: int | None + store: HDFStore + s: GenericFixed | Table + + def __init__( + self, + store: HDFStore, + s: GenericFixed | Table, + func, + where, + nrows, + start=None, + stop=None, + iterator: bool = False, + chunksize: int | None = None, + auto_close: bool = False, + ) -> None: + self.store = store + self.s = s + self.func = func + self.where = where + + # set start/stop if they are not set if we are a table + if self.s.is_table: + if nrows is None: + nrows = 0 + if start is None: + start = 0 + if stop is None: + stop = nrows + stop = min(nrows, stop) + + self.nrows = nrows + self.start = start + self.stop = stop + + self.coordinates = None + if iterator or chunksize is not None: + if chunksize is None: + chunksize = 100000 + self.chunksize = int(chunksize) + else: + self.chunksize = None + + self.auto_close = auto_close + + def __iter__(self) -> Iterator: + # iterate + current = self.start + if self.coordinates is None: + raise ValueError("Cannot iterate until get_result is called.") + while current < self.stop: + stop = min(current + self.chunksize, self.stop) + value = self.func(None, None, self.coordinates[current:stop]) + current = stop + if value is None or not len(value): + continue + + yield value + + self.close() + + def close(self) -> None: + if self.auto_close: + self.store.close() + + def get_result(self, coordinates: bool = False): + # return the actual iterator + if self.chunksize is not None: + if not isinstance(self.s, Table): + raise TypeError("can only use an iterator or chunksize on a table") + + self.coordinates = self.s.read_coordinates(where=self.where) + + return self + + # if specified read via coordinates (necessary for multiple selections + if coordinates: + if not isinstance(self.s, Table): + raise TypeError("can only read_coordinates on a table") + where = self.s.read_coordinates( + where=self.where, start=self.start, stop=self.stop + ) + else: + where = self.where + + # directly return the result + results = self.func(self.start, self.stop, where) + self.close() + return results + + +class IndexCol: + """ + an index column description class + + Parameters + ---------- + axis : axis which I reference + values : the ndarray like converted values + kind : a string description of this type + typ : the pytables type + pos : the position in the pytables + + """ + + is_an_indexable: bool = True + is_data_indexable: bool = True + _info_fields = ["freq", "tz", "index_name"] + + def __init__( + self, + name: str, + values=None, + kind=None, + typ=None, + cname: str | None = None, + axis=None, + pos=None, + freq=None, + tz=None, + index_name=None, + ordered=None, + table=None, + meta=None, + metadata=None, + ) -> None: + if not isinstance(name, str): + raise ValueError("`name` must be a str.") + + self.values = values + self.kind = kind + self.typ = typ + self.name = name + self.cname = cname or name + self.axis = axis + self.pos = pos + self.freq = freq + self.tz = tz + self.index_name = index_name + self.ordered = ordered + self.table = table + self.meta = meta + self.metadata = metadata + + if pos is not None: + self.set_pos(pos) + + # These are ensured as long as the passed arguments match the + # constructor annotations. 
+ assert isinstance(self.name, str) + assert isinstance(self.cname, str) + + @property + def itemsize(self) -> int: + # Assumes self.typ has already been initialized + return self.typ.itemsize + + @property + def kind_attr(self) -> str: + return f"{self.name}_kind" + + def set_pos(self, pos: int) -> None: + """set the position of this column in the Table""" + self.pos = pos + if pos is not None and self.typ is not None: + self.typ._v_pos = pos + + def __repr__(self) -> str: + temp = tuple( + map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind)) + ) + return ",".join( + [ + f"{key}->{value}" + for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp) + ] + ) + + def __eq__(self, other: object) -> bool: + """compare 2 col items""" + return all( + getattr(self, a, None) == getattr(other, a, None) + for a in ["name", "cname", "axis", "pos"] + ) + + def __ne__(self, other) -> bool: + return not self.__eq__(other) + + @property + def is_indexed(self) -> bool: + """return whether I am an indexed column""" + if not hasattr(self.table, "cols"): + # e.g. if infer hasn't been called yet, self.table will be None. + return False + return getattr(self.table.cols, self.cname).is_indexed + + def convert( + self, values: np.ndarray, nan_rep, encoding: str, errors: str + ) -> tuple[np.ndarray, np.ndarray] | tuple[Index, Index]: + """ + Convert the data from this selection to the appropriate pandas type. + """ + assert isinstance(values, np.ndarray), type(values) + + # values is a recarray + if values.dtype.fields is not None: + # Copy, otherwise values will be a view + # preventing the original recarry from being free'ed + values = values[self.cname].copy() + + val_kind = _ensure_decoded(self.kind) + values = _maybe_convert(values, val_kind, encoding, errors) + kwargs = {} + kwargs["name"] = _ensure_decoded(self.index_name) + + if self.freq is not None: + kwargs["freq"] = _ensure_decoded(self.freq) + + factory: type[Index | DatetimeIndex] = Index + if lib.is_np_dtype(values.dtype, "M") or isinstance( + values.dtype, DatetimeTZDtype + ): + factory = DatetimeIndex + elif values.dtype == "i8" and "freq" in kwargs: + # PeriodIndex data is stored as i8 + # error: Incompatible types in assignment (expression has type + # "Callable[[Any, KwArg(Any)], PeriodIndex]", variable has type + # "Union[Type[Index], Type[DatetimeIndex]]") + factory = lambda x, **kwds: PeriodIndex.from_ordinals( # type: ignore[assignment] + x, freq=kwds.get("freq", None) + )._rename( + kwds["name"] + ) + + # making an Index instance could throw a number of different errors + try: + new_pd_index = factory(values, **kwargs) + except ValueError: + # if the output freq is different that what we recorded, + # it should be None (see also 'doc example part 2') + if "freq" in kwargs: + kwargs["freq"] = None + new_pd_index = factory(values, **kwargs) + final_pd_index = _set_tz(new_pd_index, self.tz) + return final_pd_index, final_pd_index + + def take_data(self): + """return the values""" + return self.values + + @property + def attrs(self): + return self.table._v_attrs + + @property + def description(self): + return self.table.description + + @property + def col(self): + """return my current col description""" + return getattr(self.description, self.cname, None) + + @property + def cvalues(self): + """return my cython values""" + return self.values + + def __iter__(self) -> Iterator: + return iter(self.values) + + def maybe_set_size(self, min_itemsize=None) -> None: + """ + maybe set a string col itemsize: + 
min_itemsize can be an integer or a dict with this columns name + with an integer size + """ + if _ensure_decoded(self.kind) == "string": + if isinstance(min_itemsize, dict): + min_itemsize = min_itemsize.get(self.name) + + if min_itemsize is not None and self.typ.itemsize < min_itemsize: + self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos) + + def validate_names(self) -> None: + pass + + def validate_and_set(self, handler: AppendableTable, append: bool) -> None: + self.table = handler.table + self.validate_col() + self.validate_attr(append) + self.validate_metadata(handler) + self.write_metadata(handler) + self.set_attr() + + def validate_col(self, itemsize=None): + """validate this column: return the compared against itemsize""" + # validate this column for string truncation (or reset to the max size) + if _ensure_decoded(self.kind) == "string": + c = self.col + if c is not None: + if itemsize is None: + itemsize = self.itemsize + if c.itemsize < itemsize: + raise ValueError( + f"Trying to store a string with len [{itemsize}] in " + f"[{self.cname}] column but\nthis column has a limit of " + f"[{c.itemsize}]!\nConsider using min_itemsize to " + "preset the sizes on these columns" + ) + return c.itemsize + + return None + + def validate_attr(self, append: bool) -> None: + # check for backwards incompatibility + if append: + existing_kind = getattr(self.attrs, self.kind_attr, None) + if existing_kind is not None and existing_kind != self.kind: + raise TypeError( + f"incompatible kind in col [{existing_kind} - {self.kind}]" + ) + + def update_info(self, info) -> None: + """ + set/update the info for this indexable with the key/value + if there is a conflict raise/warn as needed + """ + for key in self._info_fields: + value = getattr(self, key, None) + idx = info.setdefault(self.name, {}) + + existing_value = idx.get(key) + if key in idx and value is not None and existing_value != value: + # frequency/name just warn + if key in ["freq", "index_name"]: + ws = attribute_conflict_doc % (key, existing_value, value) + warnings.warn( + ws, AttributeConflictWarning, stacklevel=find_stack_level() + ) + + # reset + idx[key] = None + setattr(self, key, None) + + else: + raise ValueError( + f"invalid info for [{self.name}] for [{key}], " + f"existing_value [{existing_value}] conflicts with " + f"new value [{value}]" + ) + elif value is not None or existing_value is not None: + idx[key] = value + + def set_info(self, info) -> None: + """set my state from the passed info""" + idx = info.get(self.name) + if idx is not None: + self.__dict__.update(idx) + + def set_attr(self) -> None: + """set the kind for this column""" + setattr(self.attrs, self.kind_attr, self.kind) + + def validate_metadata(self, handler: AppendableTable) -> None: + """validate that kind=category does not change the categories""" + if self.meta == "category": + new_metadata = self.metadata + cur_metadata = handler.read_metadata(self.cname) + if ( + new_metadata is not None + and cur_metadata is not None + and not array_equivalent( + new_metadata, cur_metadata, strict_nan=True, dtype_equal=True + ) + ): + raise ValueError( + "cannot append a categorical with " + "different categories to the existing" + ) + + def write_metadata(self, handler: AppendableTable) -> None: + """set the meta data""" + if self.metadata is not None: + handler.write_metadata(self.cname, self.metadata) + + +class GenericIndexCol(IndexCol): + """an index which is not represented in the data of the table""" + + @property + def is_indexed(self) -> 
bool: + return False + + def convert( + self, values: np.ndarray, nan_rep, encoding: str, errors: str + ) -> tuple[Index, Index]: + """ + Convert the data from this selection to the appropriate pandas type. + + Parameters + ---------- + values : np.ndarray + nan_rep : str + encoding : str + errors : str + """ + assert isinstance(values, np.ndarray), type(values) + + index = RangeIndex(len(values)) + return index, index + + def set_attr(self) -> None: + pass + + +class DataCol(IndexCol): + """ + a data holding column, by definition this is not indexable + + Parameters + ---------- + data : the actual data + cname : the column name in the table to hold the data (typically + values) + meta : a string description of the metadata + metadata : the actual metadata + """ + + is_an_indexable = False + is_data_indexable = False + _info_fields = ["tz", "ordered"] + + def __init__( + self, + name: str, + values=None, + kind=None, + typ=None, + cname: str | None = None, + pos=None, + tz=None, + ordered=None, + table=None, + meta=None, + metadata=None, + dtype: DtypeArg | None = None, + data=None, + ) -> None: + super().__init__( + name=name, + values=values, + kind=kind, + typ=typ, + pos=pos, + cname=cname, + tz=tz, + ordered=ordered, + table=table, + meta=meta, + metadata=metadata, + ) + self.dtype = dtype + self.data = data + + @property + def dtype_attr(self) -> str: + return f"{self.name}_dtype" + + @property + def meta_attr(self) -> str: + return f"{self.name}_meta" + + def __repr__(self) -> str: + temp = tuple( + map( + pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape) + ) + ) + return ",".join( + [ + f"{key}->{value}" + for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp) + ] + ) + + def __eq__(self, other: object) -> bool: + """compare 2 col items""" + return all( + getattr(self, a, None) == getattr(other, a, None) + for a in ["name", "cname", "dtype", "pos"] + ) + + def set_data(self, data: ArrayLike) -> None: + assert data is not None + assert self.dtype is None + + data, dtype_name = _get_data_and_dtype_name(data) + + self.data = data + self.dtype = dtype_name + self.kind = _dtype_to_kind(dtype_name) + + def take_data(self): + """return the data""" + return self.data + + @classmethod + def _get_atom(cls, values: ArrayLike) -> Col: + """ + Get an appropriately typed and shaped pytables.Col object for values. 
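+
+ Roughly: datetime64/timedelta64 values are stored via ``Int64Col``,
+ complex values via ``ComplexCol``, strings via ``get_atom_string``, and
+ everything else goes through ``get_atom_data`` (see the body below).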
+ """ + dtype = values.dtype + # error: Item "ExtensionDtype" of "Union[ExtensionDtype, dtype[Any]]" has no + # attribute "itemsize" + itemsize = dtype.itemsize # type: ignore[union-attr] + + shape = values.shape + if values.ndim == 1: + # EA, use block shape pretending it is 2D + # TODO(EA2D): not necessary with 2D EAs + shape = (1, values.size) + + if isinstance(values, Categorical): + codes = values.codes + atom = cls.get_atom_data(shape, kind=codes.dtype.name) + elif lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype): + atom = cls.get_atom_datetime64(shape) + elif lib.is_np_dtype(dtype, "m"): + atom = cls.get_atom_timedelta64(shape) + elif is_complex_dtype(dtype): + atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0]) + elif is_string_dtype(dtype): + atom = cls.get_atom_string(shape, itemsize) + else: + atom = cls.get_atom_data(shape, kind=dtype.name) + + return atom + + @classmethod + def get_atom_string(cls, shape, itemsize): + return _tables().StringCol(itemsize=itemsize, shape=shape[0]) + + @classmethod + def get_atom_coltype(cls, kind: str) -> type[Col]: + """return the PyTables column class for this column""" + if kind.startswith("uint"): + k4 = kind[4:] + col_name = f"UInt{k4}Col" + elif kind.startswith("period"): + # we store as integer + col_name = "Int64Col" + else: + kcap = kind.capitalize() + col_name = f"{kcap}Col" + + return getattr(_tables(), col_name) + + @classmethod + def get_atom_data(cls, shape, kind: str) -> Col: + return cls.get_atom_coltype(kind=kind)(shape=shape[0]) + + @classmethod + def get_atom_datetime64(cls, shape): + return _tables().Int64Col(shape=shape[0]) + + @classmethod + def get_atom_timedelta64(cls, shape): + return _tables().Int64Col(shape=shape[0]) + + @property + def shape(self): + return getattr(self.data, "shape", None) + + @property + def cvalues(self): + """return my cython values""" + return self.data + + def validate_attr(self, append) -> None: + """validate that we have the same order as the existing & same dtype""" + if append: + existing_fields = getattr(self.attrs, self.kind_attr, None) + if existing_fields is not None and existing_fields != list(self.values): + raise ValueError("appended items do not match existing items in table!") + + existing_dtype = getattr(self.attrs, self.dtype_attr, None) + if existing_dtype is not None and existing_dtype != self.dtype: + raise ValueError( + "appended items dtype do not match existing items dtype in table!" + ) + + def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): + """ + Convert the data from this selection to the appropriate pandas type. 
+ + Parameters + ---------- + values : np.ndarray + nan_rep : + encoding : str + errors : str + + Returns + ------- + index : listlike to become an Index + data : ndarraylike to become a column + """ + assert isinstance(values, np.ndarray), type(values) + + # values is a recarray + if values.dtype.fields is not None: + values = values[self.cname] + + assert self.typ is not None + if self.dtype is None: + # Note: in tests we never have timedelta64 or datetime64, + # so the _get_data_and_dtype_name may be unnecessary + converted, dtype_name = _get_data_and_dtype_name(values) + kind = _dtype_to_kind(dtype_name) + else: + converted = values + dtype_name = self.dtype + kind = self.kind + + assert isinstance(converted, np.ndarray) # for mypy + + # use the meta if needed + meta = _ensure_decoded(self.meta) + metadata = self.metadata + ordered = self.ordered + tz = self.tz + + assert dtype_name is not None + # convert to the correct dtype + dtype = _ensure_decoded(dtype_name) + + # reverse converts + if dtype.startswith("datetime64"): + # recreate with tz if indicated + converted = _set_tz(converted, tz, coerce=True) + + elif dtype == "timedelta64": + converted = np.asarray(converted, dtype="m8[ns]") + elif dtype == "date": + try: + converted = np.asarray( + [date.fromordinal(v) for v in converted], dtype=object + ) + except ValueError: + converted = np.asarray( + [date.fromtimestamp(v) for v in converted], dtype=object + ) + + elif meta == "category": + # we have a categorical + categories = metadata + codes = converted.ravel() + + # if we have stored a NaN in the categories + # then strip it; in theory we could have BOTH + # -1s in the codes and nulls :< + if categories is None: + # Handle case of NaN-only categorical columns in which case + # the categories are an empty array; when this is stored, + # pytables cannot write a zero-len array, so on readback + # the categories would be None and `read_hdf()` would fail. + categories = Index([], dtype=np.float64) + else: + mask = isna(categories) + if mask.any(): + categories = categories[~mask] + codes[codes != -1] -= mask.astype(int).cumsum()._values + + converted = Categorical.from_codes( + codes, categories=categories, ordered=ordered, validate=False + ) + + else: + try: + converted = converted.astype(dtype, copy=False) + except TypeError: + converted = converted.astype("O", copy=False) + + # convert nans / decode + if _ensure_decoded(kind) == "string": + converted = _unconvert_string_array( + converted, nan_rep=nan_rep, encoding=encoding, errors=errors + ) + + return self.values, converted + + def set_attr(self) -> None: + """set the data for this column""" + setattr(self.attrs, self.kind_attr, self.values) + setattr(self.attrs, self.meta_attr, self.meta) + assert self.dtype is not None + setattr(self.attrs, self.dtype_attr, self.dtype) + + +class DataIndexableCol(DataCol): + """represent a data column that can be indexed""" + + is_data_indexable = True + + def validate_names(self) -> None: + if not is_string_dtype(Index(self.values).dtype): + # TODO: should the message here be more specifically non-str? 
+ raise ValueError("cannot have non-object label DataIndexableCol") + + @classmethod + def get_atom_string(cls, shape, itemsize): + return _tables().StringCol(itemsize=itemsize) + + @classmethod + def get_atom_data(cls, shape, kind: str) -> Col: + return cls.get_atom_coltype(kind=kind)() + + @classmethod + def get_atom_datetime64(cls, shape): + return _tables().Int64Col() + + @classmethod + def get_atom_timedelta64(cls, shape): + return _tables().Int64Col() + + +class GenericDataIndexableCol(DataIndexableCol): + """represent a generic pytables data column""" + + +class Fixed: + """ + represent an object in my store + facilitate read/write of various types of objects + this is an abstract base class + + Parameters + ---------- + parent : HDFStore + group : Node + The group node where the table resides. + """ + + pandas_kind: str + format_type: str = "fixed" # GH#30962 needed by dask + obj_type: type[DataFrame | Series] + ndim: int + parent: HDFStore + is_table: bool = False + + def __init__( + self, + parent: HDFStore, + group: Node, + encoding: str | None = "UTF-8", + errors: str = "strict", + ) -> None: + assert isinstance(parent, HDFStore), type(parent) + assert _table_mod is not None # needed for mypy + assert isinstance(group, _table_mod.Node), type(group) + self.parent = parent + self.group = group + self.encoding = _ensure_encoding(encoding) + self.errors = errors + + @property + def is_old_version(self) -> bool: + return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1 + + @property + def version(self) -> tuple[int, int, int]: + """compute and set our version""" + version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None)) + try: + version = tuple(int(x) for x in version.split(".")) + if len(version) == 2: + version = version + (0,) + except AttributeError: + version = (0, 0, 0) + return version + + @property + def pandas_type(self): + return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None)) + + def __repr__(self) -> str: + """return a pretty representation of myself""" + self.infer_axes() + s = self.shape + if s is not None: + if isinstance(s, (list, tuple)): + jshape = ",".join([pprint_thing(x) for x in s]) + s = f"[{jshape}]" + return f"{self.pandas_type:12.12} (shape->{s})" + return self.pandas_type + + def set_object_info(self) -> None: + """set my pandas type & version""" + self.attrs.pandas_type = str(self.pandas_kind) + self.attrs.pandas_version = str(_version) + + def copy(self) -> Fixed: + new_self = copy.copy(self) + return new_self + + @property + def shape(self): + return self.nrows + + @property + def pathname(self): + return self.group._v_pathname + + @property + def _handle(self): + return self.parent._handle + + @property + def _filters(self): + return self.parent._filters + + @property + def _complevel(self) -> int: + return self.parent._complevel + + @property + def _fletcher32(self) -> bool: + return self.parent._fletcher32 + + @property + def attrs(self): + return self.group._v_attrs + + def set_attrs(self) -> None: + """set our object attributes""" + + def get_attrs(self) -> None: + """get our object attributes""" + + @property + def storable(self): + """return my storable""" + return self.group + + @property + def is_exists(self) -> bool: + return False + + @property + def nrows(self): + return getattr(self.storable, "nrows", None) + + def validate(self, other) -> Literal[True] | None: + """validate against an existing storable""" + if other is None: + return None + return True + + def 
validate_version(self, where=None) -> None: + """are we trying to operate on an old version?""" + + def infer_axes(self) -> bool: + """ + infer the axes of my storer + return a boolean indicating if we have a valid storer or not + """ + s = self.storable + if s is None: + return False + self.get_attrs() + return True + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ): + raise NotImplementedError( + "cannot read on an abstract storer: subclasses should implement" + ) + + def write(self, obj, **kwargs) -> None: + raise NotImplementedError( + "cannot write on an abstract storer: subclasses should implement" + ) + + def delete( + self, where=None, start: int | None = None, stop: int | None = None + ) -> None: + """ + support fully deleting the node in its entirety (only) - where + specification must be None + """ + if com.all_none(where, start, stop): + self._handle.remove_node(self.group, recursive=True) + return None + + raise TypeError("cannot delete on an abstract storer") + + +class GenericFixed(Fixed): + """a generified fixed version""" + + _index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"} + _reverse_index_map = {v: k for k, v in _index_type_map.items()} + attributes: list[str] = [] + + # indexer helpers + def _class_to_alias(self, cls) -> str: + return self._index_type_map.get(cls, "") + + def _alias_to_class(self, alias): + if isinstance(alias, type): # pragma: no cover + # compat: for a short period of time master stored types + return alias + return self._reverse_index_map.get(alias, Index) + + def _get_index_factory(self, attrs): + index_class = self._alias_to_class( + _ensure_decoded(getattr(attrs, "index_class", "")) + ) + + factory: Callable + + if index_class == DatetimeIndex: + + def f(values, freq=None, tz=None): + # data are already in UTC, localize and convert if tz present + dta = DatetimeArray._simple_new( + values.values, dtype=values.dtype, freq=freq + ) + result = DatetimeIndex._simple_new(dta, name=None) + if tz is not None: + result = result.tz_localize("UTC").tz_convert(tz) + return result + + factory = f + elif index_class == PeriodIndex: + + def f(values, freq=None, tz=None): + dtype = PeriodDtype(freq) + parr = PeriodArray._simple_new(values, dtype=dtype) + return PeriodIndex._simple_new(parr, name=None) + + factory = f + else: + factory = index_class + + kwargs = {} + if "freq" in attrs: + kwargs["freq"] = attrs["freq"] + if index_class is Index: + # DTI/PI would be gotten by _alias_to_class + factory = TimedeltaIndex + + if "tz" in attrs: + if isinstance(attrs["tz"], bytes): + # created by python2 + kwargs["tz"] = attrs["tz"].decode("utf-8") + else: + # created by python3 + kwargs["tz"] = attrs["tz"] + assert index_class is DatetimeIndex # just checking + + return factory, kwargs + + def validate_read(self, columns, where) -> None: + """ + raise if any keywords are passed which are not-None + """ + if columns is not None: + raise TypeError( + "cannot pass a column specification when reading " + "a Fixed format store. this store must be selected in its entirety" + ) + if where is not None: + raise TypeError( + "cannot pass a where specification when reading " + "from a Fixed format store. 
this store must be selected in its entirety" + ) + + @property + def is_exists(self) -> bool: + return True + + def set_attrs(self) -> None: + """set our object attributes""" + self.attrs.encoding = self.encoding + self.attrs.errors = self.errors + + def get_attrs(self) -> None: + """retrieve our attributes""" + self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None)) + self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict")) + for n in self.attributes: + setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None))) + + def write(self, obj, **kwargs) -> None: + self.set_attrs() + + def read_array(self, key: str, start: int | None = None, stop: int | None = None): + """read an array for the specified node (off of group""" + import tables + + node = getattr(self.group, key) + attrs = node._v_attrs + + transposed = getattr(attrs, "transposed", False) + + if isinstance(node, tables.VLArray): + ret = node[0][start:stop] + else: + dtype = _ensure_decoded(getattr(attrs, "value_type", None)) + shape = getattr(attrs, "shape", None) + + if shape is not None: + # length 0 axis + ret = np.empty(shape, dtype=dtype) + else: + ret = node[start:stop] + + if dtype and dtype.startswith("datetime64"): + # reconstruct a timezone if indicated + tz = getattr(attrs, "tz", None) + ret = _set_tz(ret, tz, coerce=True) + + elif dtype == "timedelta64": + ret = np.asarray(ret, dtype="m8[ns]") + + if transposed: + return ret.T + else: + return ret + + def read_index( + self, key: str, start: int | None = None, stop: int | None = None + ) -> Index: + variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety")) + + if variety == "multi": + return self.read_multi_index(key, start=start, stop=stop) + elif variety == "regular": + node = getattr(self.group, key) + index = self.read_index_node(node, start=start, stop=stop) + return index + else: # pragma: no cover + raise TypeError(f"unrecognized index variety: {variety}") + + def write_index(self, key: str, index: Index) -> None: + if isinstance(index, MultiIndex): + setattr(self.attrs, f"{key}_variety", "multi") + self.write_multi_index(key, index) + else: + setattr(self.attrs, f"{key}_variety", "regular") + converted = _convert_index("index", index, self.encoding, self.errors) + + self.write_array(key, converted.values) + + node = getattr(self.group, key) + node._v_attrs.kind = converted.kind + node._v_attrs.name = index.name + + if isinstance(index, (DatetimeIndex, PeriodIndex)): + node._v_attrs.index_class = self._class_to_alias(type(index)) + + if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + node._v_attrs.freq = index.freq + + if isinstance(index, DatetimeIndex) and index.tz is not None: + node._v_attrs.tz = _get_tz(index.tz) + + def write_multi_index(self, key: str, index: MultiIndex) -> None: + setattr(self.attrs, f"{key}_nlevels", index.nlevels) + + for i, (lev, level_codes, name) in enumerate( + zip(index.levels, index.codes, index.names) + ): + # write the level + if isinstance(lev.dtype, ExtensionDtype): + raise NotImplementedError( + "Saving a MultiIndex with an extension dtype is not supported." 
+ ) + level_key = f"{key}_level{i}" + conv_level = _convert_index(level_key, lev, self.encoding, self.errors) + self.write_array(level_key, conv_level.values) + node = getattr(self.group, level_key) + node._v_attrs.kind = conv_level.kind + node._v_attrs.name = name + + # write the name + setattr(node._v_attrs, f"{key}_name{name}", name) + + # write the labels + label_key = f"{key}_label{i}" + self.write_array(label_key, level_codes) + + def read_multi_index( + self, key: str, start: int | None = None, stop: int | None = None + ) -> MultiIndex: + nlevels = getattr(self.attrs, f"{key}_nlevels") + + levels = [] + codes = [] + names: list[Hashable] = [] + for i in range(nlevels): + level_key = f"{key}_level{i}" + node = getattr(self.group, level_key) + lev = self.read_index_node(node, start=start, stop=stop) + levels.append(lev) + names.append(lev.name) + + label_key = f"{key}_label{i}" + level_codes = self.read_array(label_key, start=start, stop=stop) + codes.append(level_codes) + + return MultiIndex( + levels=levels, codes=codes, names=names, verify_integrity=True + ) + + def read_index_node( + self, node: Node, start: int | None = None, stop: int | None = None + ) -> Index: + data = node[start:stop] + # If the index was an empty array write_array_empty() will + # have written a sentinel. Here we replace it with the original. + if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0: + data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type) + kind = _ensure_decoded(node._v_attrs.kind) + name = None + + if "name" in node._v_attrs: + name = _ensure_str(node._v_attrs.name) + name = _ensure_decoded(name) + + attrs = node._v_attrs + factory, kwargs = self._get_index_factory(attrs) + + if kind in ("date", "object"): + index = factory( + _unconvert_index( + data, kind, encoding=self.encoding, errors=self.errors + ), + dtype=object, + **kwargs, + ) + else: + index = factory( + _unconvert_index( + data, kind, encoding=self.encoding, errors=self.errors + ), + **kwargs, + ) + + index.name = name + + return index + + def write_array_empty(self, key: str, value: ArrayLike) -> None: + """write a 0-len array""" + # ugly hack for length 0 axes + arr = np.empty((1,) * value.ndim) + self._handle.create_array(self.group, key, arr) + node = getattr(self.group, key) + node._v_attrs.value_type = str(value.dtype) + node._v_attrs.shape = value.shape + + def write_array( + self, key: str, obj: AnyArrayLike, items: Index | None = None + ) -> None: + # TODO: we only have a few tests that get here, the only EA + # that gets passed is DatetimeArray, and we never have + # both self._filters and EA + + value = extract_array(obj, extract_numpy=True) + + if key in self.group: + self._handle.remove_node(self.group, key) + + # Transform needed to interface with pytables row/col notation + empty_array = value.size == 0 + transposed = False + + if isinstance(value.dtype, CategoricalDtype): + raise NotImplementedError( + "Cannot store a category dtype in a HDF5 dataset that uses format=" + '"fixed". Use format="table".' + ) + if not empty_array: + if hasattr(value, "T"): + # ExtensionArrays (1d) may not have transpose. 
+ value = value.T + transposed = True + + atom = None + if self._filters is not None: + with suppress(ValueError): + # get the atom for this datatype + atom = _tables().Atom.from_dtype(value.dtype) + + if atom is not None: + # We only get here if self._filters is non-None and + # the Atom.from_dtype call succeeded + + # create an empty chunked array and fill it from value + if not empty_array: + ca = self._handle.create_carray( + self.group, key, atom, value.shape, filters=self._filters + ) + ca[:] = value + + else: + self.write_array_empty(key, value) + + elif value.dtype.type == np.object_: + # infer the type, warn if we have a non-string type here (for + # performance) + inferred_type = lib.infer_dtype(value, skipna=False) + if empty_array: + pass + elif inferred_type == "string": + pass + else: + ws = performance_doc % (inferred_type, key, items) + warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level()) + + vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) + vlarr.append(value) + + elif lib.is_np_dtype(value.dtype, "M"): + self._handle.create_array(self.group, key, value.view("i8")) + getattr(self.group, key)._v_attrs.value_type = str(value.dtype) + elif isinstance(value.dtype, DatetimeTZDtype): + # store as UTC + # with a zone + + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "asi8" + self._handle.create_array( + self.group, key, value.asi8 # type: ignore[union-attr] + ) + + node = getattr(self.group, key) + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "tz" + node._v_attrs.tz = _get_tz(value.tz) # type: ignore[union-attr] + node._v_attrs.value_type = f"datetime64[{value.dtype.unit}]" + elif lib.is_np_dtype(value.dtype, "m"): + self._handle.create_array(self.group, key, value.view("i8")) + getattr(self.group, key)._v_attrs.value_type = "timedelta64" + elif empty_array: + self.write_array_empty(key, value) + else: + self._handle.create_array(self.group, key, value) + + getattr(self.group, key)._v_attrs.transposed = transposed + + +class SeriesFixed(GenericFixed): + pandas_kind = "series" + attributes = ["name"] + + name: Hashable + + @property + def shape(self): + try: + return (len(self.group.values),) + except (TypeError, AttributeError): + return None + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ) -> Series: + self.validate_read(columns, where) + index = self.read_index("index", start=start, stop=stop) + values = self.read_array("values", start=start, stop=stop) + result = Series(values, index=index, name=self.name, copy=False) + if using_pyarrow_string_dtype() and is_string_array(values, skipna=True): + result = result.astype("string[pyarrow_numpy]") + return result + + def write(self, obj, **kwargs) -> None: + super().write(obj, **kwargs) + self.write_index("index", obj.index) + self.write_array("values", obj) + self.attrs.name = obj.name + + +class BlockManagerFixed(GenericFixed): + attributes = ["ndim", "nblocks"] + + nblocks: int + + @property + def shape(self) -> Shape | None: + try: + ndim = self.ndim + + # items + items = 0 + for i in range(self.nblocks): + node = getattr(self.group, f"block{i}_items") + shape = getattr(node, "shape", None) + if shape is not None: + items += shape[0] + + # data shape + node = self.group.block0_values + shape = getattr(node, "shape", None) + if shape is not None: + shape = list(shape[0 : (ndim - 1)]) + else: + shape = [] + + shape.append(items) + + return shape + 
except AttributeError: + return None + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ) -> DataFrame: + # start, stop applied to rows, so 0th axis only + self.validate_read(columns, where) + select_axis = self.obj_type()._get_block_manager_axis(0) + + axes = [] + for i in range(self.ndim): + _start, _stop = (start, stop) if i == select_axis else (None, None) + ax = self.read_index(f"axis{i}", start=_start, stop=_stop) + axes.append(ax) + + items = axes[0] + dfs = [] + + for i in range(self.nblocks): + blk_items = self.read_index(f"block{i}_items") + values = self.read_array(f"block{i}_values", start=_start, stop=_stop) + + columns = items[items.get_indexer(blk_items)] + df = DataFrame(values.T, columns=columns, index=axes[1], copy=False) + if using_pyarrow_string_dtype() and is_string_array(values, skipna=True): + df = df.astype("string[pyarrow_numpy]") + dfs.append(df) + + if len(dfs) > 0: + out = concat(dfs, axis=1, copy=True) + if using_copy_on_write(): + # with CoW, concat ignores the copy keyword. Here, we still want + # to copy to enforce optimized column-major layout + out = out.copy() + out = out.reindex(columns=items, copy=False) + return out + + return DataFrame(columns=axes[0], index=axes[1]) + + def write(self, obj, **kwargs) -> None: + super().write(obj, **kwargs) + + # TODO(ArrayManager) HDFStore relies on accessing the blocks + if isinstance(obj._mgr, ArrayManager): + obj = obj._as_manager("block") + + data = obj._mgr + if not data.is_consolidated(): + data = data.consolidate() + + self.attrs.ndim = data.ndim + for i, ax in enumerate(data.axes): + if i == 0 and (not ax.is_unique): + raise ValueError("Columns index has to be unique for fixed format") + self.write_index(f"axis{i}", ax) + + # Supporting mixed-type DataFrame objects...nontrivial + self.attrs.nblocks = len(data.blocks) + for i, blk in enumerate(data.blocks): + # I have no idea why, but writing values before items fixed #2299 + blk_items = data.items.take(blk.mgr_locs) + self.write_array(f"block{i}_values", blk.values, items=blk_items) + self.write_index(f"block{i}_items", blk_items) + + +class FrameFixed(BlockManagerFixed): + pandas_kind = "frame" + obj_type = DataFrame + + +class Table(Fixed): + """ + represent a table: + facilitate read/write of various types of tables + + Attrs in Table Node + ------------------- + These are attributes that are store in the main table node, they are + necessary to recreate these tables when read back in. 
+ + index_axes : a list of tuples of the (original indexing axis and + index column) + non_index_axes: a list of tuples of the (original index axis and + columns on a non-indexing axis) + values_axes : a list of the columns which comprise the data of this + table + data_columns : a list of the columns that we are allowing indexing + (these become single columns in values_axes) + nan_rep : the string to use for nan representations for string + objects + levels : the names of levels + metadata : the names of the metadata columns + """ + + pandas_kind = "wide_table" + format_type: str = "table" # GH#30962 needed by dask + table_type: str + levels: int | list[Hashable] = 1 + is_table = True + + metadata: list + + def __init__( + self, + parent: HDFStore, + group: Node, + encoding: str | None = None, + errors: str = "strict", + index_axes: list[IndexCol] | None = None, + non_index_axes: list[tuple[AxisInt, Any]] | None = None, + values_axes: list[DataCol] | None = None, + data_columns: list | None = None, + info: dict | None = None, + nan_rep=None, + ) -> None: + super().__init__(parent, group, encoding=encoding, errors=errors) + self.index_axes = index_axes or [] + self.non_index_axes = non_index_axes or [] + self.values_axes = values_axes or [] + self.data_columns = data_columns or [] + self.info = info or {} + self.nan_rep = nan_rep + + @property + def table_type_short(self) -> str: + return self.table_type.split("_")[0] + + def __repr__(self) -> str: + """return a pretty representation of myself""" + self.infer_axes() + jdc = ",".join(self.data_columns) if len(self.data_columns) else "" + dc = f",dc->[{jdc}]" + + ver = "" + if self.is_old_version: + jver = ".".join([str(x) for x in self.version]) + ver = f"[{jver}]" + + jindex_axes = ",".join([a.name for a in self.index_axes]) + return ( + f"{self.pandas_type:12.12}{ver} " + f"(typ->{self.table_type_short},nrows->{self.nrows}," + f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})" + ) + + def __getitem__(self, c: str): + """return the axis for c""" + for a in self.axes: + if c == a.name: + return a + return None + + def validate(self, other) -> None: + """validate against an existing table""" + if other is None: + return + + if other.table_type != self.table_type: + raise TypeError( + "incompatible table_type with existing " + f"[{other.table_type} - {self.table_type}]" + ) + + for c in ["index_axes", "non_index_axes", "values_axes"]: + sv = getattr(self, c, None) + ov = getattr(other, c, None) + if sv != ov: + # show the error for the specific axes + # Argument 1 to "enumerate" has incompatible type + # "Optional[Any]"; expected "Iterable[Any]" [arg-type] + for i, sax in enumerate(sv): # type: ignore[arg-type] + # Value of type "Optional[Any]" is not indexable [index] + oax = ov[i] # type: ignore[index] + if sax != oax: + raise ValueError( + f"invalid combination of [{c}] on appending data " + f"[{sax}] vs current table [{oax}]" + ) + + # should never get here + raise Exception( + f"invalid combination of [{c}] on appending data [{sv}] vs " + f"current table [{ov}]" + ) + + @property + def is_multi_index(self) -> bool: + """the levels attribute is 1 or a list in the case of a multi-index""" + return isinstance(self.levels, list) + + def validate_multiindex( + self, obj: DataFrame | Series + ) -> tuple[DataFrame, list[Hashable]]: + """ + validate that we can store the multi-index; reset and return the + new object + """ + levels = com.fill_missing_names(obj.index.names) + try: + reset_obj = obj.reset_index() + except ValueError as err: + 
raise ValueError( + "duplicate names/columns in the multi-index when storing as a table" + ) from err + assert isinstance(reset_obj, DataFrame) # for mypy + return reset_obj, levels + + @property + def nrows_expected(self) -> int: + """based on our axes, compute the expected nrows""" + return np.prod([i.cvalues.shape[0] for i in self.index_axes]) + + @property + def is_exists(self) -> bool: + """has this table been created""" + return "table" in self.group + + @property + def storable(self): + return getattr(self.group, "table", None) + + @property + def table(self): + """return the table group (this is my storable)""" + return self.storable + + @property + def dtype(self): + return self.table.dtype + + @property + def description(self): + return self.table.description + + @property + def axes(self) -> itertools.chain[IndexCol]: + return itertools.chain(self.index_axes, self.values_axes) + + @property + def ncols(self) -> int: + """the number of total columns in the values axes""" + return sum(len(a.values) for a in self.values_axes) + + @property + def is_transposed(self) -> bool: + return False + + @property + def data_orientation(self) -> tuple[int, ...]: + """return a tuple of my permutated axes, non_indexable at the front""" + return tuple( + itertools.chain( + [int(a[0]) for a in self.non_index_axes], + [int(a.axis) for a in self.index_axes], + ) + ) + + def queryables(self) -> dict[str, Any]: + """return a dict of the kinds allowable columns for this object""" + # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here + axis_names = {0: "index", 1: "columns"} + + # compute the values_axes queryables + d1 = [(a.cname, a) for a in self.index_axes] + d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes] + d3 = [ + (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns) + ] + + return dict(d1 + d2 + d3) + + def index_cols(self): + """return a list of my index cols""" + # Note: each `i.cname` below is assured to be a str. + return [(i.axis, i.cname) for i in self.index_axes] + + def values_cols(self) -> list[str]: + """return a list of my values cols""" + return [i.cname for i in self.values_axes] + + def _get_metadata_path(self, key: str) -> str: + """return the metadata pathname for this key""" + group = self.group._v_pathname + return f"{group}/meta/{key}/meta" + + def write_metadata(self, key: str, values: np.ndarray) -> None: + """ + Write out a metadata array to the key as a fixed-format Series. 
+ + Parameters + ---------- + key : str + values : ndarray + """ + self.parent.put( + self._get_metadata_path(key), + Series(values, copy=False), + format="table", + encoding=self.encoding, + errors=self.errors, + nan_rep=self.nan_rep, + ) + + def read_metadata(self, key: str): + """return the meta data array for this key""" + if getattr(getattr(self.group, "meta", None), key, None) is not None: + return self.parent.select(self._get_metadata_path(key)) + return None + + def set_attrs(self) -> None: + """set our table type & indexables""" + self.attrs.table_type = str(self.table_type) + self.attrs.index_cols = self.index_cols() + self.attrs.values_cols = self.values_cols() + self.attrs.non_index_axes = self.non_index_axes + self.attrs.data_columns = self.data_columns + self.attrs.nan_rep = self.nan_rep + self.attrs.encoding = self.encoding + self.attrs.errors = self.errors + self.attrs.levels = self.levels + self.attrs.info = self.info + + def get_attrs(self) -> None: + """retrieve our attributes""" + self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or [] + self.data_columns = getattr(self.attrs, "data_columns", None) or [] + self.info = getattr(self.attrs, "info", None) or {} + self.nan_rep = getattr(self.attrs, "nan_rep", None) + self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None)) + self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict")) + self.levels: list[Hashable] = getattr(self.attrs, "levels", None) or [] + self.index_axes = [a for a in self.indexables if a.is_an_indexable] + self.values_axes = [a for a in self.indexables if not a.is_an_indexable] + + def validate_version(self, where=None) -> None: + """are we trying to operate on an old version?""" + if where is not None: + if self.is_old_version: + ws = incompatibility_doc % ".".join([str(x) for x in self.version]) + warnings.warn( + ws, + IncompatibilityWarning, + stacklevel=find_stack_level(), + ) + + def validate_min_itemsize(self, min_itemsize) -> None: + """ + validate the min_itemsize doesn't contain items that are not in the + axes this needs data_columns to be defined + """ + if min_itemsize is None: + return + if not isinstance(min_itemsize, dict): + return + + q = self.queryables() + for k in min_itemsize: + # ok, apply generally + if k == "values": + continue + if k not in q: + raise ValueError( + f"min_itemsize has the key [{k}] which is not an axis or " + "data_column" + ) + + @cache_readonly + def indexables(self): + """create/cache the indexables if they don't exist""" + _indexables = [] + + desc = self.description + table_attrs = self.table.attrs + + # Note: each of the `name` kwargs below are str, ensured + # by the definition in index_cols. + # index columns + for i, (axis, name) in enumerate(self.attrs.index_cols): + atom = getattr(desc, name) + md = self.read_metadata(name) + meta = "category" if md is not None else None + + kind_attr = f"{name}_kind" + kind = getattr(table_attrs, kind_attr, None) + + index_col = IndexCol( + name=name, + axis=axis, + pos=i, + kind=kind, + typ=atom, + table=self.table, + meta=meta, + metadata=md, + ) + _indexables.append(index_col) + + # values columns + dc = set(self.data_columns) + base_pos = len(_indexables) + + def f(i, c): + assert isinstance(c, str) + klass = DataCol + if c in dc: + klass = DataIndexableCol + + atom = getattr(desc, c) + adj_name = _maybe_adjust_name(c, self.version) + + # TODO: why kind_attr here? 
+ values = getattr(table_attrs, f"{adj_name}_kind", None) + dtype = getattr(table_attrs, f"{adj_name}_dtype", None) + # Argument 1 to "_dtype_to_kind" has incompatible type + # "Optional[Any]"; expected "str" [arg-type] + kind = _dtype_to_kind(dtype) # type: ignore[arg-type] + + md = self.read_metadata(c) + # TODO: figure out why these two versions of `meta` dont always match. + # meta = "category" if md is not None else None + meta = getattr(table_attrs, f"{adj_name}_meta", None) + + obj = klass( + name=adj_name, + cname=c, + values=values, + kind=kind, + pos=base_pos + i, + typ=atom, + table=self.table, + meta=meta, + metadata=md, + dtype=dtype, + ) + return obj + + # Note: the definition of `values_cols` ensures that each + # `c` below is a str. + _indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)]) + + return _indexables + + def create_index( + self, columns=None, optlevel=None, kind: str | None = None + ) -> None: + """ + Create a pytables index on the specified columns. + + Parameters + ---------- + columns : None, bool, or listlike[str] + Indicate which columns to create an index on. + + * False : Do not create any indexes. + * True : Create indexes on all columns. + * None : Create indexes on all columns. + * listlike : Create indexes on the given columns. + + optlevel : int or None, default None + Optimization level, if None, pytables defaults to 6. + kind : str or None, default None + Kind of index, if None, pytables defaults to "medium". + + Raises + ------ + TypeError if trying to create an index on a complex-type column. + + Notes + ----- + Cannot index Time64Col or ComplexCol. + Pytables must be >= 3.0. + """ + if not self.infer_axes(): + return + if columns is False: + return + + # index all indexables and data_columns + if columns is None or columns is True: + columns = [a.cname for a in self.axes if a.is_data_indexable] + if not isinstance(columns, (tuple, list)): + columns = [columns] + + kw = {} + if optlevel is not None: + kw["optlevel"] = optlevel + if kind is not None: + kw["kind"] = kind + + table = self.table + for c in columns: + v = getattr(table.cols, c, None) + if v is not None: + # remove the index if the kind/optlevel have changed + if v.is_indexed: + index = v.index + cur_optlevel = index.optlevel + cur_kind = index.kind + + if kind is not None and cur_kind != kind: + v.remove_index() + else: + kw["kind"] = cur_kind + + if optlevel is not None and cur_optlevel != optlevel: + v.remove_index() + else: + kw["optlevel"] = cur_optlevel + + # create the index + if not v.is_indexed: + if v.type.startswith("complex"): + raise TypeError( + "Columns containing complex values can be stored but " + "cannot be indexed when using table format. Either use " + "fixed format, set index=False, or do not include " + "the columns containing complex values to " + "data_columns when initializing the table." + ) + v.create_index(**kw) + elif c in self.non_index_axes[0][1]: + # GH 28156 + raise AttributeError( + f"column {c} is not a data_column.\n" + f"In order to read column {c} you must reload the dataframe \n" + f"into HDFStore and include {c} with the data_columns argument." + ) + + def _read_axes( + self, where, start: int | None = None, stop: int | None = None + ) -> list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]: + """ + Create the axes sniffed from the table. + + Parameters + ---------- + where : ??? 
+ start : int or None, default None + stop : int or None, default None + + Returns + ------- + List[Tuple[index_values, column_values]] + """ + # create the selection + selection = Selection(self, where=where, start=start, stop=stop) + values = selection.select() + + results = [] + # convert the data + for a in self.axes: + a.set_info(self.info) + res = a.convert( + values, + nan_rep=self.nan_rep, + encoding=self.encoding, + errors=self.errors, + ) + results.append(res) + + return results + + @classmethod + def get_object(cls, obj, transposed: bool): + """return the data for this obj""" + return obj + + def validate_data_columns(self, data_columns, min_itemsize, non_index_axes): + """ + take the input data_columns and min_itemize and create a data + columns spec + """ + if not len(non_index_axes): + return [] + + axis, axis_labels = non_index_axes[0] + info = self.info.get(axis, {}) + if info.get("type") == "MultiIndex" and data_columns: + raise ValueError( + f"cannot use a multi-index on axis [{axis}] with " + f"data_columns {data_columns}" + ) + + # evaluate the passed data_columns, True == use all columns + # take only valid axis labels + if data_columns is True: + data_columns = list(axis_labels) + elif data_columns is None: + data_columns = [] + + # if min_itemsize is a dict, add the keys (exclude 'values') + if isinstance(min_itemsize, dict): + existing_data_columns = set(data_columns) + data_columns = list(data_columns) # ensure we do not modify + data_columns.extend( + [ + k + for k in min_itemsize.keys() + if k != "values" and k not in existing_data_columns + ] + ) + + # return valid columns in the order of our axis + return [c for c in data_columns if c in axis_labels] + + def _create_axes( + self, + axes, + obj: DataFrame, + validate: bool = True, + nan_rep=None, + data_columns=None, + min_itemsize=None, + ): + """ + Create and return the axes. + + Parameters + ---------- + axes: list or None + The names or numbers of the axes to create. + obj : DataFrame + The object to create axes on. + validate: bool, default True + Whether to validate the obj against an existing object already written. + nan_rep : + A value to use for string column nan_rep. + data_columns : List[str], True, or None, default None + Specify the columns that we want to create to allow indexing on. + + * True : Use all available columns. + * None : Use no columns. + * List[str] : Use the specified columns. + + min_itemsize: Dict[str, int] or None, default None + The min itemsize for a column in bytes. + """ + if not isinstance(obj, DataFrame): + group = self.group._v_name + raise TypeError( + f"cannot properly create the storer for: [group->{group}," + f"value->{type(obj)}]" + ) + + # set the default axes if needed + if axes is None: + axes = [0] + + # map axes to numbers + axes = [obj._get_axis_number(a) for a in axes] + + # do we have an existing table (if so, use its axes & data_columns) + if self.infer_axes(): + table_exists = True + axes = [a.axis for a in self.index_axes] + data_columns = list(self.data_columns) + nan_rep = self.nan_rep + # TODO: do we always have validate=True here? 
+ else: + table_exists = False + + new_info = self.info + + assert self.ndim == 2 # with next check, we must have len(axes) == 1 + # currently support on ndim-1 axes + if len(axes) != self.ndim - 1: + raise ValueError( + "currently only support ndim-1 indexers in an AppendableTable" + ) + + # create according to the new data + new_non_index_axes: list = [] + + # nan_representation + if nan_rep is None: + nan_rep = "nan" + + # We construct the non-index-axis first, since that alters new_info + idx = next(x for x in [0, 1] if x not in axes) + + a = obj.axes[idx] + # we might be able to change the axes on the appending data if necessary + append_axis = list(a) + if table_exists: + indexer = len(new_non_index_axes) # i.e. 0 + exist_axis = self.non_index_axes[indexer][1] + if not array_equivalent( + np.array(append_axis), + np.array(exist_axis), + strict_nan=True, + dtype_equal=True, + ): + # ahah! -> reindex + if array_equivalent( + np.array(sorted(append_axis)), + np.array(sorted(exist_axis)), + strict_nan=True, + dtype_equal=True, + ): + append_axis = exist_axis + + # the non_index_axes info + info = new_info.setdefault(idx, {}) + info["names"] = list(a.names) + info["type"] = type(a).__name__ + + new_non_index_axes.append((idx, append_axis)) + + # Now we can construct our new index axis + idx = axes[0] + a = obj.axes[idx] + axis_name = obj._get_axis_name(idx) + new_index = _convert_index(axis_name, a, self.encoding, self.errors) + new_index.axis = idx + + # Because we are always 2D, there is only one new_index, so + # we know it will have pos=0 + new_index.set_pos(0) + new_index.update_info(new_info) + new_index.maybe_set_size(min_itemsize) # check for column conflicts + + new_index_axes = [new_index] + j = len(new_index_axes) # i.e. 1 + assert j == 1 + + # reindex by our non_index_axes & compute data_columns + assert len(new_non_index_axes) == 1 + for a in new_non_index_axes: + obj = _reindex_axis(obj, a[0], a[1]) + + transposed = new_index.axis == 1 + + # figure out data_columns and get out blocks + data_columns = self.validate_data_columns( + data_columns, min_itemsize, new_non_index_axes + ) + + frame = self.get_object(obj, transposed)._consolidate() + + blocks, blk_items = self._get_blocks_and_items( + frame, table_exists, new_non_index_axes, self.values_axes, data_columns + ) + + # add my values + vaxes = [] + for i, (blk, b_items) in enumerate(zip(blocks, blk_items)): + # shape of the data column are the indexable axes + klass = DataCol + name = None + + # we have a data_column + if data_columns and len(b_items) == 1 and b_items[0] in data_columns: + klass = DataIndexableCol + name = b_items[0] + if not (name is None or isinstance(name, str)): + # TODO: should the message here be more specifically non-str? 
+ raise ValueError("cannot have non-object label DataIndexableCol") + + # make sure that we match up the existing columns + # if we have an existing table + existing_col: DataCol | None + + if table_exists and validate: + try: + existing_col = self.values_axes[i] + except (IndexError, KeyError) as err: + raise ValueError( + f"Incompatible appended table [{blocks}]" + f"with existing table [{self.values_axes}]" + ) from err + else: + existing_col = None + + new_name = name or f"values_block_{i}" + data_converted = _maybe_convert_for_string_atom( + new_name, + blk.values, + existing_col=existing_col, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + encoding=self.encoding, + errors=self.errors, + columns=b_items, + ) + adj_name = _maybe_adjust_name(new_name, self.version) + + typ = klass._get_atom(data_converted) + kind = _dtype_to_kind(data_converted.dtype.name) + tz = None + if getattr(data_converted, "tz", None) is not None: + tz = _get_tz(data_converted.tz) + + meta = metadata = ordered = None + if isinstance(data_converted.dtype, CategoricalDtype): + ordered = data_converted.ordered + meta = "category" + metadata = np.asarray(data_converted.categories).ravel() + + data, dtype_name = _get_data_and_dtype_name(data_converted) + + col = klass( + name=adj_name, + cname=new_name, + values=list(b_items), + typ=typ, + pos=j, + kind=kind, + tz=tz, + ordered=ordered, + meta=meta, + metadata=metadata, + dtype=dtype_name, + data=data, + ) + col.update_info(new_info) + + vaxes.append(col) + + j += 1 + + dcs = [col.name for col in vaxes if col.is_data_indexable] + + new_table = type(self)( + parent=self.parent, + group=self.group, + encoding=self.encoding, + errors=self.errors, + index_axes=new_index_axes, + non_index_axes=new_non_index_axes, + values_axes=vaxes, + data_columns=dcs, + info=new_info, + nan_rep=nan_rep, + ) + if hasattr(self, "levels"): + # TODO: get this into constructor, only for appropriate subclass + new_table.levels = self.levels + + new_table.validate_min_itemsize(min_itemsize) + + if validate and table_exists: + new_table.validate(self) + + return new_table + + @staticmethod + def _get_blocks_and_items( + frame: DataFrame, + table_exists: bool, + new_non_index_axes, + values_axes, + data_columns, + ): + # Helper to clarify non-state-altering parts of _create_axes + + # TODO(ArrayManager) HDFStore relies on accessing the blocks + if isinstance(frame._mgr, ArrayManager): + frame = frame._as_manager("block") + + def get_blk_items(mgr): + return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks] + + mgr = frame._mgr + mgr = cast(BlockManager, mgr) + blocks: list[Block] = list(mgr.blocks) + blk_items: list[Index] = get_blk_items(mgr) + + if len(data_columns): + # TODO: prove that we only get here with axis == 1? + # It is the case in all extant tests, but NOT the case + # outside this `if len(data_columns)` check. + + axis, axis_labels = new_non_index_axes[0] + new_labels = Index(axis_labels).difference(Index(data_columns)) + mgr = frame.reindex(new_labels, axis=axis)._mgr + mgr = cast(BlockManager, mgr) + + blocks = list(mgr.blocks) + blk_items = get_blk_items(mgr) + for c in data_columns: + # This reindex would raise ValueError if we had a duplicate + # index, so we can infer that (as long as axis==1) we + # get a single column back, so a single block. 
+ mgr = frame.reindex([c], axis=axis)._mgr + mgr = cast(BlockManager, mgr) + blocks.extend(mgr.blocks) + blk_items.extend(get_blk_items(mgr)) + + # reorder the blocks in the same order as the existing table if we can + if table_exists: + by_items = { + tuple(b_items.tolist()): (b, b_items) + for b, b_items in zip(blocks, blk_items) + } + new_blocks: list[Block] = [] + new_blk_items = [] + for ea in values_axes: + items = tuple(ea.values) + try: + b, b_items = by_items.pop(items) + new_blocks.append(b) + new_blk_items.append(b_items) + except (IndexError, KeyError) as err: + jitems = ",".join([pprint_thing(item) for item in items]) + raise ValueError( + f"cannot match existing table structure for [{jitems}] " + "on appending data" + ) from err + blocks = new_blocks + blk_items = new_blk_items + + return blocks, blk_items + + def process_axes(self, obj, selection: Selection, columns=None) -> DataFrame: + """process axes filters""" + # make a copy to avoid side effects + if columns is not None: + columns = list(columns) + + # make sure to include levels if we have them + if columns is not None and self.is_multi_index: + assert isinstance(self.levels, list) # assured by is_multi_index + for n in self.levels: + if n not in columns: + columns.insert(0, n) + + # reorder by any non_index_axes & limit to the select columns + for axis, labels in self.non_index_axes: + obj = _reindex_axis(obj, axis, labels, columns) + + def process_filter(field, filt, op): + for axis_name in obj._AXIS_ORDERS: + axis_number = obj._get_axis_number(axis_name) + axis_values = obj._get_axis(axis_name) + assert axis_number is not None + + # see if the field is the name of an axis + if field == axis_name: + # if we have a multi-index, then need to include + # the levels + if self.is_multi_index: + filt = filt.union(Index(self.levels)) + + takers = op(axis_values, filt) + return obj.loc(axis=axis_number)[takers] + + # this might be the name of a file IN an axis + elif field in axis_values: + # we need to filter on this dimension + values = ensure_index(getattr(obj, field).values) + filt = ensure_index(filt) + + # hack until we support reversed dim flags + if isinstance(obj, DataFrame): + axis_number = 1 - axis_number + + takers = op(values, filt) + return obj.loc(axis=axis_number)[takers] + + raise ValueError(f"cannot find the field [{field}] for filtering!") + + # apply the selection filters (but keep in the same order) + if selection.filter is not None: + for field, op, filt in selection.filter.format(): + obj = process_filter(field, filt, op) + + return obj + + def create_description( + self, + complib, + complevel: int | None, + fletcher32: bool, + expectedrows: int | None, + ) -> dict[str, Any]: + """create the description of the table from the axes & values""" + # provided expected rows if its passed + if expectedrows is None: + expectedrows = max(self.nrows_expected, 10000) + + d = {"name": "table", "expectedrows": expectedrows} + + # description from the axes & values + d["description"] = {a.cname: a.typ for a in self.axes} + + if complib: + if complevel is None: + complevel = self._complevel or 9 + filters = _tables().Filters( + complevel=complevel, + complib=complib, + fletcher32=fletcher32 or self._fletcher32, + ) + d["filters"] = filters + elif self._filters is not None: + d["filters"] = self._filters + + return d + + def read_coordinates( + self, where=None, start: int | None = None, stop: int | None = None + ): + """ + select coordinates (row numbers) from a table; return the + coordinates object + """ + # 
validate the version + self.validate_version(where) + + # infer the data kind + if not self.infer_axes(): + return False + + # create the selection + selection = Selection(self, where=where, start=start, stop=stop) + coords = selection.select_coords() + if selection.filter is not None: + for field, op, filt in selection.filter.format(): + data = self.read_column( + field, start=coords.min(), stop=coords.max() + 1 + ) + coords = coords[op(data.iloc[coords - coords.min()], filt).values] + + return Index(coords) + + def read_column( + self, + column: str, + where=None, + start: int | None = None, + stop: int | None = None, + ): + """ + return a single column from the table, generally only indexables + are interesting + """ + # validate the version + self.validate_version() + + # infer the data kind + if not self.infer_axes(): + return False + + if where is not None: + raise TypeError("read_column does not currently accept a where clause") + + # find the axes + for a in self.axes: + if column == a.name: + if not a.is_data_indexable: + raise ValueError( + f"column [{column}] can not be extracted individually; " + "it is not data indexable" + ) + + # column must be an indexable or a data column + c = getattr(self.table.cols, column) + a.set_info(self.info) + col_values = a.convert( + c[start:stop], + nan_rep=self.nan_rep, + encoding=self.encoding, + errors=self.errors, + ) + return Series(_set_tz(col_values[1], a.tz), name=column, copy=False) + + raise KeyError(f"column [{column}] not found in the table") + + +class WORMTable(Table): + """ + a write-once read-many table: this format DOES NOT ALLOW appending to a + table. writing is a one-time operation the data are stored in a format + that allows for searching the data on disk + """ + + table_type = "worm" + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ): + """ + read the indices and the indexing array, calculate offset rows and return + """ + raise NotImplementedError("WORMTable needs to implement read") + + def write(self, obj, **kwargs) -> None: + """ + write in a format that we can search later on (but cannot append + to): write out the indices and the values using _write_array + (e.g. 
a CArray) create an indexing table so that we can search + """ + raise NotImplementedError("WORMTable needs to implement write") + + +class AppendableTable(Table): + """support the new appendable table formats""" + + table_type = "appendable" + + # error: Signature of "write" incompatible with supertype "Fixed" + def write( # type: ignore[override] + self, + obj, + axes=None, + append: bool = False, + complib=None, + complevel=None, + fletcher32=None, + min_itemsize=None, + chunksize: int | None = None, + expectedrows=None, + dropna: bool = False, + nan_rep=None, + data_columns=None, + track_times: bool = True, + ) -> None: + if not append and self.is_exists: + self._handle.remove_node(self.group, "table") + + # create the axes + table = self._create_axes( + axes=axes, + obj=obj, + validate=append, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + data_columns=data_columns, + ) + + for a in table.axes: + a.validate_names() + + if not table.is_exists: + # create the table + options = table.create_description( + complib=complib, + complevel=complevel, + fletcher32=fletcher32, + expectedrows=expectedrows, + ) + + # set the table attributes + table.set_attrs() + + options["track_times"] = track_times + + # create the table + table._handle.create_table(table.group, **options) + + # update my info + table.attrs.info = table.info + + # validate the axes and set the kinds + for a in table.axes: + a.validate_and_set(table, append) + + # add the rows + table.write_data(chunksize, dropna=dropna) + + def write_data(self, chunksize: int | None, dropna: bool = False) -> None: + """ + we form the data into a 2-d including indexes,values,mask write chunk-by-chunk + """ + names = self.dtype.names + nrows = self.nrows_expected + + # if dropna==True, then drop ALL nan rows + masks = [] + if dropna: + for a in self.values_axes: + # figure the mask: only do if we can successfully process this + # column, otherwise ignore the mask + mask = isna(a.data).all(axis=0) + if isinstance(mask, np.ndarray): + masks.append(mask.astype("u1", copy=False)) + + # consolidate masks + if len(masks): + mask = masks[0] + for m in masks[1:]: + mask = mask & m + mask = mask.ravel() + else: + mask = None + + # broadcast the indexes if needed + indexes = [a.cvalues for a in self.index_axes] + nindexes = len(indexes) + assert nindexes == 1, nindexes # ensures we dont need to broadcast + + # transpose the values so first dimension is last + # reshape the values if needed + values = [a.take_data() for a in self.values_axes] + values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values] + bvalues = [] + for i, v in enumerate(values): + new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape + bvalues.append(v.reshape(new_shape)) + + # write the chunks + if chunksize is None: + chunksize = 100000 + + rows = np.empty(min(chunksize, nrows), dtype=self.dtype) + chunks = nrows // chunksize + 1 + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, nrows) + if start_i >= end_i: + break + + self.write_data_chunk( + rows, + indexes=[a[start_i:end_i] for a in indexes], + mask=mask[start_i:end_i] if mask is not None else None, + values=[v[start_i:end_i] for v in bvalues], + ) + + def write_data_chunk( + self, + rows: np.ndarray, + indexes: list[np.ndarray], + mask: npt.NDArray[np.bool_] | None, + values: list[np.ndarray], + ) -> None: + """ + Parameters + ---------- + rows : an empty memory space where we are putting the chunk + indexes : an array of the indexes + mask : an array of the masks 
+ values : an array of the values + """ + # 0 len + for v in values: + if not np.prod(v.shape): + return + + nrows = indexes[0].shape[0] + if nrows != len(rows): + rows = np.empty(nrows, dtype=self.dtype) + names = self.dtype.names + nindexes = len(indexes) + + # indexes + for i, idx in enumerate(indexes): + rows[names[i]] = idx + + # values + for i, v in enumerate(values): + rows[names[i + nindexes]] = v + + # mask + if mask is not None: + m = ~mask.ravel().astype(bool, copy=False) + if not m.all(): + rows = rows[m] + + if len(rows): + self.table.append(rows) + self.table.flush() + + def delete(self, where=None, start: int | None = None, stop: int | None = None): + # delete all rows (and return the nrows) + if where is None or not len(where): + if start is None and stop is None: + nrows = self.nrows + self._handle.remove_node(self.group, recursive=True) + else: + # pytables<3.0 would remove a single row with stop=None + if stop is None: + stop = self.nrows + nrows = self.table.remove_rows(start=start, stop=stop) + self.table.flush() + return nrows + + # infer the data kind + if not self.infer_axes(): + return None + + # create the selection + table = self.table + selection = Selection(self, where, start=start, stop=stop) + values = selection.select_coords() + + # delete the rows in reverse order + sorted_series = Series(values, copy=False).sort_values() + ln = len(sorted_series) + + if ln: + # construct groups of consecutive rows + diff = sorted_series.diff() + groups = list(diff[diff > 1].index) + + # 1 group + if not len(groups): + groups = [0] + + # final element + if groups[-1] != ln: + groups.append(ln) + + # initial element + if groups[0] != 0: + groups.insert(0, 0) + + # we must remove in reverse order! + pg = groups.pop() + for g in reversed(groups): + rows = sorted_series.take(range(g, pg)) + table.remove_rows( + start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1 + ) + pg = g + + self.table.flush() + + # return the number of rows removed + return ln + + +class AppendableFrameTable(AppendableTable): + """support the new appendable table formats""" + + pandas_kind = "frame_table" + table_type = "appendable_frame" + ndim = 2 + obj_type: type[DataFrame | Series] = DataFrame + + @property + def is_transposed(self) -> bool: + return self.index_axes[0].axis == 1 + + @classmethod + def get_object(cls, obj, transposed: bool): + """these are written transposed""" + if transposed: + obj = obj.T + return obj + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ): + # validate the version + self.validate_version(where) + + # infer the data kind + if not self.infer_axes(): + return None + + result = self._read_axes(where=where, start=start, stop=stop) + + info = ( + self.info.get(self.non_index_axes[0][0], {}) + if len(self.non_index_axes) + else {} + ) + + inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]] + assert len(inds) == 1 + ind = inds[0] + + index = result[ind][0] + + frames = [] + for i, a in enumerate(self.axes): + if a not in self.values_axes: + continue + index_vals, cvalues = result[i] + + # we could have a multi-index constructor here + # ensure_index doesn't recognized our list-of-tuples here + if info.get("type") != "MultiIndex": + cols = Index(index_vals) + else: + cols = MultiIndex.from_tuples(index_vals) + + names = info.get("names") + if names is not None: + cols.set_names(names, inplace=True) + + if self.is_transposed: + values = cvalues + index_ = cols + cols_ = Index(index, 
name=getattr(index, "name", None)) + else: + values = cvalues.T + index_ = Index(index, name=getattr(index, "name", None)) + cols_ = cols + + # if we have a DataIndexableCol, its shape will only be 1 dim + if values.ndim == 1 and isinstance(values, np.ndarray): + values = values.reshape((1, values.shape[0])) + + if isinstance(values, np.ndarray): + df = DataFrame(values.T, columns=cols_, index=index_, copy=False) + elif isinstance(values, Index): + df = DataFrame(values, columns=cols_, index=index_) + else: + # Categorical + df = DataFrame._from_arrays([values], columns=cols_, index=index_) + if not (using_pyarrow_string_dtype() and values.dtype.kind == "O"): + assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype) + if using_pyarrow_string_dtype() and is_string_array( + values, # type: ignore[arg-type] + skipna=True, + ): + df = df.astype("string[pyarrow_numpy]") + frames.append(df) + + if len(frames) == 1: + df = frames[0] + else: + df = concat(frames, axis=1) + + selection = Selection(self, where=where, start=start, stop=stop) + # apply the selection filters & axis orderings + df = self.process_axes(df, selection=selection, columns=columns) + return df + + +class AppendableSeriesTable(AppendableFrameTable): + """support the new appendable table formats""" + + pandas_kind = "series_table" + table_type = "appendable_series" + ndim = 2 + obj_type = Series + + @property + def is_transposed(self) -> bool: + return False + + @classmethod + def get_object(cls, obj, transposed: bool): + return obj + + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, data_columns=None, **kwargs) -> None: # type: ignore[override] + """we are going to write this as a frame table""" + if not isinstance(obj, DataFrame): + name = obj.name or "values" + obj = obj.to_frame(name) + super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs) + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ) -> Series: + is_multi_index = self.is_multi_index + if columns is not None and is_multi_index: + assert isinstance(self.levels, list) # needed for mypy + for n in self.levels: + if n not in columns: + columns.insert(0, n) + s = super().read(where=where, columns=columns, start=start, stop=stop) + if is_multi_index: + s.set_index(self.levels, inplace=True) + + s = s.iloc[:, 0] + + # remove the default name + if s.name == "values": + s.name = None + return s + + +class AppendableMultiSeriesTable(AppendableSeriesTable): + """support the new appendable table formats""" + + pandas_kind = "series_table" + table_type = "appendable_multiseries" + + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, **kwargs) -> None: # type: ignore[override] + """we are going to write this as a frame table""" + name = obj.name or "values" + newobj, self.levels = self.validate_multiindex(obj) + assert isinstance(self.levels, list) # for mypy + cols = list(self.levels) + cols.append(name) + newobj.columns = Index(cols) + super().write(obj=newobj, **kwargs) + + +class GenericTable(AppendableFrameTable): + """a table that read/writes the generic pytables table format""" + + pandas_kind = "frame_table" + table_type = "generic_table" + ndim = 2 + obj_type = DataFrame + levels: list[Hashable] + + @property + def pandas_type(self) -> str: + return self.pandas_kind + + @property + def storable(self): + return getattr(self.group, "table", None) or self.group + + def get_attrs(self) -> None: + """retrieve our 
attributes""" + self.non_index_axes = [] + self.nan_rep = None + self.levels = [] + + self.index_axes = [a for a in self.indexables if a.is_an_indexable] + self.values_axes = [a for a in self.indexables if not a.is_an_indexable] + self.data_columns = [a.name for a in self.values_axes] + + @cache_readonly + def indexables(self): + """create the indexables from the table description""" + d = self.description + + # TODO: can we get a typ for this? AFAICT it is the only place + # where we aren't passing one + # the index columns is just a simple index + md = self.read_metadata("index") + meta = "category" if md is not None else None + index_col = GenericIndexCol( + name="index", axis=0, table=self.table, meta=meta, metadata=md + ) + + _indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col] + + for i, n in enumerate(d._v_names): + assert isinstance(n, str) + + atom = getattr(d, n) + md = self.read_metadata(n) + meta = "category" if md is not None else None + dc = GenericDataIndexableCol( + name=n, + pos=i, + values=[n], + typ=atom, + table=self.table, + meta=meta, + metadata=md, + ) + _indexables.append(dc) + + return _indexables + + # error: Signature of "write" incompatible with supertype "AppendableTable" + def write(self, **kwargs) -> None: # type: ignore[override] + raise NotImplementedError("cannot write on an generic table") + + +class AppendableMultiFrameTable(AppendableFrameTable): + """a frame with a multi-index""" + + table_type = "appendable_multiframe" + obj_type = DataFrame + ndim = 2 + _re_levels = re.compile(r"^level_\d+$") + + @property + def table_type_short(self) -> str: + return "appendable_multi" + + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, data_columns=None, **kwargs) -> None: # type: ignore[override] + if data_columns is None: + data_columns = [] + elif data_columns is True: + data_columns = obj.columns.tolist() + obj, self.levels = self.validate_multiindex(obj) + assert isinstance(self.levels, list) # for mypy + for n in self.levels: + if n not in data_columns: + data_columns.insert(0, n) + super().write(obj=obj, data_columns=data_columns, **kwargs) + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ): + df = super().read(where=where, columns=columns, start=start, stop=stop) + df = df.set_index(self.levels) + + # remove names for 'level_%d' + df.index = df.index.set_names( + [None if self._re_levels.search(name) else name for name in df.index.names] + ) + + return df + + +def _reindex_axis( + obj: DataFrame, axis: AxisInt, labels: Index, other=None +) -> DataFrame: + ax = obj._get_axis(axis) + labels = ensure_index(labels) + + # try not to reindex even if other is provided + # if it equals our current index + if other is not None: + other = ensure_index(other) + if (other is None or labels.equals(other)) and labels.equals(ax): + return obj + + labels = ensure_index(labels.unique()) + if other is not None: + labels = ensure_index(other.unique()).intersection(labels, sort=False) + if not labels.equals(ax): + slicer: list[slice | Index] = [slice(None, None)] * obj.ndim + slicer[axis] = labels + obj = obj.loc[tuple(slicer)] + return obj + + +# tz to/from coercion + + +def _get_tz(tz: tzinfo) -> str | tzinfo: + """for a tz-aware type, return an encoded zone""" + zone = timezones.get_timezone(tz) + return zone + + +@overload +def _set_tz( + values: np.ndarray | Index, tz: str | tzinfo, coerce: bool = False +) -> DatetimeIndex: + ... 
+ + +@overload +def _set_tz(values: np.ndarray | Index, tz: None, coerce: bool = False) -> np.ndarray: + ... + + +def _set_tz( + values: np.ndarray | Index, tz: str | tzinfo | None, coerce: bool = False +) -> np.ndarray | DatetimeIndex: + """ + coerce the values to a DatetimeIndex if tz is set + preserve the input shape if possible + + Parameters + ---------- + values : ndarray or Index + tz : str or tzinfo + coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray + """ + if isinstance(values, DatetimeIndex): + # If values is tzaware, the tz gets dropped in the values.ravel() + # call below (which returns an ndarray). So we are only non-lossy + # if `tz` matches `values.tz`. + assert values.tz is None or values.tz == tz + if values.tz is not None: + return values + + if tz is not None: + if isinstance(values, DatetimeIndex): + name = values.name + else: + name = None + values = values.ravel() + + tz = _ensure_decoded(tz) + values = DatetimeIndex(values, name=name) + values = values.tz_localize("UTC").tz_convert(tz) + elif coerce: + values = np.asarray(values, dtype="M8[ns]") + + # error: Incompatible return value type (got "Union[ndarray, Index]", + # expected "Union[ndarray, DatetimeIndex]") + return values # type: ignore[return-value] + + +def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol: + assert isinstance(name, str) + + index_name = index.name + # error: Argument 1 to "_get_data_and_dtype_name" has incompatible type "Index"; + # expected "Union[ExtensionArray, ndarray]" + converted, dtype_name = _get_data_and_dtype_name(index) # type: ignore[arg-type] + kind = _dtype_to_kind(dtype_name) + atom = DataIndexableCol._get_atom(converted) + + if ( + lib.is_np_dtype(index.dtype, "iu") + or needs_i8_conversion(index.dtype) + or is_bool_dtype(index.dtype) + ): + # Includes Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, + # in which case "kind" is "integer", "integer", "datetime64", + # "timedelta64", and "integer", respectively. 
+ return IndexCol( + name, + values=converted, + kind=kind, + typ=atom, + freq=getattr(index, "freq", None), + tz=getattr(index, "tz", None), + index_name=index_name, + ) + + if isinstance(index, MultiIndex): + raise TypeError("MultiIndex not supported here!") + + inferred_type = lib.infer_dtype(index, skipna=False) + # we won't get inferred_type of "datetime64" or "timedelta64" as these + # would go through the DatetimeIndex/TimedeltaIndex paths above + + values = np.asarray(index) + + if inferred_type == "date": + converted = np.asarray([v.toordinal() for v in values], dtype=np.int32) + return IndexCol( + name, converted, "date", _tables().Time32Col(), index_name=index_name + ) + elif inferred_type == "string": + converted = _convert_string_array(values, encoding, errors) + itemsize = converted.dtype.itemsize + return IndexCol( + name, + converted, + "string", + _tables().StringCol(itemsize), + index_name=index_name, + ) + + elif inferred_type in ["integer", "floating"]: + return IndexCol( + name, values=converted, kind=kind, typ=atom, index_name=index_name + ) + else: + assert isinstance(converted, np.ndarray) and converted.dtype == object + assert kind == "object", kind + atom = _tables().ObjectAtom() + return IndexCol(name, converted, kind, atom, index_name=index_name) + + +def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index: + index: Index | np.ndarray + + if kind.startswith("datetime64"): + if kind == "datetime64": + # created before we stored resolution information + index = DatetimeIndex(data) + else: + index = DatetimeIndex(data.view(kind)) + elif kind == "timedelta64": + index = TimedeltaIndex(data) + elif kind == "date": + try: + index = np.asarray([date.fromordinal(v) for v in data], dtype=object) + except ValueError: + index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object) + elif kind in ("integer", "float", "bool"): + index = np.asarray(data) + elif kind in ("string"): + index = _unconvert_string_array( + data, nan_rep=None, encoding=encoding, errors=errors + ) + elif kind == "object": + index = np.asarray(data[0]) + else: # pragma: no cover + raise ValueError(f"unrecognized index type {kind}") + return index + + +def _maybe_convert_for_string_atom( + name: str, + bvalues: ArrayLike, + existing_col, + min_itemsize, + nan_rep, + encoding, + errors, + columns: list[str], +): + if bvalues.dtype != object: + return bvalues + + bvalues = cast(np.ndarray, bvalues) + + dtype_name = bvalues.dtype.name + inferred_type = lib.infer_dtype(bvalues, skipna=False) + + if inferred_type == "date": + raise TypeError("[date] is not implemented as a table column") + if inferred_type == "datetime": + # after GH#8260 + # this only would be hit for a multi-timezone dtype which is an error + raise TypeError( + "too many timezones in this block, create separate data columns" + ) + + if not (inferred_type == "string" or dtype_name == "object"): + return bvalues + + mask = isna(bvalues) + data = bvalues.copy() + data[mask] = nan_rep + + # see if we have a valid string type + inferred_type = lib.infer_dtype(data, skipna=False) + if inferred_type != "string": + # we cannot serialize this data, so report an exception on a column + # by column basis + + # expected behaviour: + # search block for a non-string object column by column + for i in range(data.shape[0]): + col = data[i] + inferred_type = lib.infer_dtype(col, skipna=False) + if inferred_type != "string": + error_column_label = columns[i] if len(columns) > i else f"No.{i}" + raise TypeError( + 
f"Cannot serialize the column [{error_column_label}]\n" + f"because its data contents are not [string] but " + f"[{inferred_type}] object dtype" + ) + + # itemsize is the maximum length of a string (along any dimension) + + data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape) + itemsize = data_converted.itemsize + + # specified min_itemsize? + if isinstance(min_itemsize, dict): + min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0) + itemsize = max(min_itemsize or 0, itemsize) + + # check for column in the values conflicts + if existing_col is not None: + eci = existing_col.validate_col(itemsize) + if eci is not None and eci > itemsize: + itemsize = eci + + data_converted = data_converted.astype(f"|S{itemsize}", copy=False) + return data_converted + + +def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray: + """ + Take a string-like that is object dtype and coerce to a fixed size string type. + + Parameters + ---------- + data : np.ndarray[object] + encoding : str + errors : str + Handler for encoding errors. + + Returns + ------- + np.ndarray[fixed-length-string] + """ + # encode if needed + if len(data): + data = ( + Series(data.ravel(), copy=False) + .str.encode(encoding, errors) + ._values.reshape(data.shape) + ) + + # create the sized dtype + ensured = ensure_object(data.ravel()) + itemsize = max(1, libwriters.max_len_string_array(ensured)) + + data = np.asarray(data, dtype=f"S{itemsize}") + return data + + +def _unconvert_string_array( + data: np.ndarray, nan_rep, encoding: str, errors: str +) -> np.ndarray: + """ + Inverse of _convert_string_array. + + Parameters + ---------- + data : np.ndarray[fixed-length-string] + nan_rep : the storage repr of NaN + encoding : str + errors : str + Handler for encoding errors. + + Returns + ------- + np.ndarray[object] + Decoded data. + """ + shape = data.shape + data = np.asarray(data.ravel(), dtype=object) + + if len(data): + itemsize = libwriters.max_len_string_array(ensure_object(data)) + dtype = f"U{itemsize}" + + if isinstance(data[0], bytes): + data = Series(data, copy=False).str.decode(encoding, errors=errors)._values + else: + data = data.astype(dtype, copy=False).astype(object, copy=False) + + if nan_rep is None: + nan_rep = "nan" + + libwriters.string_array_replace_from_nan_rep(data, nan_rep) + return data.reshape(shape) + + +def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str): + assert isinstance(val_kind, str), type(val_kind) + if _need_convert(val_kind): + conv = _get_converter(val_kind, encoding, errors) + values = conv(values) + return values + + +def _get_converter(kind: str, encoding: str, errors: str): + if kind == "datetime64": + return lambda x: np.asarray(x, dtype="M8[ns]") + elif "datetime64" in kind: + return lambda x: np.asarray(x, dtype=kind) + elif kind == "string": + return lambda x: _unconvert_string_array( + x, nan_rep=None, encoding=encoding, errors=errors + ) + else: # pragma: no cover + raise ValueError(f"invalid kind {kind}") + + +def _need_convert(kind: str) -> bool: + if kind in ("datetime64", "string") or "datetime64" in kind: + return True + return False + + +def _maybe_adjust_name(name: str, version: Sequence[int]) -> str: + """ + Prior to 0.10.1, we named values blocks like: values_block_0 an the + name values_0, adjust the given name if necessary. 
+ + Parameters + ---------- + name : str + version : Tuple[int, int, int] + + Returns + ------- + str + """ + if isinstance(version, str) or len(version) < 3: + raise ValueError("Version is incorrect, expected sequence of 3 integers.") + + if version[0] == 0 and version[1] <= 10 and version[2] == 0: + m = re.search(r"values_block_(\d+)", name) + if m: + grp = m.groups()[0] + name = f"values_{grp}" + return name + + +def _dtype_to_kind(dtype_str: str) -> str: + """ + Find the "kind" string describing the given dtype name. + """ + dtype_str = _ensure_decoded(dtype_str) + + if dtype_str.startswith(("string", "bytes")): + kind = "string" + elif dtype_str.startswith("float"): + kind = "float" + elif dtype_str.startswith("complex"): + kind = "complex" + elif dtype_str.startswith(("int", "uint")): + kind = "integer" + elif dtype_str.startswith("datetime64"): + kind = dtype_str + elif dtype_str.startswith("timedelta"): + kind = "timedelta64" + elif dtype_str.startswith("bool"): + kind = "bool" + elif dtype_str.startswith("category"): + kind = "category" + elif dtype_str.startswith("period"): + # We store the `freq` attr so we can restore from integers + kind = "integer" + elif dtype_str == "object": + kind = "object" + else: + raise ValueError(f"cannot interpret dtype of [{dtype_str}]") + + return kind + + +def _get_data_and_dtype_name(data: ArrayLike): + """ + Convert the passed data into a storable form and a dtype string. + """ + if isinstance(data, Categorical): + data = data.codes + + if isinstance(data.dtype, DatetimeTZDtype): + # For datetime64tz we need to drop the TZ in tests TODO: why? + dtype_name = f"datetime64[{data.dtype.unit}]" + else: + dtype_name = data.dtype.name + + if data.dtype.kind in "mM": + data = np.asarray(data.view("i8")) + # TODO: we used to reshape for the dt64tz case, but no longer + # doing that doesn't seem to break anything. why? + + elif isinstance(data, PeriodIndex): + data = data.asi8 + + data = np.asarray(data) + return data, dtype_name + + +class Selection: + """ + Carries out a selection operation on a tables.Table object. 
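Before the parameter list below, a user-level sketch of the where-based querying that this Selection machinery implements; it assumes PyTables is installed, and the file name, key and query are illustrative:

import pandas as pd

df = pd.DataFrame({"x": range(5)}, index=pd.date_range("2024-01-01", periods=5))

with pd.HDFStore("selection_demo.h5", mode="w") as store:
    store.put("df", df, format="table")                         # table format supports queries
    subset = store.select("df", where="index >= '2024-01-03'")
    print(subset)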
+ + Parameters + ---------- + table : a Table object + where : list of Terms (or convertible to) + start, stop: indices to start and/or stop selection + + """ + + def __init__( + self, + table: Table, + where=None, + start: int | None = None, + stop: int | None = None, + ) -> None: + self.table = table + self.where = where + self.start = start + self.stop = stop + self.condition = None + self.filter = None + self.terms = None + self.coordinates = None + + if is_list_like(where): + # see if we have a passed coordinate like + with suppress(ValueError): + inferred = lib.infer_dtype(where, skipna=False) + if inferred in ("integer", "boolean"): + where = np.asarray(where) + if where.dtype == np.bool_: + start, stop = self.start, self.stop + if start is None: + start = 0 + if stop is None: + stop = self.table.nrows + self.coordinates = np.arange(start, stop)[where] + elif issubclass(where.dtype.type, np.integer): + if (self.start is not None and (where < self.start).any()) or ( + self.stop is not None and (where >= self.stop).any() + ): + raise ValueError( + "where must have index locations >= start and < stop" + ) + self.coordinates = where + + if self.coordinates is None: + self.terms = self.generate(where) + + # create the numexpr & the filter + if self.terms is not None: + self.condition, self.filter = self.terms.evaluate() + + def generate(self, where): + """where can be a : dict,list,tuple,string""" + if where is None: + return None + + q = self.table.queryables() + try: + return PyTablesExpr(where, queryables=q, encoding=self.table.encoding) + except NameError as err: + # raise a nice message, suggesting that the user should use + # data_columns + qkeys = ",".join(q.keys()) + msg = dedent( + f"""\ + The passed where expression: {where} + contains an invalid variable reference + all of the variable references must be a reference to + an axis (e.g. 
'index' or 'columns'), or a data_column + The currently defined references are: {qkeys} + """ + ) + raise ValueError(msg) from err + + def select(self): + """ + generate the selection + """ + if self.condition is not None: + return self.table.table.read_where( + self.condition.format(), start=self.start, stop=self.stop + ) + elif self.coordinates is not None: + return self.table.table.read_coordinates(self.coordinates) + return self.table.table.read(start=self.start, stop=self.stop) + + def select_coords(self): + """ + generate the selection + """ + start, stop = self.start, self.stop + nrows = self.table.nrows + if start is None: + start = 0 + elif start < 0: + start += nrows + if stop is None: + stop = nrows + elif stop < 0: + stop += nrows + + if self.condition is not None: + return self.table.table.get_where_list( + self.condition.format(), start=start, stop=stop, sort=True + ) + elif self.coordinates is not None: + return self.coordinates + + return np.arange(start, stop) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..317730745b6e3a0278a48b7bb810cf43e718e787 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__init__.py @@ -0,0 +1,3 @@ +from pandas.io.sas.sasreader import read_sas + +__all__ = ["read_sas"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas7bdat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas7bdat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16d77ea46b6158262051b36908910bb381675043 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas7bdat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sasreader.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sasreader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..438f6f404670ac7aaa0967c068975e895ddf893a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sasreader.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..62c17bd03927e5f852af708e6b9ef6cf7e74d57c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py @@ -0,0 +1,310 @@ +from __future__ import annotations + +from typing import Final + +magic: Final = ( + b"\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\xc2\xea\x81\x60" + b"\xb3\x14\x11\xcf\xbd\x92\x08\x00" + b"\x09\xc7\x31\x8c\x18\x1f\x10\x11" +) + +align_1_checker_value: Final = b"3" +align_1_offset: Final = 32 +align_1_length: Final = 1 +align_1_value: Final = 4 +u64_byte_checker_value: Final = b"3" +align_2_offset: Final = 35 +align_2_length: Final = 1 +align_2_value: Final = 4 +endianness_offset: Final = 37 +endianness_length: Final = 1 +platform_offset: Final = 39 +platform_length: Final = 1 +encoding_offset: Final = 70 +encoding_length: Final = 1 +dataset_offset: Final = 92 +dataset_length: Final = 64 +file_type_offset: Final = 156 +file_type_length: Final = 8 +date_created_offset: Final = 164 +date_created_length: Final = 8 +date_modified_offset: Final = 172 
+date_modified_length: Final = 8 +header_size_offset: Final = 196 +header_size_length: Final = 4 +page_size_offset: Final = 200 +page_size_length: Final = 4 +page_count_offset: Final = 204 +page_count_length: Final = 4 +sas_release_offset: Final = 216 +sas_release_length: Final = 8 +sas_server_type_offset: Final = 224 +sas_server_type_length: Final = 16 +os_version_number_offset: Final = 240 +os_version_number_length: Final = 16 +os_maker_offset: Final = 256 +os_maker_length: Final = 16 +os_name_offset: Final = 272 +os_name_length: Final = 16 +page_bit_offset_x86: Final = 16 +page_bit_offset_x64: Final = 32 +subheader_pointer_length_x86: Final = 12 +subheader_pointer_length_x64: Final = 24 +page_type_offset: Final = 0 +page_type_length: Final = 2 +block_count_offset: Final = 2 +block_count_length: Final = 2 +subheader_count_offset: Final = 4 +subheader_count_length: Final = 2 +page_type_mask: Final = 0x0F00 +# Keep "page_comp_type" bits +page_type_mask2: Final = 0xF000 | page_type_mask +page_meta_type: Final = 0x0000 +page_data_type: Final = 0x0100 +page_mix_type: Final = 0x0200 +page_amd_type: Final = 0x0400 +page_meta2_type: Final = 0x4000 +page_comp_type: Final = 0x9000 +page_meta_types: Final = [page_meta_type, page_meta2_type] +subheader_pointers_offset: Final = 8 +truncated_subheader_id: Final = 1 +compressed_subheader_id: Final = 4 +compressed_subheader_type: Final = 1 +text_block_size_length: Final = 2 +row_length_offset_multiplier: Final = 5 +row_count_offset_multiplier: Final = 6 +col_count_p1_multiplier: Final = 9 +col_count_p2_multiplier: Final = 10 +row_count_on_mix_page_offset_multiplier: Final = 15 +column_name_pointer_length: Final = 8 +column_name_text_subheader_offset: Final = 0 +column_name_text_subheader_length: Final = 2 +column_name_offset_offset: Final = 2 +column_name_offset_length: Final = 2 +column_name_length_offset: Final = 4 +column_name_length_length: Final = 2 +column_data_offset_offset: Final = 8 +column_data_length_offset: Final = 8 +column_data_length_length: Final = 4 +column_type_offset: Final = 14 +column_type_length: Final = 1 +column_format_text_subheader_index_offset: Final = 22 +column_format_text_subheader_index_length: Final = 2 +column_format_offset_offset: Final = 24 +column_format_offset_length: Final = 2 +column_format_length_offset: Final = 26 +column_format_length_length: Final = 2 +column_label_text_subheader_index_offset: Final = 28 +column_label_text_subheader_index_length: Final = 2 +column_label_offset_offset: Final = 30 +column_label_offset_length: Final = 2 +column_label_length_offset: Final = 32 +column_label_length_length: Final = 2 +rle_compression: Final = b"SASYZCRL" +rdc_compression: Final = b"SASYZCR2" + +compression_literals: Final = [rle_compression, rdc_compression] + +# Incomplete list of encodings, using SAS nomenclature: +# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html +# corresponding to the Python documentation of standard encodings +# https://docs.python.org/3/library/codecs.html#standard-encodings +encoding_names: Final = { + 20: "utf-8", + 29: "latin1", + 30: "latin2", + 31: "latin3", + 32: "latin4", + 33: "cyrillic", + 34: "arabic", + 35: "greek", + 36: "hebrew", + 37: "latin5", + 38: "latin6", + 39: "cp874", + 40: "latin9", + 41: "cp437", + 42: "cp850", + 43: "cp852", + 44: "cp857", + 45: "cp858", + 46: "cp862", + 47: "cp864", + 48: "cp865", + 49: "cp866", + 50: "cp869", + 51: "cp874", + # 52: "", # not found + # 53: "", # not found + # 54: "", # not found + 
55: "cp720", + 56: "cp737", + 57: "cp775", + 58: "cp860", + 59: "cp863", + 60: "cp1250", + 61: "cp1251", + 62: "cp1252", + 63: "cp1253", + 64: "cp1254", + 65: "cp1255", + 66: "cp1256", + 67: "cp1257", + 68: "cp1258", + 118: "cp950", + # 119: "", # not found + 123: "big5", + 125: "gb2312", + 126: "cp936", + 134: "euc_jp", + 136: "cp932", + 138: "shift_jis", + 140: "euc-kr", + 141: "cp949", + 227: "latin8", + # 228: "", # not found + # 229: "" # not found +} + + +class SASIndex: + row_size_index: Final = 0 + column_size_index: Final = 1 + subheader_counts_index: Final = 2 + column_text_index: Final = 3 + column_name_index: Final = 4 + column_attributes_index: Final = 5 + format_and_label_index: Final = 6 + column_list_index: Final = 7 + data_subheader_index: Final = 8 + + +subheader_signature_to_index: Final = { + b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index, + b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index, + b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index, + b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index, + b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index, + b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index, + b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index, + b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index, + b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index, + b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, + b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, + b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index, + b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index, + b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index, + b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, + b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, + b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index, + b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, + b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, + b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index, + b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index, + b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index, +} + + +# List of frequently used SAS date and datetime formats +# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm +# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java +sas_date_formats: Final = ( + "DATE", + "DAY", + "DDMMYY", + "DOWNAME", + "JULDAY", + "JULIAN", + "MMDDYY", + "MMYY", + "MMYYC", + "MMYYD", + "MMYYP", + "MMYYS", + "MMYYN", + "MONNAME", + "MONTH", + "MONYY", + "QTR", + "QTRR", + "NENGO", + "WEEKDATE", + "WEEKDATX", + "WEEKDAY", + "WEEKV", + "WORDDATE", + "WORDDATX", + "YEAR", + "YYMM", + "YYMMC", + "YYMMD", + "YYMMP", + "YYMMS", + "YYMMN", + "YYMON", + "YYMMDD", + "YYQ", + "YYQC", + "YYQD", + "YYQP", + "YYQS", + "YYQN", + "YYQR", + "YYQRC", + "YYQRD", + "YYQRP", + "YYQRS", + "YYQRN", + "YYMMDDP", + "YYMMDDC", + "E8601DA", + "YYMMDDN", + 
"MMDDYYC", + "MMDDYYS", + "MMDDYYD", + "YYMMDDS", + "B8601DA", + "DDMMYYN", + "YYMMDDD", + "DDMMYYB", + "DDMMYYP", + "MMDDYYP", + "YYMMDDB", + "MMDDYYN", + "DDMMYYC", + "DDMMYYD", + "DDMMYYS", + "MINGUO", +) + +sas_datetime_formats: Final = ( + "DATETIME", + "DTWKDATX", + "B8601DN", + "B8601DT", + "B8601DX", + "B8601DZ", + "B8601LX", + "E8601DN", + "E8601DT", + "E8601DX", + "E8601DZ", + "E8601LX", + "DATEAMPM", + "DTDATE", + "DTMONYY", + "DTMONYY", + "DTWKDATX", + "DTYEAR", + "TOD", + "MDYAMPM", +) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/stata.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/stata.py new file mode 100644 index 0000000000000000000000000000000000000000..4abf9af185a01284ab4c3ba968335117b3f9b81b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/stata.py @@ -0,0 +1,3763 @@ +""" +Module contains tools for processing Stata files into DataFrames + +The StataReader below was originally written by Joe Presbrey as part of PyDTA. +It has been extended and improved by Skipper Seabold from the Statsmodels +project who also developed the StataWriter and was finally added to pandas in +a once again improved version. + +You can find more information on http://presbrey.mit.edu/PyDTA and +https://www.statsmodels.org/devel/ +""" +from __future__ import annotations + +from collections import abc +from datetime import ( + datetime, + timedelta, +) +from io import BytesIO +import os +import struct +import sys +from typing import ( + IO, + TYPE_CHECKING, + AnyStr, + Callable, + Final, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.lib import infer_dtype +from pandas._libs.writers import max_len_string_array +from pandas.errors import ( + CategoricalConversionWarning, + InvalidColumnName, + PossiblePrecisionLoss, + ValueLabelTypeMismatch, +) +from pandas.util._decorators import ( + Appender, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.common import ( + ensure_object, + is_numeric_dtype, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas import ( + Categorical, + DatetimeIndex, + NaT, + Timestamp, + isna, + to_datetime, + to_timedelta, +) +from pandas.core.frame import DataFrame +from pandas.core.indexes.base import Index +from pandas.core.indexes.range import RangeIndex +from pandas.core.series import Series +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + from types import TracebackType + from typing import Literal + + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadBuffer, + Self, + StorageOptions, + WriteBuffer, + ) + +_version_error = ( + "Version of given Stata file is {version}. pandas supports importing " + "versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), " + "114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16)," + "and 119 (Stata 15/16, over 32,767 variables)." +) + +_statafile_processing_params1 = """\ +convert_dates : bool, default True + Convert date variables to DataFrame time values. +convert_categoricals : bool, default True + Read value labels and convert columns to Categorical/Factor variables.""" + +_statafile_processing_params2 = """\ +index_col : str, optional + Column to set as index. 
+convert_missing : bool, default False + Flag indicating whether to convert missing values to their Stata + representations. If False, missing values are replaced with nan. + If True, columns containing missing values are returned with + object data types and missing values are represented by + StataMissingValue objects. +preserve_dtypes : bool, default True + Preserve Stata datatypes. If False, numeric data are upcast to pandas + default types for foreign data (float64 or int64). +columns : list or None + Columns to retain. Columns will be returned in the given order. None + returns all columns. +order_categoricals : bool, default True + Flag indicating whether converted categorical data are ordered.""" + +_chunksize_params = """\ +chunksize : int, default None + Return StataReader object for iterations, returns chunks with + given number of lines.""" + +_iterator_params = """\ +iterator : bool, default False + Return StataReader object.""" + +_reader_notes = """\ +Notes +----- +Categorical variables read through an iterator may not have the same +categories and dtype. This occurs when a variable stored in a DTA +file is associated to an incomplete set of value labels that only +label a strict subset of the values.""" + +_read_stata_doc = f""" +Read Stata file into DataFrame. + +Parameters +---------- +filepath_or_buffer : str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: ``file://localhost/path/to/table.dta``. + + If you want to pass in a path object, pandas accepts any ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, + such as a file handle (e.g. via builtin ``open`` function) + or ``StringIO``. +{_statafile_processing_params1} +{_statafile_processing_params2} +{_chunksize_params} +{_iterator_params} +{_shared_docs["decompression_options"] % "filepath_or_buffer"} +{_shared_docs["storage_options"]} + +Returns +------- +DataFrame or pandas.api.typing.StataReader + +See Also +-------- +io.stata.StataReader : Low-level reader for Stata data files. +DataFrame.to_stata: Export Stata data files. + +{_reader_notes} + +Examples +-------- + +Creating a dummy stata for this example + +>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', 'parrot'], +... 'speed': [350, 18, 361, 15]}}) # doctest: +SKIP +>>> df.to_stata('animals.dta') # doctest: +SKIP + +Read a Stata dta file: + +>>> df = pd.read_stata('animals.dta') # doctest: +SKIP + +Read a Stata dta file in 10,000 line chunks: + +>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8") # doctest: +SKIP +>>> df = pd.DataFrame(values, columns=["i"]) # doctest: +SKIP +>>> df.to_stata('filename.dta') # doctest: +SKIP + +>>> with pd.read_stata('filename.dta', chunksize=10000) as itr: # doctest: +SKIP +>>> for chunk in itr: +... # Operate on a single chunk, e.g., chunk.mean() +... pass # doctest: +SKIP +""" + +_read_method_doc = f"""\ +Reads observations from Stata file, converting them into a dataframe + +Parameters +---------- +nrows : int + Number of lines to read from data file, if None read whole file. +{_statafile_processing_params1} +{_statafile_processing_params2} + +Returns +------- +DataFrame +""" + +_stata_reader_doc = f"""\ +Class for reading Stata dta files. 
+ +Parameters +---------- +path_or_buf : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or object + implementing a binary read() functions. +{_statafile_processing_params1} +{_statafile_processing_params2} +{_chunksize_params} +{_shared_docs["decompression_options"]} +{_shared_docs["storage_options"]} + +{_reader_notes} +""" + + +_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"] + + +stata_epoch: Final = datetime(1960, 1, 1) + + +def _stata_elapsed_date_to_datetime_vec(dates: Series, fmt: str) -> Series: + """ + Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime + + Parameters + ---------- + dates : Series + The Stata Internal Format date to convert to datetime according to fmt + fmt : str + The format to convert to. Can be, tc, td, tw, tm, tq, th, ty + Returns + + Returns + ------- + converted : Series + The converted dates + + Examples + -------- + >>> dates = pd.Series([52]) + >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw") + 0 1961-01-01 + dtype: datetime64[ns] + + Notes + ----- + datetime/c - tc + milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day + datetime/C - tC - NOT IMPLEMENTED + milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds + date - td + days since 01jan1960 (01jan1960 = 0) + weekly date - tw + weeks since 1960w1 + This assumes 52 weeks in a year, then adds 7 * remainder of the weeks. + The datetime value is the start of the week in terms of days in the + year, not ISO calendar weeks. + monthly date - tm + months since 1960m1 + quarterly date - tq + quarters since 1960q1 + half-yearly date - th + half-years since 1960h1 yearly + date - ty + years since 0000 + """ + MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year + MAX_DAY_DELTA = (Timestamp.max - datetime(1960, 1, 1)).days + MIN_DAY_DELTA = (Timestamp.min - datetime(1960, 1, 1)).days + MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000 + MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000 + + def convert_year_month_safe(year, month) -> Series: + """ + Convert year and month to datetimes, using pandas vectorized versions + when the date range falls within the range supported by pandas. + Otherwise it falls back to a slower but more robust method + using datetime. + """ + if year.max() < MAX_YEAR and year.min() > MIN_YEAR: + return to_datetime(100 * year + month, format="%Y%m") + else: + index = getattr(year, "index", None) + return Series([datetime(y, m, 1) for y, m in zip(year, month)], index=index) + + def convert_year_days_safe(year, days) -> Series: + """ + Converts year (e.g. 1999) and days since the start of the year to a + datetime or datetime64 Series + """ + if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR: + return to_datetime(year, format="%Y") + to_timedelta(days, unit="d") + else: + index = getattr(year, "index", None) + value = [ + datetime(y, 1, 1) + timedelta(days=int(d)) for y, d in zip(year, days) + ] + return Series(value, index=index) + + def convert_delta_safe(base, deltas, unit) -> Series: + """ + Convert base dates and deltas to datetimes, using pandas vectorized + versions if the deltas satisfy restrictions required to be expressed + as dates in pandas. 
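To make the elapsed-date (SIF) formats described above concrete, a small worked sketch; the counts 23376 and 761 are illustrative values:

from datetime import datetime, timedelta

stata_epoch = datetime(1960, 1, 1)

# %td counts days since 01jan1960: 23376 days after the epoch is 2024-01-01.
print(stata_epoch + timedelta(days=23376))

# %tm counts months since 1960m1: 761 -> year 1960 + 761 // 12 = 2023, month 761 % 12 + 1 = 6.
print(datetime(1960 + 761 // 12, 761 % 12 + 1, 1))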
+ """ + index = getattr(deltas, "index", None) + if unit == "d": + if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA: + values = [base + timedelta(days=int(d)) for d in deltas] + return Series(values, index=index) + elif unit == "ms": + if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA: + values = [ + base + timedelta(microseconds=(int(d) * 1000)) for d in deltas + ] + return Series(values, index=index) + else: + raise ValueError("format not understood") + base = to_datetime(base) + deltas = to_timedelta(deltas, unit=unit) + return base + deltas + + # TODO(non-nano): If/when pandas supports more than datetime64[ns], this + # should be improved to use correct range, e.g. datetime[Y] for yearly + bad_locs = np.isnan(dates) + has_bad_values = False + if bad_locs.any(): + has_bad_values = True + dates._values[bad_locs] = 1.0 # Replace with NaT + dates = dates.astype(np.int64) + + if fmt.startswith(("%tc", "tc")): # Delta ms relative to base + base = stata_epoch + ms = dates + conv_dates = convert_delta_safe(base, ms, "ms") + elif fmt.startswith(("%tC", "tC")): + warnings.warn( + "Encountered %tC format. Leaving in Stata Internal Format.", + stacklevel=find_stack_level(), + ) + conv_dates = Series(dates, dtype=object) + if has_bad_values: + conv_dates[bad_locs] = NaT + return conv_dates + # Delta days relative to base + elif fmt.startswith(("%td", "td", "%d", "d")): + base = stata_epoch + days = dates + conv_dates = convert_delta_safe(base, days, "d") + # does not count leap days - 7 days is a week. + # 52nd week may have more than 7 days + elif fmt.startswith(("%tw", "tw")): + year = stata_epoch.year + dates // 52 + days = (dates % 52) * 7 + conv_dates = convert_year_days_safe(year, days) + elif fmt.startswith(("%tm", "tm")): # Delta months relative to base + year = stata_epoch.year + dates // 12 + month = (dates % 12) + 1 + conv_dates = convert_year_month_safe(year, month) + elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base + year = stata_epoch.year + dates // 4 + quarter_month = (dates % 4) * 3 + 1 + conv_dates = convert_year_month_safe(year, quarter_month) + elif fmt.startswith(("%th", "th")): # Delta half-years relative to base + year = stata_epoch.year + dates // 2 + month = (dates % 2) * 6 + 1 + conv_dates = convert_year_month_safe(year, month) + elif fmt.startswith(("%ty", "ty")): # Years -- not delta + year = dates + first_month = np.ones_like(dates) + conv_dates = convert_year_month_safe(year, first_month) + else: + raise ValueError(f"Date fmt {fmt} not understood") + + if has_bad_values: # Restore NaT for bad values + conv_dates[bad_locs] = NaT + + return conv_dates + + +def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series: + """ + Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime + + Parameters + ---------- + dates : Series + Series or array containing datetime or datetime64[ns] to + convert to the Stata Internal Format given by fmt + fmt : str + The format to convert to. 
Can be, tc, td, tw, tm, tq, th, ty + """ + index = dates.index + NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000 + US_PER_DAY = NS_PER_DAY / 1000 + + def parse_dates_safe( + dates: Series, delta: bool = False, year: bool = False, days: bool = False + ): + d = {} + if lib.is_np_dtype(dates.dtype, "M"): + if delta: + time_delta = dates - Timestamp(stata_epoch).as_unit("ns") + d["delta"] = time_delta._values.view(np.int64) // 1000 # microseconds + if days or year: + date_index = DatetimeIndex(dates) + d["year"] = date_index._data.year + d["month"] = date_index._data.month + if days: + days_in_ns = dates._values.view(np.int64) - to_datetime( + d["year"], format="%Y" + )._values.view(np.int64) + d["days"] = days_in_ns // NS_PER_DAY + + elif infer_dtype(dates, skipna=False) == "datetime": + if delta: + delta = dates._values - stata_epoch + + def f(x: timedelta) -> float: + return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds + + v = np.vectorize(f) + d["delta"] = v(delta) + if year: + year_month = dates.apply(lambda x: 100 * x.year + x.month) + d["year"] = year_month._values // 100 + d["month"] = year_month._values - d["year"] * 100 + if days: + + def g(x: datetime) -> int: + return (x - datetime(x.year, 1, 1)).days + + v = np.vectorize(g) + d["days"] = v(dates) + else: + raise ValueError( + "Columns containing dates must contain either " + "datetime64, datetime or null values." + ) + + return DataFrame(d, index=index) + + bad_loc = isna(dates) + index = dates.index + if bad_loc.any(): + if lib.is_np_dtype(dates.dtype, "M"): + dates._values[bad_loc] = to_datetime(stata_epoch) + else: + dates._values[bad_loc] = stata_epoch + + if fmt in ["%tc", "tc"]: + d = parse_dates_safe(dates, delta=True) + conv_dates = d.delta / 1000 + elif fmt in ["%tC", "tC"]: + warnings.warn( + "Stata Internal Format tC not supported.", + stacklevel=find_stack_level(), + ) + conv_dates = dates + elif fmt in ["%td", "td"]: + d = parse_dates_safe(dates, delta=True) + conv_dates = d.delta // US_PER_DAY + elif fmt in ["%tw", "tw"]: + d = parse_dates_safe(dates, year=True, days=True) + conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7 + elif fmt in ["%tm", "tm"]: + d = parse_dates_safe(dates, year=True) + conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1 + elif fmt in ["%tq", "tq"]: + d = parse_dates_safe(dates, year=True) + conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 + elif fmt in ["%th", "th"]: + d = parse_dates_safe(dates, year=True) + conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int) + elif fmt in ["%ty", "ty"]: + d = parse_dates_safe(dates, year=True) + conv_dates = d.year + else: + raise ValueError(f"Format {fmt} is not a known Stata date format") + + conv_dates = Series(conv_dates, dtype=np.float64, copy=False) + missing_value = struct.unpack(" DataFrame: + """ + Checks the dtypes of the columns of a pandas DataFrame for + compatibility with the data types and ranges supported by Stata, and + converts if necessary. + + Parameters + ---------- + data : DataFrame + The DataFrame to check and convert + + Notes + ----- + Numeric columns in Stata must be one of int8, int16, int32, float32 or + float64, with some additional value restrictions. int8 and int16 columns + are checked for violations of the value restrictions and upcast if needed. + int64 data is not usable in Stata, and so it is downcast to int32 whenever + the value are in the int32 range, and sidecast to float64 when larger than + this range. 
If the int64 values are outside of the range of those + perfectly representable as float64 values, a warning is raised. + + bool columns are cast to int8. uint columns are converted to int of the + same size if there is no loss in precision, otherwise are upcast to a + larger type. uint64 is currently not supported since it is concerted to + object in a DataFrame. + """ + ws = "" + # original, if small, if large + conversion_data: tuple[ + tuple[type, type, type], + tuple[type, type, type], + tuple[type, type, type], + tuple[type, type, type], + tuple[type, type, type], + ] = ( + (np.bool_, np.int8, np.int8), + (np.uint8, np.int8, np.int16), + (np.uint16, np.int16, np.int32), + (np.uint32, np.int32, np.int64), + (np.uint64, np.int64, np.float64), + ) + + float32_max = struct.unpack("= 2**53: + ws = precision_loss_doc.format("uint64", "float64") + + data[col] = data[col].astype(dtype) + + # Check values and upcast if necessary + + if dtype == np.int8 and not empty_df: + if data[col].max() > 100 or data[col].min() < -127: + data[col] = data[col].astype(np.int16) + elif dtype == np.int16 and not empty_df: + if data[col].max() > 32740 or data[col].min() < -32767: + data[col] = data[col].astype(np.int32) + elif dtype == np.int64: + if empty_df or ( + data[col].max() <= 2147483620 and data[col].min() >= -2147483647 + ): + data[col] = data[col].astype(np.int32) + else: + data[col] = data[col].astype(np.float64) + if data[col].max() >= 2**53 or data[col].min() <= -(2**53): + ws = precision_loss_doc.format("int64", "float64") + elif dtype in (np.float32, np.float64): + if np.isinf(data[col]).any(): + raise ValueError( + f"Column {col} contains infinity or -infinity" + "which is outside the range supported by Stata." + ) + value = data[col].max() + if dtype == np.float32 and value > float32_max: + data[col] = data[col].astype(np.float64) + elif dtype == np.float64: + if value > float64_max: + raise ValueError( + f"Column {col} has a maximum value ({value}) outside the range " + f"supported by Stata ({float64_max})" + ) + if is_nullable_int: + if orig_missing.any(): + # Replace missing by Stata sentinel value + sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name] + data.loc[orig_missing, col] = sentinel + if ws: + warnings.warn( + ws, + PossiblePrecisionLoss, + stacklevel=find_stack_level(), + ) + + return data + + +class StataValueLabel: + """ + Parse a categorical column and prepare formatted output + + Parameters + ---------- + catarray : Series + Categorical Series to encode + encoding : {"latin-1", "utf-8"} + Encoding to use for value labels. 
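As a user-level illustration of the value labels this class writes: a Categorical column is stored with a value-label table, and read_stata rebuilds the Categorical from it by default (convert_categoricals=True). The file name and data below are illustrative:

import pandas as pd

df = pd.DataFrame({"animal": pd.Categorical(["falcon", "parrot", "falcon"])})
df.to_stata("value_labels_demo.dta", write_index=False, version=118)

back = pd.read_stata("value_labels_demo.dta")
print(back["animal"].dtype)   # category, reconstructed from the value labels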
+ """ + + def __init__( + self, catarray: Series, encoding: Literal["latin-1", "utf-8"] = "latin-1" + ) -> None: + if encoding not in ("latin-1", "utf-8"): + raise ValueError("Only latin-1 and utf-8 are supported.") + self.labname = catarray.name + self._encoding = encoding + categories = catarray.cat.categories + self.value_labels = enumerate(categories) + + self._prepare_value_labels() + + def _prepare_value_labels(self) -> None: + """Encode value labels.""" + + self.text_len = 0 + self.txt: list[bytes] = [] + self.n = 0 + # Offsets (length of categories), converted to int32 + self.off = np.array([], dtype=np.int32) + # Values, converted to int32 + self.val = np.array([], dtype=np.int32) + self.len = 0 + + # Compute lengths and setup lists of offsets and labels + offsets: list[int] = [] + values: list[float] = [] + for vl in self.value_labels: + category: str | bytes = vl[1] + if not isinstance(category, str): + category = str(category) + warnings.warn( + value_label_mismatch_doc.format(self.labname), + ValueLabelTypeMismatch, + stacklevel=find_stack_level(), + ) + category = category.encode(self._encoding) + offsets.append(self.text_len) + self.text_len += len(category) + 1 # +1 for the padding + values.append(vl[0]) + self.txt.append(category) + self.n += 1 + + if self.text_len > 32000: + raise ValueError( + "Stata value labels for a single variable must " + "have a combined length less than 32,000 characters." + ) + + # Ensure int32 + self.off = np.array(offsets, dtype=np.int32) + self.val = np.array(values, dtype=np.int32) + + # Total length + self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len + + def generate_value_label(self, byteorder: str) -> bytes: + """ + Generate the binary representation of the value labels. + + Parameters + ---------- + byteorder : str + Byte order of the output + + Returns + ------- + value_label : bytes + Bytes containing the formatted value label + """ + encoding = self._encoding + bio = BytesIO() + null_byte = b"\x00" + + # len + bio.write(struct.pack(byteorder + "i", self.len)) + + # labname + labname = str(self.labname)[:32].encode(encoding) + lab_len = 32 if encoding not in ("utf-8", "utf8") else 128 + labname = _pad_bytes(labname, lab_len + 1) + bio.write(labname) + + # padding - 3 bytes + for i in range(3): + bio.write(struct.pack("c", null_byte)) + + # value_label_table + # n - int32 + bio.write(struct.pack(byteorder + "i", self.n)) + + # textlen - int32 + bio.write(struct.pack(byteorder + "i", self.text_len)) + + # off - int32 array (n elements) + for offset in self.off: + bio.write(struct.pack(byteorder + "i", offset)) + + # val - int32 array (n elements) + for value in self.val: + bio.write(struct.pack(byteorder + "i", value)) + + # txt - Text labels, null terminated + for text in self.txt: + bio.write(text + null_byte) + + return bio.getvalue() + + +class StataNonCatValueLabel(StataValueLabel): + """ + Prepare formatted version of value labels + + Parameters + ---------- + labname : str + Value label name + value_labels: Dictionary + Mapping of values to labels + encoding : {"latin-1", "utf-8"} + Encoding to use for value labels. 
+ """ + + def __init__( + self, + labname: str, + value_labels: dict[float, str], + encoding: Literal["latin-1", "utf-8"] = "latin-1", + ) -> None: + if encoding not in ("latin-1", "utf-8"): + raise ValueError("Only latin-1 and utf-8 are supported.") + + self.labname = labname + self._encoding = encoding + self.value_labels = sorted( # type: ignore[assignment] + value_labels.items(), key=lambda x: x[0] + ) + self._prepare_value_labels() + + +class StataMissingValue: + """ + An observation's missing value. + + Parameters + ---------- + value : {int, float} + The Stata missing value code + + Notes + ----- + More information: + + Integer missing values make the code '.', '.a', ..., '.z' to the ranges + 101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ... + 2147483647 (for int32). Missing values for floating point data types are + more complex but the pattern is simple to discern from the following table. + + np.float32 missing values (float in Stata) + 0000007f . + 0008007f .a + 0010007f .b + ... + 00c0007f .x + 00c8007f .y + 00d0007f .z + + np.float64 missing values (double in Stata) + 000000000000e07f . + 000000000001e07f .a + 000000000002e07f .b + ... + 000000000018e07f .x + 000000000019e07f .y + 00000000001ae07f .z + """ + + # Construct a dictionary of missing values + MISSING_VALUES: dict[float, str] = {} + bases: Final = (101, 32741, 2147483621) + for b in bases: + # Conversion to long to avoid hash issues on 32 bit platforms #8968 + MISSING_VALUES[b] = "." + for i in range(1, 27): + MISSING_VALUES[i + b] = "." + chr(96 + i) + + float32_base: bytes = b"\x00\x00\x00\x7f" + increment_32: int = struct.unpack(" 0: + MISSING_VALUES[key] += chr(96 + i) + int_value = struct.unpack(" 0: + MISSING_VALUES[key] += chr(96 + i) + int_value = struct.unpack("q", struct.pack(" None: + self._value = value + # Conversion to int to avoid hash issues on 32 bit platforms #8968 + value = int(value) if value < 2147483648 else float(value) + self._str = self.MISSING_VALUES[value] + + @property + def string(self) -> str: + """ + The Stata representation of the missing value: '.', '.a'..'.z' + + Returns + ------- + str + The representation of the missing value. + """ + return self._str + + @property + def value(self) -> float: + """ + The binary representation of the missing value. + + Returns + ------- + {int, float} + The binary representation of the missing value. + """ + return self._value + + def __str__(self) -> str: + return self.string + + def __repr__(self) -> str: + return f"{type(self)}({self})" + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, type(self)) + and self.string == other.string + and self.value == other.value + ) + + @classmethod + def get_base_missing_value(cls, dtype: np.dtype) -> float: + if dtype.type is np.int8: + value = cls.BASE_MISSING_VALUES["int8"] + elif dtype.type is np.int16: + value = cls.BASE_MISSING_VALUES["int16"] + elif dtype.type is np.int32: + value = cls.BASE_MISSING_VALUES["int32"] + elif dtype.type is np.float32: + value = cls.BASE_MISSING_VALUES["float32"] + elif dtype.type is np.float64: + value = cls.BASE_MISSING_VALUES["float64"] + else: + raise ValueError("Unsupported dtype") + return value + + +class StataParser: + def __init__(self) -> None: + # type code. + # -------------------- + # str1 1 = 0x01 + # str2 2 = 0x02 + # ... 
+ # str244 244 = 0xf4 + # byte 251 = 0xfb (sic) + # int 252 = 0xfc + # long 253 = 0xfd + # float 254 = 0xfe + # double 255 = 0xff + # -------------------- + # NOTE: the byte type seems to be reserved for categorical variables + # with a label, but the underlying variable is -127 to 100 + # we're going to drop the label and cast to int + self.DTYPE_MAP = dict( + [(i, np.dtype(f"S{i}")) for i in range(1, 245)] + + [ + (251, np.dtype(np.int8)), + (252, np.dtype(np.int16)), + (253, np.dtype(np.int32)), + (254, np.dtype(np.float32)), + (255, np.dtype(np.float64)), + ] + ) + self.DTYPE_MAP_XML: dict[int, np.dtype] = { + 32768: np.dtype(np.uint8), # Keys to GSO + 65526: np.dtype(np.float64), + 65527: np.dtype(np.float32), + 65528: np.dtype(np.int32), + 65529: np.dtype(np.int16), + 65530: np.dtype(np.int8), + } + self.TYPE_MAP = list(tuple(range(251)) + tuple("bhlfd")) + self.TYPE_MAP_XML = { + # Not really a Q, unclear how to handle byteswap + 32768: "Q", + 65526: "d", + 65527: "f", + 65528: "l", + 65529: "h", + 65530: "b", + } + # NOTE: technically, some of these are wrong. there are more numbers + # that can be represented. it's the 27 ABOVE and BELOW the max listed + # numeric data type in [U] 12.2.2 of the 11.2 manual + float32_min = b"\xff\xff\xff\xfe" + float32_max = b"\xff\xff\xff\x7e" + float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff" + float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f" + self.VALID_RANGE = { + "b": (-127, 100), + "h": (-32767, 32740), + "l": (-2147483647, 2147483620), + "f": ( + np.float32(struct.unpack(" None: + super().__init__() + + # Arguments to the reader (can be temporarily overridden in + # calls to read). + self._convert_dates = convert_dates + self._convert_categoricals = convert_categoricals + self._index_col = index_col + self._convert_missing = convert_missing + self._preserve_dtypes = preserve_dtypes + self._columns = columns + self._order_categoricals = order_categoricals + self._original_path_or_buf = path_or_buf + self._compression = compression + self._storage_options = storage_options + self._encoding = "" + self._chunksize = chunksize + self._using_iterator = False + self._entered = False + if self._chunksize is None: + self._chunksize = 1 + elif not isinstance(chunksize, int) or chunksize <= 0: + raise ValueError("chunksize must be a positive integer when set.") + + # State variables for the file + self._close_file: Callable[[], None] | None = None + self._missing_values = False + self._can_read_value_labels = False + self._column_selector_set = False + self._value_labels_read = False + self._data_read = False + self._dtype: np.dtype | None = None + self._lines_read = 0 + + self._native_byteorder = _set_endianness(sys.byteorder) + + def _ensure_open(self) -> None: + """ + Ensure the file has been opened and its header data read. + """ + if not hasattr(self, "_path_or_buf"): + self._open_file() + + def _open_file(self) -> None: + """ + Open the file (with compression options, etc.), and read header information. + """ + if not self._entered: + warnings.warn( + "StataReader is being used without using a context manager. " + "Using StataReader as a context manager is the only supported method.", + ResourceWarning, + stacklevel=find_stack_level(), + ) + handles = get_handle( + self._original_path_or_buf, + "rb", + storage_options=self._storage_options, + is_text=False, + compression=self._compression, + ) + if hasattr(handles.handle, "seekable") and handles.handle.seekable(): + # If the handle is directly seekable, use it without an extra copy. 
+ self._path_or_buf = handles.handle + self._close_file = handles.close + else: + # Copy to memory, and ensure no encoding. + with handles: + self._path_or_buf = BytesIO(handles.handle.read()) + self._close_file = self._path_or_buf.close + + self._read_header() + self._setup_dtype() + + def __enter__(self) -> Self: + """enter context manager""" + self._entered = True + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + if self._close_file: + self._close_file() + + def close(self) -> None: + """Close the handle if its open. + + .. deprecated: 2.0.0 + + The close method is not part of the public API. + The only supported way to use StataReader is to use it as a context manager. + """ + warnings.warn( + "The StataReader.close() method is not part of the public API and " + "will be removed in a future version without notice. " + "Using StataReader as a context manager is the only supported method.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self._close_file: + self._close_file() + + def _set_encoding(self) -> None: + """ + Set string encoding which depends on file version + """ + if self._format_version < 118: + self._encoding = "latin-1" + else: + self._encoding = "utf-8" + + def _read_int8(self) -> int: + return struct.unpack("b", self._path_or_buf.read(1))[0] + + def _read_uint8(self) -> int: + return struct.unpack("B", self._path_or_buf.read(1))[0] + + def _read_uint16(self) -> int: + return struct.unpack(f"{self._byteorder}H", self._path_or_buf.read(2))[0] + + def _read_uint32(self) -> int: + return struct.unpack(f"{self._byteorder}I", self._path_or_buf.read(4))[0] + + def _read_uint64(self) -> int: + return struct.unpack(f"{self._byteorder}Q", self._path_or_buf.read(8))[0] + + def _read_int16(self) -> int: + return struct.unpack(f"{self._byteorder}h", self._path_or_buf.read(2))[0] + + def _read_int32(self) -> int: + return struct.unpack(f"{self._byteorder}i", self._path_or_buf.read(4))[0] + + def _read_int64(self) -> int: + return struct.unpack(f"{self._byteorder}q", self._path_or_buf.read(8))[0] + + def _read_char8(self) -> bytes: + return struct.unpack("c", self._path_or_buf.read(1))[0] + + def _read_int16_count(self, count: int) -> tuple[int, ...]: + return struct.unpack( + f"{self._byteorder}{'h' * count}", + self._path_or_buf.read(2 * count), + ) + + def _read_header(self) -> None: + first_char = self._read_char8() + if first_char == b"<": + self._read_new_header() + else: + self._read_old_header(first_char) + + def _read_new_header(self) -> None: + # The first part of the header is common to 117 - 119. + self._path_or_buf.read(27) # stata_dta>
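Given the warning above that StataReader is only supported as a context manager, a minimal usage sketch via the public entry point; the file name is illustrative:

import pandas as pd

with pd.read_stata("animals.dta", iterator=True) as reader:
    first_rows = reader.read(2)        # read a couple of observations
    labels = reader.value_labels()     # value-label dictionaries, if the file has any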
+ self._format_version = int(self._path_or_buf.read(3)) + if self._format_version not in [117, 118, 119]: + raise ValueError(_version_error.format(version=self._format_version)) + self._set_encoding() + self._path_or_buf.read(21) # + self._byteorder = ">" if self._path_or_buf.read(3) == b"MSF" else "<" + self._path_or_buf.read(15) # + self._nvar = ( + self._read_uint16() if self._format_version <= 118 else self._read_uint32() + ) + self._path_or_buf.read(7) # + + self._nobs = self._get_nobs() + self._path_or_buf.read(11) # + self._time_stamp = self._get_time_stamp() + self._path_or_buf.read(26) #
+ self._path_or_buf.read(8) # 0x0000000000000000 + self._path_or_buf.read(8) # position of + + self._seek_vartypes = self._read_int64() + 16 + self._seek_varnames = self._read_int64() + 10 + self._seek_sortlist = self._read_int64() + 10 + self._seek_formats = self._read_int64() + 9 + self._seek_value_label_names = self._read_int64() + 19 + + # Requires version-specific treatment + self._seek_variable_labels = self._get_seek_variable_labels() + + self._path_or_buf.read(8) # + self._data_location = self._read_int64() + 6 + self._seek_strls = self._read_int64() + 7 + self._seek_value_labels = self._read_int64() + 14 + + self._typlist, self._dtyplist = self._get_dtypes(self._seek_vartypes) + + self._path_or_buf.seek(self._seek_varnames) + self._varlist = self._get_varlist() + + self._path_or_buf.seek(self._seek_sortlist) + self._srtlist = self._read_int16_count(self._nvar + 1)[:-1] + + self._path_or_buf.seek(self._seek_formats) + self._fmtlist = self._get_fmtlist() + + self._path_or_buf.seek(self._seek_value_label_names) + self._lbllist = self._get_lbllist() + + self._path_or_buf.seek(self._seek_variable_labels) + self._variable_labels = self._get_variable_labels() + + # Get data type information, works for versions 117-119. + def _get_dtypes( + self, seek_vartypes: int + ) -> tuple[list[int | str], list[str | np.dtype]]: + self._path_or_buf.seek(seek_vartypes) + typlist = [] + dtyplist = [] + for _ in range(self._nvar): + typ = self._read_uint16() + if typ <= 2045: + typlist.append(typ) + dtyplist.append(str(typ)) + else: + try: + typlist.append(self.TYPE_MAP_XML[typ]) # type: ignore[arg-type] + dtyplist.append(self.DTYPE_MAP_XML[typ]) # type: ignore[arg-type] + except KeyError as err: + raise ValueError(f"cannot convert stata types [{typ}]") from err + + return typlist, dtyplist # type: ignore[return-value] + + def _get_varlist(self) -> list[str]: + # 33 in order formats, 129 in formats 118 and 119 + b = 33 if self._format_version < 118 else 129 + return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] + + # Returns the format list + def _get_fmtlist(self) -> list[str]: + if self._format_version >= 118: + b = 57 + elif self._format_version > 113: + b = 49 + elif self._format_version > 104: + b = 12 + else: + b = 7 + + return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] + + # Returns the label list + def _get_lbllist(self) -> list[str]: + if self._format_version >= 118: + b = 129 + elif self._format_version > 108: + b = 33 + else: + b = 9 + return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] + + def _get_variable_labels(self) -> list[str]: + if self._format_version >= 118: + vlblist = [ + self._decode(self._path_or_buf.read(321)) for _ in range(self._nvar) + ] + elif self._format_version > 105: + vlblist = [ + self._decode(self._path_or_buf.read(81)) for _ in range(self._nvar) + ] + else: + vlblist = [ + self._decode(self._path_or_buf.read(32)) for _ in range(self._nvar) + ] + return vlblist + + def _get_nobs(self) -> int: + if self._format_version >= 118: + return self._read_uint64() + else: + return self._read_uint32() + + def _get_data_label(self) -> str: + if self._format_version >= 118: + strlen = self._read_uint16() + return self._decode(self._path_or_buf.read(strlen)) + elif self._format_version == 117: + strlen = self._read_int8() + return self._decode(self._path_or_buf.read(strlen)) + elif self._format_version > 105: + return self._decode(self._path_or_buf.read(81)) + else: + return 
self._decode(self._path_or_buf.read(32)) + + def _get_time_stamp(self) -> str: + if self._format_version >= 118: + strlen = self._read_int8() + return self._path_or_buf.read(strlen).decode("utf-8") + elif self._format_version == 117: + strlen = self._read_int8() + return self._decode(self._path_or_buf.read(strlen)) + elif self._format_version > 104: + return self._decode(self._path_or_buf.read(18)) + else: + raise ValueError() + + def _get_seek_variable_labels(self) -> int: + if self._format_version == 117: + self._path_or_buf.read(8) # , throw away + # Stata 117 data files do not follow the described format. This is + # a work around that uses the previous label, 33 bytes for each + # variable, 20 for the closing tag and 17 for the opening tag + return self._seek_value_label_names + (33 * self._nvar) + 20 + 17 + elif self._format_version >= 118: + return self._read_int64() + 17 + else: + raise ValueError() + + def _read_old_header(self, first_char: bytes) -> None: + self._format_version = int(first_char[0]) + if self._format_version not in [104, 105, 108, 111, 113, 114, 115]: + raise ValueError(_version_error.format(version=self._format_version)) + self._set_encoding() + self._byteorder = ">" if self._read_int8() == 0x1 else "<" + self._filetype = self._read_int8() + self._path_or_buf.read(1) # unused + + self._nvar = self._read_uint16() + self._nobs = self._get_nobs() + + self._data_label = self._get_data_label() + + self._time_stamp = self._get_time_stamp() + + # descriptors + if self._format_version > 108: + typlist = [int(c) for c in self._path_or_buf.read(self._nvar)] + else: + buf = self._path_or_buf.read(self._nvar) + typlistb = np.frombuffer(buf, dtype=np.uint8) + typlist = [] + for tp in typlistb: + if tp in self.OLD_TYPE_MAPPING: + typlist.append(self.OLD_TYPE_MAPPING[tp]) + else: + typlist.append(tp - 127) # bytes + + try: + self._typlist = [self.TYPE_MAP[typ] for typ in typlist] + except ValueError as err: + invalid_types = ",".join([str(x) for x in typlist]) + raise ValueError(f"cannot convert stata types [{invalid_types}]") from err + try: + self._dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] + except ValueError as err: + invalid_dtypes = ",".join([str(x) for x in typlist]) + raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err + + if self._format_version > 108: + self._varlist = [ + self._decode(self._path_or_buf.read(33)) for _ in range(self._nvar) + ] + else: + self._varlist = [ + self._decode(self._path_or_buf.read(9)) for _ in range(self._nvar) + ] + self._srtlist = self._read_int16_count(self._nvar + 1)[:-1] + + self._fmtlist = self._get_fmtlist() + + self._lbllist = self._get_lbllist() + + self._variable_labels = self._get_variable_labels() + + # ignore expansion fields (Format 105 and later) + # When reading, read five bytes; the last four bytes now tell you + # the size of the next read, which you discard. You then continue + # like this until you read 5 bytes of zeros. 
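A hedged sketch of the fixed fields at the start of a pre-117 header, mirroring what `_read_old_header` parses: one byte each for version, byte order (0x01 means big-endian) and file type, one unused byte, then a uint16 variable count and a uint32 observation count in the file's byte order. The file name is illustrative and error handling is omitted:

import struct

with open("old_format.dta", "rb") as f:
    version, byteorder_flag, filetype, _unused = struct.unpack("4b", f.read(4))
    order = ">" if byteorder_flag == 1 else "<"
    nvar = struct.unpack(order + "H", f.read(2))[0]   # number of variables
    nobs = struct.unpack(order + "I", f.read(4))[0]   # number of observations (formats < 118)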
+ + if self._format_version > 104: + while True: + data_type = self._read_int8() + if self._format_version > 108: + data_len = self._read_int32() + else: + data_len = self._read_int16() + if data_type == 0: + break + self._path_or_buf.read(data_len) + + # necessary data to continue parsing + self._data_location = self._path_or_buf.tell() + + def _setup_dtype(self) -> np.dtype: + """Map between numpy and state dtypes""" + if self._dtype is not None: + return self._dtype + + dtypes = [] # Convert struct data types to numpy data type + for i, typ in enumerate(self._typlist): + if typ in self.NUMPY_TYPE_MAP: + typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP + dtypes.append((f"s{i}", f"{self._byteorder}{self.NUMPY_TYPE_MAP[typ]}")) + else: + dtypes.append((f"s{i}", f"S{typ}")) + self._dtype = np.dtype(dtypes) + + return self._dtype + + def _decode(self, s: bytes) -> str: + # have bytes not strings, so must decode + s = s.partition(b"\0")[0] + try: + return s.decode(self._encoding) + except UnicodeDecodeError: + # GH 25960, fallback to handle incorrect format produced when 117 + # files are converted to 118 files in Stata + encoding = self._encoding + msg = f""" +One or more strings in the dta file could not be decoded using {encoding}, and +so the fallback encoding of latin-1 is being used. This can happen when a file +has been incorrectly encoded by Stata or some other software. You should verify +the string values returned are correct.""" + warnings.warn( + msg, + UnicodeWarning, + stacklevel=find_stack_level(), + ) + return s.decode("latin-1") + + def _read_value_labels(self) -> None: + self._ensure_open() + if self._value_labels_read: + # Don't read twice + return + if self._format_version <= 108: + # Value labels are not supported in version 108 and earlier. 
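The structured dtype built by `_setup_dtype` above is what later lets `np.frombuffer` slice fixed-width Stata records into columns; a toy illustration, assuming a made-up two-column layout (the `s0`/`s1` naming follows the code above, the sample bytes are invented):

    import numpy as np

    # A little-endian float64 followed by a 4-byte string, mirroring the
    # (f"s{i}", f"{byteorder}{NUMPY_TYPE_MAP[typ]}") and (f"s{i}", f"S{typ}") entries.
    record_dtype = np.dtype([("s0", "<f8"), ("s1", "S4")])
    raw = np.array([(1.5, b"abcd")], dtype=record_dtype).tobytes()
    records = np.frombuffer(raw, dtype=record_dtype, count=1)
    # records["s0"] -> array([1.5]); records["s1"] -> array([b"abcd"])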
+ self._value_labels_read = True + self._value_label_dict: dict[str, dict[float, str]] = {} + return + + if self._format_version >= 117: + self._path_or_buf.seek(self._seek_value_labels) + else: + assert self._dtype is not None + offset = self._nobs * self._dtype.itemsize + self._path_or_buf.seek(self._data_location + offset) + + self._value_labels_read = True + self._value_label_dict = {} + + while True: + if self._format_version >= 117: + if self._path_or_buf.read(5) == b"</val": # <lbl> + break # end of value label table + + slength = self._path_or_buf.read(4) + if not slength: + break # end of value label table (format < 117) + if self._format_version <= 117: + labname = self._decode(self._path_or_buf.read(33)) + else: + labname = self._decode(self._path_or_buf.read(129)) + self._path_or_buf.read(3) # padding + + n = self._read_uint32() + txtlen = self._read_uint32() + off = np.frombuffer( + self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n + ) + val = np.frombuffer( + self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n + ) + ii = np.argsort(off) + off = off[ii] + val = val[ii] + txt = self._path_or_buf.read(txtlen) + self._value_label_dict[labname] = {} + for i in range(n): + end = off[i + 1] if i < n - 1 else txtlen + self._value_label_dict[labname][val[i]] = self._decode( + txt[off[i] : end] + ) + if self._format_version >= 117: + self._path_or_buf.read(6) # </lbl> + self._value_labels_read = True + + def _read_strls(self) -> None: + self._path_or_buf.seek(self._seek_strls) + # Wrap v_o in a string to allow uint64 values as keys on 32bit OS + self.GSO = {"0": ""} + while True: + if self._path_or_buf.read(3) != b"GSO": + break + + if self._format_version == 117: + v_o = self._read_uint64() + else: + buf = self._path_or_buf.read(12) + # Only tested on little endian file on little endian machine. + v_size = 2 if self._format_version == 118 else 3 + if self._byteorder == "<": + buf = buf[0:v_size] + buf[4 : (12 - v_size)] + else: + # This path may not be correct, impossible to test + buf = buf[0:v_size] + buf[(4 + v_size) :] + v_o = struct.unpack("Q", buf)[0] + typ = self._read_uint8() + length = self._read_uint32() + va = self._path_or_buf.read(length) + if typ == 130: + decoded_va = va[0:-1].decode(self._encoding) + else: + # Stata says typ 129 can be binary, so use str + decoded_va = str(va) + # Wrap v_o in a string to allow uint64 values as keys on 32bit OS + self.GSO[str(v_o)] = decoded_va + + def __next__(self) -> DataFrame: + self._using_iterator = True + return self.read(nrows=self._chunksize) + + def get_chunk(self, size: int | None = None) -> DataFrame: + """ + Reads lines from Stata file and returns as dataframe + + Parameters + ---------- + size : int, defaults to None + Number of lines to read. If None, reads whole file. 
+ + Returns + ------- + DataFrame + """ + if size is None: + size = self._chunksize + return self.read(nrows=size) + + @Appender(_read_method_doc) + def read( + self, + nrows: int | None = None, + convert_dates: bool | None = None, + convert_categoricals: bool | None = None, + index_col: str | None = None, + convert_missing: bool | None = None, + preserve_dtypes: bool | None = None, + columns: Sequence[str] | None = None, + order_categoricals: bool | None = None, + ) -> DataFrame: + self._ensure_open() + + # Handle options + if convert_dates is None: + convert_dates = self._convert_dates + if convert_categoricals is None: + convert_categoricals = self._convert_categoricals + if convert_missing is None: + convert_missing = self._convert_missing + if preserve_dtypes is None: + preserve_dtypes = self._preserve_dtypes + if columns is None: + columns = self._columns + if order_categoricals is None: + order_categoricals = self._order_categoricals + if index_col is None: + index_col = self._index_col + if nrows is None: + nrows = self._nobs + + # Handle empty file or chunk. If reading incrementally raise + # StopIteration. If reading the whole thing return an empty + # data frame. + if (self._nobs == 0) and nrows == 0: + self._can_read_value_labels = True + self._data_read = True + data = DataFrame(columns=self._varlist) + # Apply dtypes correctly + for i, col in enumerate(data.columns): + dt = self._dtyplist[i] + if isinstance(dt, np.dtype): + if dt.char != "S": + data[col] = data[col].astype(dt) + if columns is not None: + data = self._do_select_columns(data, columns) + return data + + if (self._format_version >= 117) and (not self._value_labels_read): + self._can_read_value_labels = True + self._read_strls() + + # Read data + assert self._dtype is not None + dtype = self._dtype + max_read_len = (self._nobs - self._lines_read) * dtype.itemsize + read_len = nrows * dtype.itemsize + read_len = min(read_len, max_read_len) + if read_len <= 0: + # Iterator has finished, should never be here unless + # we are reading the file incrementally + if convert_categoricals: + self._read_value_labels() + raise StopIteration + offset = self._lines_read * dtype.itemsize + self._path_or_buf.seek(self._data_location + offset) + read_lines = min(nrows, self._nobs - self._lines_read) + raw_data = np.frombuffer( + self._path_or_buf.read(read_len), dtype=dtype, count=read_lines + ) + + self._lines_read += read_lines + if self._lines_read == self._nobs: + self._can_read_value_labels = True + self._data_read = True + # if necessary, swap the byte order to native here + if self._byteorder != self._native_byteorder: + raw_data = raw_data.byteswap().view(raw_data.dtype.newbyteorder()) + + if convert_categoricals: + self._read_value_labels() + + if len(raw_data) == 0: + data = DataFrame(columns=self._varlist) + else: + data = DataFrame.from_records(raw_data) + data.columns = Index(self._varlist) + + # If index is not specified, use actual row number rather than + # restarting at 0 for each chunk. 
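In user terms, the chunked path above is reached through `chunksize`/`iterator`; a minimal usage sketch (the file name is illustrative), showing that the index keeps counting across chunks instead of restarting at 0 unless `index_col` is given:

    import pandas as pd

    with pd.read_stata("example.dta", chunksize=1000) as reader:
        for chunk in reader:
            print(chunk.index[0], chunk.index[-1])  # e.g. 0 999, then 1000 1999, ...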
+ if index_col is None: + data.index = RangeIndex( + self._lines_read - read_lines, self._lines_read + ) # set attr instead of set_index to avoid copy + + if columns is not None: + data = self._do_select_columns(data, columns) + + # Decode strings + for col, typ in zip(data, self._typlist): + if isinstance(typ, int): + data[col] = data[col].apply(self._decode) + + data = self._insert_strls(data) + + # Convert columns (if needed) to match input type + valid_dtypes = [i for i, dtyp in enumerate(self._dtyplist) if dtyp is not None] + object_type = np.dtype(object) + for idx in valid_dtypes: + dtype = data.iloc[:, idx].dtype + if dtype not in (object_type, self._dtyplist[idx]): + data.isetitem(idx, data.iloc[:, idx].astype(dtype)) + + data = self._do_convert_missing(data, convert_missing) + + if convert_dates: + for i, fmt in enumerate(self._fmtlist): + if any(fmt.startswith(date_fmt) for date_fmt in _date_formats): + data.isetitem( + i, _stata_elapsed_date_to_datetime_vec(data.iloc[:, i], fmt) + ) + + if convert_categoricals and self._format_version > 108: + data = self._do_convert_categoricals( + data, self._value_label_dict, self._lbllist, order_categoricals + ) + + if not preserve_dtypes: + retyped_data = [] + convert = False + for col in data: + dtype = data[col].dtype + if dtype in (np.dtype(np.float16), np.dtype(np.float32)): + dtype = np.dtype(np.float64) + convert = True + elif dtype in ( + np.dtype(np.int8), + np.dtype(np.int16), + np.dtype(np.int32), + ): + dtype = np.dtype(np.int64) + convert = True + retyped_data.append((col, data[col].astype(dtype))) + if convert: + data = DataFrame.from_dict(dict(retyped_data)) + + if index_col is not None: + data = data.set_index(data.pop(index_col)) + + return data + + def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame: + # Check for missing values, and replace if found + replacements = {} + for i in range(len(data.columns)): + fmt = self._typlist[i] + if fmt not in self.VALID_RANGE: + continue + + fmt = cast(str, fmt) # only strs in VALID_RANGE + nmin, nmax = self.VALID_RANGE[fmt] + series = data.iloc[:, i] + + # appreciably faster to do this with ndarray instead of Series + svals = series._values + missing = (svals < nmin) | (svals > nmax) + + if not missing.any(): + continue + + if convert_missing: # Replacement follows Stata notation + missing_loc = np.nonzero(np.asarray(missing))[0] + umissing, umissing_loc = np.unique(series[missing], return_inverse=True) + replacement = Series(series, dtype=object) + for j, um in enumerate(umissing): + missing_value = StataMissingValue(um) + + loc = missing_loc[umissing_loc == j] + replacement.iloc[loc] = missing_value + else: # All replacements are identical + dtype = series.dtype + if dtype not in (np.float32, np.float64): + dtype = np.float64 + replacement = Series(series, dtype=dtype) + if not replacement._values.flags["WRITEABLE"]: + # only relevant for ArrayManager; construction + # path for BlockManager ensures writeability + replacement = replacement.copy() + # Note: operating on ._values is much faster than directly + # TODO: can we fix that? 
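From the caller's side, the two branches above are what `convert_missing` toggles; a hedged sketch (the path is illustrative and the file is assumed to contain Stata missing codes such as `.a`):

    import pandas as pd

    # Default: every Stata missing code is collapsed to NaN.
    df_nan = pd.read_stata("example.dta")
    # convert_missing=True keeps the distinction between .a, .b, ... as
    # StataMissingValue objects in object-dtype columns.
    df_raw = pd.read_stata("example.dta", convert_missing=True)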
+ replacement._values[missing] = np.nan + replacements[i] = replacement + if replacements: + for idx, value in replacements.items(): + data.isetitem(idx, value) + return data + + def _insert_strls(self, data: DataFrame) -> DataFrame: + if not hasattr(self, "GSO") or len(self.GSO) == 0: + return data + for i, typ in enumerate(self._typlist): + if typ != "Q": + continue + # Wrap v_o in a string to allow uint64 values as keys on 32bit OS + data.isetitem(i, [self.GSO[str(k)] for k in data.iloc[:, i]]) + return data + + def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame: + if not self._column_selector_set: + column_set = set(columns) + if len(column_set) != len(columns): + raise ValueError("columns contains duplicate entries") + unmatched = column_set.difference(data.columns) + if unmatched: + joined = ", ".join(list(unmatched)) + raise ValueError( + "The following columns were not " + f"found in the Stata data set: {joined}" + ) + # Copy information for retained columns for later processing + dtyplist = [] + typlist = [] + fmtlist = [] + lbllist = [] + for col in columns: + i = data.columns.get_loc(col) + dtyplist.append(self._dtyplist[i]) + typlist.append(self._typlist[i]) + fmtlist.append(self._fmtlist[i]) + lbllist.append(self._lbllist[i]) + + self._dtyplist = dtyplist + self._typlist = typlist + self._fmtlist = fmtlist + self._lbllist = lbllist + self._column_selector_set = True + + return data[columns] + + def _do_convert_categoricals( + self, + data: DataFrame, + value_label_dict: dict[str, dict[float, str]], + lbllist: Sequence[str], + order_categoricals: bool, + ) -> DataFrame: + """ + Converts categorical columns to Categorical type. + """ + if not value_label_dict: + return data + cat_converted_data = [] + for col, label in zip(data, lbllist): + if label in value_label_dict: + # Explicit call with ordered=True + vl = value_label_dict[label] + keys = np.array(list(vl.keys())) + column = data[col] + key_matches = column.isin(keys) + if self._using_iterator and key_matches.all(): + initial_categories: np.ndarray | None = keys + # If all categories are in the keys and we are iterating, + # use the same keys for all chunks. If some are missing + # value labels, then we will fall back to the categories + # varying across chunks. + else: + if self._using_iterator: + # warn is using an iterator + warnings.warn( + categorical_conversion_warning, + CategoricalConversionWarning, + stacklevel=find_stack_level(), + ) + initial_categories = None + cat_data = Categorical( + column, categories=initial_categories, ordered=order_categoricals + ) + if initial_categories is None: + # If None here, then we need to match the cats in the Categorical + categories = [] + for category in cat_data.categories: + if category in vl: + categories.append(vl[category]) + else: + categories.append(category) + else: + # If all cats are matched, we can use the values + categories = list(vl.values()) + try: + # Try to catch duplicate categories + # TODO: if we get a non-copying rename_categories, use that + cat_data = cat_data.rename_categories(categories) + except ValueError as err: + vc = Series(categories, copy=False).value_counts() + repeated_cats = list(vc.index[vc > 1]) + repeats = "-" * 80 + "\n" + "\n".join(repeated_cats) + # GH 25772 + msg = f""" +Value labels for column {col} are not unique. These cannot be converted to +pandas categoricals. 
+ +Either read the file with `convert_categoricals` set to False or use the +low level interface in `StataReader` to separately read the values and the +value_labels. + +The repeated labels are: +{repeats} +""" + raise ValueError(msg) from err + # TODO: is the next line needed above in the data(...) method? + cat_series = Series(cat_data, index=data.index, copy=False) + cat_converted_data.append((col, cat_series)) + else: + cat_converted_data.append((col, data[col])) + data = DataFrame(dict(cat_converted_data), copy=False) + return data + + @property + def data_label(self) -> str: + """ + Return data label of Stata file. + + Examples + -------- + >>> df = pd.DataFrame([(1,)], columns=["variable"]) + >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) + >>> data_label = "This is a data file." + >>> path = "/My_path/filename.dta" + >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP + ... data_label=data_label, # doctest: +SKIP + ... version=None) # doctest: +SKIP + >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP + ... print(reader.data_label) # doctest: +SKIP + This is a data file. + """ + self._ensure_open() + return self._data_label + + @property + def time_stamp(self) -> str: + """ + Return time stamp of Stata file. + """ + self._ensure_open() + return self._time_stamp + + def variable_labels(self) -> dict[str, str]: + """ + Return a dict associating each variable name with corresponding label. + + Returns + ------- + dict + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"]) + >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) + >>> path = "/My_path/filename.dta" + >>> variable_labels = {"col_1": "This is an example"} + >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP + ... variable_labels=variable_labels, version=None) # doctest: +SKIP + >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP + ... print(reader.variable_labels()) # doctest: +SKIP + {'index': '', 'col_1': 'This is an example', 'col_2': ''} + >>> pd.read_stata(path) # doctest: +SKIP + index col_1 col_2 + 0 0 1 2 + 1 1 3 4 + """ + self._ensure_open() + return dict(zip(self._varlist, self._variable_labels)) + + def value_labels(self) -> dict[str, dict[float, str]]: + """ + Return a nested dict associating each variable name to its value and label. + + Returns + ------- + dict + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"]) + >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) + >>> path = "/My_path/filename.dta" + >>> value_labels = {"col_1": {3: "x"}} + >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP + ... value_labels=value_labels, version=None) # doctest: +SKIP + >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP + ... 
print(reader.value_labels()) # doctest: +SKIP + {'col_1': {3: 'x'}} + >>> pd.read_stata(path) # doctest: +SKIP + index col_1 col_2 + 0 0 1 2 + 1 1 x 4 + """ + if not self._value_labels_read: + self._read_value_labels() + + return self._value_label_dict + + +@Appender(_read_stata_doc) +def read_stata( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + convert_dates: bool = True, + convert_categoricals: bool = True, + index_col: str | None = None, + convert_missing: bool = False, + preserve_dtypes: bool = True, + columns: Sequence[str] | None = None, + order_categoricals: bool = True, + chunksize: int | None = None, + iterator: bool = False, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, +) -> DataFrame | StataReader: + reader = StataReader( + filepath_or_buffer, + convert_dates=convert_dates, + convert_categoricals=convert_categoricals, + index_col=index_col, + convert_missing=convert_missing, + preserve_dtypes=preserve_dtypes, + columns=columns, + order_categoricals=order_categoricals, + chunksize=chunksize, + storage_options=storage_options, + compression=compression, + ) + + if iterator or chunksize: + return reader + + with reader: + return reader.read() + + +def _set_endianness(endianness: str) -> str: + if endianness.lower() in ["<", "little"]: + return "<" + elif endianness.lower() in [">", "big"]: + return ">" + else: # pragma : no cover + raise ValueError(f"Endianness {endianness} not understood") + + +def _pad_bytes(name: AnyStr, length: int) -> AnyStr: + """ + Take a char string and pads it with null bytes until it's length chars. + """ + if isinstance(name, bytes): + return name + b"\x00" * (length - len(name)) + return name + "\x00" * (length - len(name)) + + +def _convert_datetime_to_stata_type(fmt: str) -> np.dtype: + """ + Convert from one of the stata date formats to a type in TYPE_MAP. + """ + if fmt in [ + "tc", + "%tc", + "td", + "%td", + "tw", + "%tw", + "tm", + "%tm", + "tq", + "%tq", + "th", + "%th", + "ty", + "%ty", + ]: + return np.dtype(np.float64) # Stata expects doubles for SIFs + else: + raise NotImplementedError(f"Format {fmt} not implemented") + + +def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict: + new_dict = {} + for key in convert_dates: + if not convert_dates[key].startswith("%"): # make sure proper fmts + convert_dates[key] = "%" + convert_dates[key] + if key in varlist: + new_dict.update({varlist.index(key): convert_dates[key]}) + else: + if not isinstance(key, int): + raise ValueError("convert_dates key must be a column or an integer") + new_dict.update({key: convert_dates[key]}) + return new_dict + + +def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int: + """ + Convert dtype types to stata types. Returns the byte of the given ordinal. + See TYPE_MAP and comments for an explanation. This is also explained in + the dta spec. + 1 - 244 are strings of this length + Pandas Stata + 251 - for int8 byte + 252 - for int16 int + 253 - for int32 long + 254 - for float32 float + 255 - for double double + + If there are dates to convert, then dtype will already have the correct + type inserted. + """ + # TODO: expand to handle datetime to integer conversion + if dtype.type is np.object_: # try to coerce it to the biggest string + # not memory efficient, what else could we + # do? 
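For reference, the `read_stata` entry point defined above is typically driven with a handful of these options; a small usage sketch (the path and column names are made up):

    import pandas as pd

    df = pd.read_stata(
        "survey.dta",
        columns=["id", "income"],        # load only a subset of variables
        convert_categoricals=False,      # keep raw codes instead of value labels
        index_col="id",
    )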
+ itemsize = max_len_string_array(ensure_object(column._values)) + return max(itemsize, 1) + elif dtype.type is np.float64: + return 255 + elif dtype.type is np.float32: + return 254 + elif dtype.type is np.int32: + return 253 + elif dtype.type is np.int16: + return 252 + elif dtype.type is np.int8: + return 251 + else: # pragma : no cover + raise NotImplementedError(f"Data type {dtype} not supported.") + + +def _dtype_to_default_stata_fmt( + dtype, column: Series, dta_version: int = 114, force_strl: bool = False +) -> str: + """ + Map numpy dtype to stata's default format for this type. Not terribly + important since users can change this in Stata. Semantics are + + object -> "%DDs" where DD is the length of the string. If not a string, + raise ValueError + float64 -> "%10.0g" + float32 -> "%9.0g" + int64 -> "%9.0g" + int32 -> "%12.0g" + int16 -> "%8.0g" + int8 -> "%8.0g" + strl -> "%9s" + """ + # TODO: Refactor to combine type with format + # TODO: expand this to handle a default datetime format? + if dta_version < 117: + max_str_len = 244 + else: + max_str_len = 2045 + if force_strl: + return "%9s" + if dtype.type is np.object_: + itemsize = max_len_string_array(ensure_object(column._values)) + if itemsize > max_str_len: + if dta_version >= 117: + return "%9s" + else: + raise ValueError(excessive_string_length_error.format(column.name)) + return "%" + str(max(itemsize, 1)) + "s" + elif dtype == np.float64: + return "%10.0g" + elif dtype == np.float32: + return "%9.0g" + elif dtype == np.int32: + return "%12.0g" + elif dtype in (np.int8, np.int16): + return "%8.0g" + else: # pragma : no cover + raise NotImplementedError(f"Data type {dtype} not supported.") + + +@doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "fname", +) +class StataWriter(StataParser): + """ + A class for writing Stata binary dta files + + Parameters + ---------- + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. + data : DataFrame + Input to save + convert_dates : dict + Dictionary mapping columns containing datetime types to stata internal + format to use when writing the dates. Options are 'tc', 'td', 'tm', + 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. + Datetime columns that do not have a conversion type specified will be + converted to 'tc'. Raises NotImplementedError if a datetime column has + timezone information + write_index : bool + Write the index to Stata dataset. + byteorder : str + Can be ">", "<", "little", or "big". default is `sys.byteorder` + time_stamp : datetime + A datetime to use as file creation date. Default is the current time + data_label : str + A label for the data set. Must be 80 characters or smaller. + variable_labels : dict + Dictionary containing columns as keys and variable labels as values. + Each label must be 80 characters or smaller. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + {storage_options} + + value_labels : dict of dicts + Dictionary containing columns as keys and dictionaries of column value + to labels as values. The combined length of all labels for a single + variable must be 32,000 characters or smaller. + + .. 
versionadded:: 1.4.0 + + Returns + ------- + writer : StataWriter instance + The StataWriter instance has a write_file method, which will + write the file to the given `fname`. + + Raises + ------ + NotImplementedError + * If datetimes contain timezone information + ValueError + * Columns listed in convert_dates are neither datetime64[ns] + or datetime + * Column dtype is not representable in Stata + * Column listed in convert_dates is not in DataFrame + * Categorical label contains more than 32,000 characters + + Examples + -------- + >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b']) + >>> writer = StataWriter('./data_file.dta', data) + >>> writer.write_file() + + Directly write a zip file + >>> compression = {{"method": "zip", "archive_name": "data_file.dta"}} + >>> writer = StataWriter('./data_file.zip', data, compression=compression) + >>> writer.write_file() + + Save a DataFrame with dates + >>> from datetime import datetime + >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date']) + >>> writer = StataWriter('./date_data_file.dta', data, {{'date' : 'tw'}}) + >>> writer.write_file() + """ + + _max_string_length = 244 + _encoding: Literal["latin-1", "utf-8"] = "latin-1" + + def __init__( + self, + fname: FilePath | WriteBuffer[bytes], + data: DataFrame, + convert_dates: dict[Hashable, str] | None = None, + write_index: bool = True, + byteorder: str | None = None, + time_stamp: datetime | None = None, + data_label: str | None = None, + variable_labels: dict[Hashable, str] | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + *, + value_labels: dict[Hashable, dict[float, str]] | None = None, + ) -> None: + super().__init__() + self.data = data + self._convert_dates = {} if convert_dates is None else convert_dates + self._write_index = write_index + self._time_stamp = time_stamp + self._data_label = data_label + self._variable_labels = variable_labels + self._non_cat_value_labels = value_labels + self._value_labels: list[StataValueLabel] = [] + self._has_value_labels = np.array([], dtype=bool) + self._compression = compression + self._output_file: IO[bytes] | None = None + self._converted_names: dict[Hashable, str] = {} + # attach nobs, nvars, data, varlist, typlist + self._prepare_pandas(data) + self.storage_options = storage_options + + if byteorder is None: + byteorder = sys.byteorder + self._byteorder = _set_endianness(byteorder) + self._fname = fname + self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8} + + def _write(self, to_write: str) -> None: + """ + Helper to call encode before writing to file for Python 3 compat. + """ + self.handles.handle.write(to_write.encode(self._encoding)) + + def _write_bytes(self, value: bytes) -> None: + """ + Helper to assert file is open before writing. + """ + self.handles.handle.write(value) + + def _prepare_non_cat_value_labels( + self, data: DataFrame + ) -> list[StataNonCatValueLabel]: + """ + Check for value labels provided for non-categorical columns. Value + labels + """ + non_cat_value_labels: list[StataNonCatValueLabel] = [] + if self._non_cat_value_labels is None: + return non_cat_value_labels + + for labname, labels in self._non_cat_value_labels.items(): + if labname in self._converted_names: + colname = self._converted_names[labname] + elif labname in data.columns: + colname = str(labname) + else: + raise KeyError( + f"Can't create value labels for {labname}, it wasn't " + "found in the dataset." 
+ ) + + if not is_numeric_dtype(data[colname].dtype): + # Labels should not be passed explicitly for categorical + # columns that will be converted to int + raise ValueError( + f"Can't create value labels for {labname}, value labels " + "can only be applied to numeric columns." + ) + svl = StataNonCatValueLabel(colname, labels, self._encoding) + non_cat_value_labels.append(svl) + return non_cat_value_labels + + def _prepare_categoricals(self, data: DataFrame) -> DataFrame: + """ + Check for categorical columns, retain categorical information for + Stata file and convert categorical data to int + """ + is_cat = [isinstance(dtype, CategoricalDtype) for dtype in data.dtypes] + if not any(is_cat): + return data + + self._has_value_labels |= np.array(is_cat) + + get_base_missing_value = StataMissingValue.get_base_missing_value + data_formatted = [] + for col, col_is_cat in zip(data, is_cat): + if col_is_cat: + svl = StataValueLabel(data[col], encoding=self._encoding) + self._value_labels.append(svl) + dtype = data[col].cat.codes.dtype + if dtype == np.int64: + raise ValueError( + "It is not possible to export " + "int64-based categorical data to Stata." + ) + values = data[col].cat.codes._values.copy() + + # Upcast if needed so that correct missing values can be set + if values.max() >= get_base_missing_value(dtype): + if dtype == np.int8: + dtype = np.dtype(np.int16) + elif dtype == np.int16: + dtype = np.dtype(np.int32) + else: + dtype = np.dtype(np.float64) + values = np.array(values, dtype=dtype) + + # Replace missing values with Stata missing value for type + values[values == -1] = get_base_missing_value(dtype) + data_formatted.append((col, values)) + else: + data_formatted.append((col, data[col])) + return DataFrame.from_dict(dict(data_formatted)) + + def _replace_nans(self, data: DataFrame) -> DataFrame: + # return data + """ + Checks floating point data columns for nans, and replaces these with + the generic Stata for missing value (.) + """ + for c in data: + dtype = data[c].dtype + if dtype in (np.float32, np.float64): + if dtype == np.float32: + replacement = self.MISSING_VALUES["f"] + else: + replacement = self.MISSING_VALUES["d"] + data[c] = data[c].fillna(replacement) + + return data + + def _update_strl_names(self) -> None: + """No-op, forward compatibility""" + + def _validate_variable_name(self, name: str) -> str: + """ + Validate variable names for Stata export. + + Parameters + ---------- + name : str + Variable name + + Returns + ------- + str + The validated name with invalid characters replaced with + underscores. + + Notes + ----- + Stata 114 and 117 support ascii characters in a-z, A-Z, 0-9 + and _. + """ + for c in name: + if ( + (c < "A" or c > "Z") + and (c < "a" or c > "z") + and (c < "0" or c > "9") + and c != "_" + ): + name = name.replace(c, "_") + return name + + def _check_column_names(self, data: DataFrame) -> DataFrame: + """ + Checks column names to ensure that they are valid Stata column names. 
+ This includes checks for: + * Non-string names + * Stata keywords + * Variables that start with numbers + * Variables with names that are too long + + When an illegal variable name is detected, it is converted, and if + dates are exported, the variable name is propagated to the date + conversion dictionary + """ + converted_names: dict[Hashable, str] = {} + columns = list(data.columns) + original_columns = columns[:] + + duplicate_var_id = 0 + for j, name in enumerate(columns): + orig_name = name + if not isinstance(name, str): + name = str(name) + + name = self._validate_variable_name(name) + + # Variable name must not be a reserved word + if name in self.RESERVED_WORDS: + name = "_" + name + + # Variable name may not start with a number + if "0" <= name[0] <= "9": + name = "_" + name + + name = name[: min(len(name), 32)] + + if not name == orig_name: + # check for duplicates + while columns.count(name) > 0: + # prepend ascending number to avoid duplicates + name = "_" + str(duplicate_var_id) + name + name = name[: min(len(name), 32)] + duplicate_var_id += 1 + converted_names[orig_name] = name + + columns[j] = name + + data.columns = Index(columns) + + # Check date conversion, and fix key if needed + if self._convert_dates: + for c, o in zip(columns, original_columns): + if c != o: + self._convert_dates[c] = self._convert_dates[o] + del self._convert_dates[o] + + if converted_names: + conversion_warning = [] + for orig_name, name in converted_names.items(): + msg = f"{orig_name} -> {name}" + conversion_warning.append(msg) + + ws = invalid_name_doc.format("\n ".join(conversion_warning)) + warnings.warn( + ws, + InvalidColumnName, + stacklevel=find_stack_level(), + ) + + self._converted_names = converted_names + self._update_strl_names() + + return data + + def _set_formats_and_types(self, dtypes: Series) -> None: + self.fmtlist: list[str] = [] + self.typlist: list[int] = [] + for col, dtype in dtypes.items(): + self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, self.data[col])) + self.typlist.append(_dtype_to_stata_type(dtype, self.data[col])) + + def _prepare_pandas(self, data: DataFrame) -> None: + # NOTE: we might need a different API / class for pandas objects so + # we can set different semantics - handle this with a PR to pandas.io + + data = data.copy() + + if self._write_index: + temp = data.reset_index() + if isinstance(temp, DataFrame): + data = temp + + # Ensure column names are strings + data = self._check_column_names(data) + + # Check columns for compatibility with stata, upcast if necessary + # Raise if outside the supported range + data = _cast_to_stata_types(data) + + # Replace NaNs with Stata missing values + data = self._replace_nans(data) + + # Set all columns to initially unlabelled + self._has_value_labels = np.repeat(False, data.shape[1]) + + # Create value labels for non-categorical data + non_cat_value_labels = self._prepare_non_cat_value_labels(data) + + non_cat_columns = [svl.labname for svl in non_cat_value_labels] + has_non_cat_val_labels = data.columns.isin(non_cat_columns) + self._has_value_labels |= has_non_cat_val_labels + self._value_labels.extend(non_cat_value_labels) + + # Convert categoricals to int data, and strip labels + data = self._prepare_categoricals(data) + + self.nobs, self.nvar = data.shape + self.data = data + self.varlist = data.columns.tolist() + + dtypes = data.dtypes + + # Ensure all date columns are converted + for col in data: + if col in self._convert_dates: + continue + if lib.is_np_dtype(data[col].dtype, "M"): + 
self._convert_dates[col] = "tc" + + self._convert_dates = _maybe_convert_to_int_keys( + self._convert_dates, self.varlist + ) + for key in self._convert_dates: + new_type = _convert_datetime_to_stata_type(self._convert_dates[key]) + dtypes.iloc[key] = np.dtype(new_type) + + # Verify object arrays are strings and encode to bytes + self._encode_strings() + + self._set_formats_and_types(dtypes) + + # set the given format for the datetime cols + if self._convert_dates is not None: + for key in self._convert_dates: + if isinstance(key, int): + self.fmtlist[key] = self._convert_dates[key] + + def _encode_strings(self) -> None: + """ + Encode strings in dta-specific encoding + + Do not encode columns marked for date conversion or for strL + conversion. The strL converter independently handles conversion and + also accepts empty string arrays. + """ + convert_dates = self._convert_dates + # _convert_strl is not available in dta 114 + convert_strl = getattr(self, "_convert_strl", []) + for i, col in enumerate(self.data): + # Skip columns marked for date conversion or strl conversion + if i in convert_dates or col in convert_strl: + continue + column = self.data[col] + dtype = column.dtype + if dtype.type is np.object_: + inferred_dtype = infer_dtype(column, skipna=True) + if not ((inferred_dtype == "string") or len(column) == 0): + col = column.name + raise ValueError( + f"""\ +Column `{col}` cannot be exported.\n\nOnly string-like object arrays +containing all strings or a mix of strings and None can be exported. +Object arrays containing only null values are prohibited. Other object +types cannot be exported and must first be converted to one of the +supported types.""" + ) + encoded = self.data[col].str.encode(self._encoding) + # If larger than _max_string_length do nothing + if ( + max_len_string_array(ensure_object(encoded._values)) + <= self._max_string_length + ): + self.data[col] = encoded + + def write_file(self) -> None: + """ + Export DataFrame object to Stata dta format. + + Examples + -------- + >>> df = pd.DataFrame({"fully_labelled": [1, 2, 3, 3, 1], + ... "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan], + ... "Y": [7, 7, 9, 8, 10], + ... "Z": pd.Categorical(["j", "k", "l", "k", "j"]), + ... }) + >>> path = "/My_path/filename.dta" + >>> labels = {"fully_labelled": {1: "one", 2: "two", 3: "three"}, + ... "partially_labelled": {1.0: "one", 2.0: "two"}, + ... } + >>> writer = pd.io.stata.StataWriter(path, + ... df, + ... value_labels=labels) # doctest: +SKIP + >>> writer.write_file() # doctest: +SKIP + >>> df = pd.read_stata(path) # doctest: +SKIP + >>> df # doctest: +SKIP + index fully_labelled partially_labeled Y Z + 0 0 one one 7 j + 1 1 two two 7 k + 2 2 three NaN 9 l + 3 3 three 9.0 8 k + 4 4 one NaN 10 j + """ + with get_handle( + self._fname, + "wb", + compression=self._compression, + is_text=False, + storage_options=self.storage_options, + ) as self.handles: + if self.handles.compression["method"] is not None: + # ZipFile creates a file (with the same name) for each write call. + # Write it first into a buffer and then write the buffer to the ZipFile. 
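The buffering that follows exists because ZipFile would otherwise create one archive member per write call; from the user's side, compressed output is simply requested as in the class docstring above (file names are illustrative):

    import pandas as pd

    df = pd.DataFrame({"a": [1.0], "b": [1]})
    df.to_stata(
        "data_file.zip",
        write_index=False,
        compression={"method": "zip", "archive_name": "data_file.dta"},
    )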
+ self._output_file, self.handles.handle = self.handles.handle, BytesIO() + self.handles.created_handles.append(self.handles.handle) + + try: + self._write_header( + data_label=self._data_label, time_stamp=self._time_stamp + ) + self._write_map() + self._write_variable_types() + self._write_varnames() + self._write_sortlist() + self._write_formats() + self._write_value_label_names() + self._write_variable_labels() + self._write_expansion_fields() + self._write_characteristics() + records = self._prepare_data() + self._write_data(records) + self._write_strls() + self._write_value_labels() + self._write_file_close_tag() + self._write_map() + self._close() + except Exception as exc: + self.handles.close() + if isinstance(self._fname, (str, os.PathLike)) and os.path.isfile( + self._fname + ): + try: + os.unlink(self._fname) + except OSError: + warnings.warn( + f"This save was not successful but {self._fname} could not " + "be deleted. This file is not valid.", + ResourceWarning, + stacklevel=find_stack_level(), + ) + raise exc + + def _close(self) -> None: + """ + Close the file if it was created by the writer. + + If a buffer or file-like object was passed in, for example a GzipFile, + then leave this file open for the caller to close. + """ + # write compression + if self._output_file is not None: + assert isinstance(self.handles.handle, BytesIO) + bio, self.handles.handle = self.handles.handle, self._output_file + self.handles.handle.write(bio.getvalue()) + + def _write_map(self) -> None: + """No-op, future compatibility""" + + def _write_file_close_tag(self) -> None: + """No-op, future compatibility""" + + def _write_characteristics(self) -> None: + """No-op, future compatibility""" + + def _write_strls(self) -> None: + """No-op, future compatibility""" + + def _write_expansion_fields(self) -> None: + """Write 5 zeros for expansion fields""" + self._write(_pad_bytes("", 5)) + + def _write_value_labels(self) -> None: + for vl in self._value_labels: + self._write_bytes(vl.generate_value_label(self._byteorder)) + + def _write_header( + self, + data_label: str | None = None, + time_stamp: datetime | None = None, + ) -> None: + byteorder = self._byteorder + # ds_format - just use 114 + self._write_bytes(struct.pack("b", 114)) + # byteorder + self._write(byteorder == ">" and "\x01" or "\x02") + # filetype + self._write("\x01") + # unused + self._write("\x00") + # number of vars, 2 bytes + self._write_bytes(struct.pack(byteorder + "h", self.nvar)[:2]) + # number of obs, 4 bytes + self._write_bytes(struct.pack(byteorder + "i", self.nobs)[:4]) + # data label 81 bytes, char, null terminated + if data_label is None: + self._write_bytes(self._null_terminate_bytes(_pad_bytes("", 80))) + else: + self._write_bytes( + self._null_terminate_bytes(_pad_bytes(data_label[:80], 80)) + ) + # time stamp, 18 bytes, char, null terminated + # format dd Mon yyyy hh:mm + if time_stamp is None: + time_stamp = datetime.now() + elif not isinstance(time_stamp, datetime): + raise ValueError("time_stamp should be datetime type") + # GH #13856 + # Avoid locale-specific month conversion + months = [ + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + month_lookup = {i + 1: month for i, month in enumerate(months)} + ts = ( + time_stamp.strftime("%d ") + + month_lookup[time_stamp.month] + + time_stamp.strftime(" %Y %H:%M") + ) + self._write_bytes(self._null_terminate_bytes(ts)) + + def _write_variable_types(self) -> None: + for typ in self.typlist: + 
self._write_bytes(struct.pack("B", typ)) + + def _write_varnames(self) -> None: + # varlist names are checked by _check_column_names + # varlist, requires null terminated + for name in self.varlist: + name = self._null_terminate_str(name) + name = _pad_bytes(name[:32], 33) + self._write(name) + + def _write_sortlist(self) -> None: + # srtlist, 2*(nvar+1), int array, encoded by byteorder + srtlist = _pad_bytes("", 2 * (self.nvar + 1)) + self._write(srtlist) + + def _write_formats(self) -> None: + # fmtlist, 49*nvar, char array + for fmt in self.fmtlist: + self._write(_pad_bytes(fmt, 49)) + + def _write_value_label_names(self) -> None: + # lbllist, 33*nvar, char array + for i in range(self.nvar): + # Use variable name when categorical + if self._has_value_labels[i]: + name = self.varlist[i] + name = self._null_terminate_str(name) + name = _pad_bytes(name[:32], 33) + self._write(name) + else: # Default is empty label + self._write(_pad_bytes("", 33)) + + def _write_variable_labels(self) -> None: + # Missing labels are 80 blank characters plus null termination + blank = _pad_bytes("", 81) + + if self._variable_labels is None: + for i in range(self.nvar): + self._write(blank) + return + + for col in self.data: + if col in self._variable_labels: + label = self._variable_labels[col] + if len(label) > 80: + raise ValueError("Variable labels must be 80 characters or fewer") + is_latin1 = all(ord(c) < 256 for c in label) + if not is_latin1: + raise ValueError( + "Variable labels must contain only characters that " + "can be encoded in Latin-1" + ) + self._write(_pad_bytes(label, 81)) + else: + self._write(blank) + + def _convert_strls(self, data: DataFrame) -> DataFrame: + """No-op, future compatibility""" + return data + + def _prepare_data(self) -> np.rec.recarray: + data = self.data + typlist = self.typlist + convert_dates = self._convert_dates + # 1. Convert dates + if self._convert_dates is not None: + for i, col in enumerate(data): + if i in convert_dates: + data[col] = _datetime_to_stata_elapsed_vec( + data[col], self.fmtlist[i] + ) + # 2. Convert strls + data = self._convert_strls(data) + + # 3. Convert bad string data to '' and pad to correct length + dtypes = {} + native_byteorder = self._byteorder == _set_endianness(sys.byteorder) + for i, col in enumerate(data): + typ = typlist[i] + if typ <= self._max_string_length: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + dc = data[col].fillna("") + data[col] = dc.apply(_pad_bytes, args=(typ,)) + stype = f"S{typ}" + dtypes[col] = stype + data[col] = data[col].astype(stype) + else: + dtype = data[col].dtype + if not native_byteorder: + dtype = dtype.newbyteorder(self._byteorder) + dtypes[col] = dtype + + return data.to_records(index=False, column_dtypes=dtypes) + + def _write_data(self, records: np.rec.recarray) -> None: + self._write_bytes(records.tobytes()) + + @staticmethod + def _null_terminate_str(s: str) -> str: + s += "\x00" + return s + + def _null_terminate_bytes(self, s: str) -> bytes: + return self._null_terminate_str(s).encode(self._encoding) + + +def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int: + """ + Converts dtype types to stata types. Returns the byte of the given ordinal. + See TYPE_MAP and comments for an explanation. This is also explained in + the dta spec. 
+ 1 - 2045 are strings of this length + Pandas Stata + 32768 - for object strL + 65526 - for int8 byte + 65527 - for int16 int + 65528 - for int32 long + 65529 - for float32 float + 65530 - for double double + + If there are dates to convert, then dtype will already have the correct + type inserted. + """ + # TODO: expand to handle datetime to integer conversion + if force_strl: + return 32768 + if dtype.type is np.object_: # try to coerce it to the biggest string + # not memory efficient, what else could we + # do? + itemsize = max_len_string_array(ensure_object(column._values)) + itemsize = max(itemsize, 1) + if itemsize <= 2045: + return itemsize + return 32768 + elif dtype.type is np.float64: + return 65526 + elif dtype.type is np.float32: + return 65527 + elif dtype.type is np.int32: + return 65528 + elif dtype.type is np.int16: + return 65529 + elif dtype.type is np.int8: + return 65530 + else: # pragma : no cover + raise NotImplementedError(f"Data type {dtype} not supported.") + + +def _pad_bytes_new(name: str | bytes, length: int) -> bytes: + """ + Takes a bytes instance and pads it with null bytes until it's length chars. + """ + if isinstance(name, str): + name = bytes(name, "utf-8") + return name + b"\x00" * (length - len(name)) + + +class StataStrLWriter: + """ + Converter for Stata StrLs + + Stata StrLs map 8 byte values to strings which are stored using a + dictionary-like format where strings are keyed to two values. + + Parameters + ---------- + df : DataFrame + DataFrame to convert + columns : Sequence[str] + List of columns names to convert to StrL + version : int, optional + dta version. Currently supports 117, 118 and 119 + byteorder : str, optional + Can be ">", "<", "little", or "big". default is `sys.byteorder` + + Notes + ----- + Supports creation of the StrL block of a dta file for dta versions + 117, 118 and 119. These differ in how the GSO is stored. 118 and + 119 store the GSO lookup value as a uint32 and a uint64, while 117 + uses two uint32s. 118 and 119 also encode all strings as unicode + which is required by the format. 117 uses 'latin-1' a fixed width + encoding that extends the 7-bit ascii table with an additional 128 + characters. + """ + + def __init__( + self, + df: DataFrame, + columns: Sequence[str], + version: int = 117, + byteorder: str | None = None, + ) -> None: + if version not in (117, 118, 119): + raise ValueError("Only dta versions 117, 118 and 119 supported") + self._dta_ver = version + + self.df = df + self.columns = columns + self._gso_table = {"": (0, 0)} + if byteorder is None: + byteorder = sys.byteorder + self._byteorder = _set_endianness(byteorder) + + gso_v_type = "I" # uint32 + gso_o_type = "Q" # uint64 + self._encoding = "utf-8" + if version == 117: + o_size = 4 + gso_o_type = "I" # 117 used uint32 + self._encoding = "latin-1" + elif version == 118: + o_size = 6 + else: # version == 119 + o_size = 5 + self._o_offet = 2 ** (8 * (8 - o_size)) + self._gso_o_type = gso_o_type + self._gso_v_type = gso_v_type + + def _convert_key(self, key: tuple[int, int]) -> int: + v, o = key + return v + self._o_offet * o + + def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]: + """ + Generates the GSO lookup table for the DataFrame + + Returns + ------- + gso_table : dict + Ordered dictionary using the string found as keys + and their lookup position (v,o) as values + gso_df : DataFrame + DataFrame where strl columns have been converted to + (v,o) values + + Notes + ----- + Modifies the DataFrame in-place. 
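As a quick numeric check of the key packing done by `_convert_key` above (values are invented; version 117, where `o_size` is 4, so the `o` offset is 2 ** 32):

    # v sits in the low bits of the uint64 key and o in the high bits.
    v, o = 1, 2
    o_offset = 2 ** (8 * (8 - 4))   # version 117 -> o_size = 4
    key = v + o_offset * o
    assert key == 8_589_934_593     # 1 + 2 * 2**32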
+ + The DataFrame returned encodes the (v,o) values as uint64s. The + encoding depends on the dta version, and can be expressed as + + enc = v + o * 2 ** (o_size * 8) + + so that v is stored in the lower bits and o is in the upper + bits. o_size is + + * 117: 4 + * 118: 6 + * 119: 5 + """ + gso_table = self._gso_table + gso_df = self.df + columns = list(gso_df.columns) + selected = gso_df[self.columns] + col_index = [(col, columns.index(col)) for col in self.columns] + keys = np.empty(selected.shape, dtype=np.uint64) + for o, (idx, row) in enumerate(selected.iterrows()): + for j, (col, v) in enumerate(col_index): + val = row[col] + # Allow columns with mixed str and None (GH 23633) + val = "" if val is None else val + key = gso_table.get(val, None) + if key is None: + # Stata prefers human numbers + key = (v + 1, o + 1) + gso_table[val] = key + keys[o, j] = self._convert_key(key) + for i, col in enumerate(self.columns): + gso_df[col] = keys[:, i] + + return gso_table, gso_df + + def generate_blob(self, gso_table: dict[str, tuple[int, int]]) -> bytes: + """ + Generates the binary blob of GSOs that is written to the dta file. + + Parameters + ---------- + gso_table : dict + Ordered dictionary (str, vo) + + Returns + ------- + gso : bytes + Binary content of dta file to be placed between strl tags + + Notes + ----- + Output format depends on dta version. 117 uses two uint32s to + express v and o while 118+ uses a uint32 for v and a uint64 for o. + """ + # Format information + # Length includes null term + # 117 + # GSOvvvvooootllllxxxxxxxxxxxxxxx...x + # 3 u4 u4 u1 u4 string + null term + # + # 118, 119 + # GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x + # 3 u4 u8 u1 u4 string + null term + + bio = BytesIO() + gso = bytes("GSO", "ascii") + gso_type = struct.pack(self._byteorder + "B", 130) + null = struct.pack(self._byteorder + "B", 0) + v_type = self._byteorder + self._gso_v_type + o_type = self._byteorder + self._gso_o_type + len_type = self._byteorder + "I" + for strl, vo in gso_table.items(): + if vo == (0, 0): + continue + v, o = vo + + # GSO + bio.write(gso) + + # vvvv + bio.write(struct.pack(v_type, v)) + + # oooo / oooooooo + bio.write(struct.pack(o_type, o)) + + # t + bio.write(gso_type) + + # llll + utf8_string = bytes(strl, "utf-8") + bio.write(struct.pack(len_type, len(utf8_string) + 1)) + + # xxx...xxx + bio.write(utf8_string) + bio.write(null) + + return bio.getvalue() + + +class StataWriter117(StataWriter): + """ + A class for writing Stata binary dta files in Stata 13 format (117) + + Parameters + ---------- + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. + data : DataFrame + Input to save + convert_dates : dict + Dictionary mapping columns containing datetime types to stata internal + format to use when writing the dates. Options are 'tc', 'td', 'tm', + 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. + Datetime columns that do not have a conversion type specified will be + converted to 'tc'. Raises NotImplementedError if a datetime column has + timezone information + write_index : bool + Write the index to Stata dataset. + byteorder : str + Can be ">", "<", "little", or "big". default is `sys.byteorder` + time_stamp : datetime + A datetime to use as file creation date. Default is the current time + data_label : str + A label for the data set. 
Must be 80 characters or smaller. + variable_labels : dict + Dictionary containing columns as keys and variable labels as values. + Each label must be 80 characters or smaller. + convert_strl : list + List of columns names to convert to Stata StrL format. Columns with + more than 2045 characters are automatically written as StrL. + Smaller columns can be converted by including the column name. Using + StrLs can reduce output file size when strings are longer than 8 + characters, and either frequently repeated or sparse. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + value_labels : dict of dicts + Dictionary containing columns as keys and dictionaries of column value + to labels as values. The combined length of all labels for a single + variable must be 32,000 characters or smaller. + + .. versionadded:: 1.4.0 + + Returns + ------- + writer : StataWriter117 instance + The StataWriter117 instance has a write_file method, which will + write the file to the given `fname`. + + Raises + ------ + NotImplementedError + * If datetimes contain timezone information + ValueError + * Columns listed in convert_dates are neither datetime64[ns] + or datetime + * Column dtype is not representable in Stata + * Column listed in convert_dates is not in DataFrame + * Categorical label contains more than 32,000 characters + + Examples + -------- + >>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c']) + >>> writer = pd.io.stata.StataWriter117('./data_file.dta', data) + >>> writer.write_file() + + Directly write a zip file + >>> compression = {"method": "zip", "archive_name": "data_file.dta"} + >>> writer = pd.io.stata.StataWriter117( + ... './data_file.zip', data, compression=compression + ... ) + >>> writer.write_file() + + Or with long strings stored in strl format + >>> data = pd.DataFrame([['A relatively long string'], [''], ['']], + ... columns=['strls']) + >>> writer = pd.io.stata.StataWriter117( + ... 
'./data_file_with_long_strings.dta', data, convert_strl=['strls']) + >>> writer.write_file() + """ + + _max_string_length = 2045 + _dta_version = 117 + + def __init__( + self, + fname: FilePath | WriteBuffer[bytes], + data: DataFrame, + convert_dates: dict[Hashable, str] | None = None, + write_index: bool = True, + byteorder: str | None = None, + time_stamp: datetime | None = None, + data_label: str | None = None, + variable_labels: dict[Hashable, str] | None = None, + convert_strl: Sequence[Hashable] | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + *, + value_labels: dict[Hashable, dict[float, str]] | None = None, + ) -> None: + # Copy to new list since convert_strl might be modified later + self._convert_strl: list[Hashable] = [] + if convert_strl is not None: + self._convert_strl.extend(convert_strl) + + super().__init__( + fname, + data, + convert_dates, + write_index, + byteorder=byteorder, + time_stamp=time_stamp, + data_label=data_label, + variable_labels=variable_labels, + value_labels=value_labels, + compression=compression, + storage_options=storage_options, + ) + self._map: dict[str, int] = {} + self._strl_blob = b"" + + @staticmethod + def _tag(val: str | bytes, tag: str) -> bytes: + """Surround val with <tag></tag>""" + if isinstance(val, str): + val = bytes(val, "utf-8") + return bytes("<" + tag + ">", "utf-8") + val + bytes("</" + tag + ">", "utf-8") + + def _update_map(self, tag: str) -> None: + """Update map location for tag with file position""" + assert self.handles.handle is not None + self._map[tag] = self.handles.handle.tell() + + def _write_header( + self, + data_label: str | None = None, + time_stamp: datetime | None = None, + ) -> None: + """Write the file header""" + byteorder = self._byteorder + self._write_bytes(bytes("<stata_dta>", "utf-8")) + bio = BytesIO() + # ds_format - 117 + bio.write(self._tag(bytes(str(self._dta_version), "utf-8"), "release")) + # byteorder + bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", "byteorder")) + # number of vars, 2 bytes in 117 and 118, 4 byte in 119 + nvar_type = "H" if self._dta_version <= 118 else "I" + bio.write(self._tag(struct.pack(byteorder + nvar_type, self.nvar), "K")) + # 117 uses 4 bytes, 118 uses 8 + nobs_size = "I" if self._dta_version == 117 else "Q" + bio.write(self._tag(struct.pack(byteorder + nobs_size, self.nobs), "N")) + # data label 81 bytes, char, null terminated + label = data_label[:80] if data_label is not None else "" + encoded_label = label.encode(self._encoding) + label_size = "B" if self._dta_version == 117 else "H" + label_len = struct.pack(byteorder + label_size, len(encoded_label)) + encoded_label = label_len + encoded_label + bio.write(self._tag(encoded_label, "label")) + # time stamp, 18 bytes, char, null terminated + # format dd Mon yyyy hh:mm + if time_stamp is None: + time_stamp = datetime.now() + elif not isinstance(time_stamp, datetime): + raise ValueError("time_stamp should be datetime type") + # Avoid locale-specific month conversion + months = [ + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + month_lookup = {i + 1: month for i, month in enumerate(months)} + ts = ( + time_stamp.strftime("%d ") + + month_lookup[time_stamp.month] + + time_stamp.strftime(" %Y %H:%M") + ) + # '\x11' added due to inspection of Stata file + stata_ts = b"\x11" + bytes(ts, "utf-8") + bio.write(self._tag(stata_ts, "timestamp")) + self._write_bytes(self._tag(bio.getvalue(), "header")) + + def _write_map(self) -> 
None: + """ + Called twice during file write. The first populates the values in + the map with 0s. The second call writes the final map locations when + all blocks have been written. + """ + if not self._map: + self._map = { + "stata_data": 0, + "map": self.handles.handle.tell(), + "variable_types": 0, + "varnames": 0, + "sortlist": 0, + "formats": 0, + "value_label_names": 0, + "variable_labels": 0, + "characteristics": 0, + "data": 0, + "strls": 0, + "value_labels": 0, + "stata_data_close": 0, + "end-of-file": 0, + } + # Move to start of map + self.handles.handle.seek(self._map["map"]) + bio = BytesIO() + for val in self._map.values(): + bio.write(struct.pack(self._byteorder + "Q", val)) + self._write_bytes(self._tag(bio.getvalue(), "map")) + + def _write_variable_types(self) -> None: + self._update_map("variable_types") + bio = BytesIO() + for typ in self.typlist: + bio.write(struct.pack(self._byteorder + "H", typ)) + self._write_bytes(self._tag(bio.getvalue(), "variable_types")) + + def _write_varnames(self) -> None: + self._update_map("varnames") + bio = BytesIO() + # 118 scales by 4 to accommodate utf-8 data worst case encoding + vn_len = 32 if self._dta_version == 117 else 128 + for name in self.varlist: + name = self._null_terminate_str(name) + name = _pad_bytes_new(name[:32].encode(self._encoding), vn_len + 1) + bio.write(name) + self._write_bytes(self._tag(bio.getvalue(), "varnames")) + + def _write_sortlist(self) -> None: + self._update_map("sortlist") + sort_size = 2 if self._dta_version < 119 else 4 + self._write_bytes(self._tag(b"\x00" * sort_size * (self.nvar + 1), "sortlist")) + + def _write_formats(self) -> None: + self._update_map("formats") + bio = BytesIO() + fmt_len = 49 if self._dta_version == 117 else 57 + for fmt in self.fmtlist: + bio.write(_pad_bytes_new(fmt.encode(self._encoding), fmt_len)) + self._write_bytes(self._tag(bio.getvalue(), "formats")) + + def _write_value_label_names(self) -> None: + self._update_map("value_label_names") + bio = BytesIO() + # 118 scales by 4 to accommodate utf-8 data worst case encoding + vl_len = 32 if self._dta_version == 117 else 128 + for i in range(self.nvar): + # Use variable name when categorical + name = "" # default name + if self._has_value_labels[i]: + name = self.varlist[i] + name = self._null_terminate_str(name) + encoded_name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1) + bio.write(encoded_name) + self._write_bytes(self._tag(bio.getvalue(), "value_label_names")) + + def _write_variable_labels(self) -> None: + # Missing labels are 80 blank characters plus null termination + self._update_map("variable_labels") + bio = BytesIO() + # 118 scales by 4 to accommodate utf-8 data worst case encoding + vl_len = 80 if self._dta_version == 117 else 320 + blank = _pad_bytes_new("", vl_len + 1) + + if self._variable_labels is None: + for _ in range(self.nvar): + bio.write(blank) + self._write_bytes(self._tag(bio.getvalue(), "variable_labels")) + return + + for col in self.data: + if col in self._variable_labels: + label = self._variable_labels[col] + if len(label) > 80: + raise ValueError("Variable labels must be 80 characters or fewer") + try: + encoded = label.encode(self._encoding) + except UnicodeEncodeError as err: + raise ValueError( + "Variable labels must contain only characters that " + f"can be encoded in {self._encoding}" + ) from err + + bio.write(_pad_bytes_new(encoded, vl_len + 1)) + else: + bio.write(blank) + self._write_bytes(self._tag(bio.getvalue(), "variable_labels")) + + def 
_write_characteristics(self) -> None: + self._update_map("characteristics") + self._write_bytes(self._tag(b"", "characteristics")) + + def _write_data(self, records) -> None: + self._update_map("data") + self._write_bytes(b"<data>") + self._write_bytes(records.tobytes()) + self._write_bytes(b"</data>") + + def _write_strls(self) -> None: + self._update_map("strls") + self._write_bytes(self._tag(self._strl_blob, "strls")) + + def _write_expansion_fields(self) -> None: + """No-op in dta 117+""" + + def _write_value_labels(self) -> None: + self._update_map("value_labels") + bio = BytesIO() + for vl in self._value_labels: + lab = vl.generate_value_label(self._byteorder) + lab = self._tag(lab, "lbl") + bio.write(lab) + self._write_bytes(self._tag(bio.getvalue(), "value_labels")) + + def _write_file_close_tag(self) -> None: + self._update_map("stata_data_close") + self._write_bytes(bytes("</stata_dta>", "utf-8")) + self._update_map("end-of-file") + + def _update_strl_names(self) -> None: + """ + Update column names for conversion to strl if they might have been + changed to comply with Stata naming rules + """ + # Update convert_strl if names changed + for orig, new in self._converted_names.items(): + if orig in self._convert_strl: + idx = self._convert_strl.index(orig) + self._convert_strl[idx] = new + + def _convert_strls(self, data: DataFrame) -> DataFrame: + """ + Convert columns to StrLs if either very large or in the + convert_strl variable + """ + convert_cols = [ + col + for i, col in enumerate(data) + if self.typlist[i] == 32768 or col in self._convert_strl + ] + + if convert_cols: + ssw = StataStrLWriter(data, convert_cols, version=self._dta_version) + tab, new_data = ssw.generate_table() + data = new_data + self._strl_blob = ssw.generate_blob(tab) + return data + + def _set_formats_and_types(self, dtypes: Series) -> None: + self.typlist = [] + self.fmtlist = [] + for col, dtype in dtypes.items(): + force_strl = col in self._convert_strl + fmt = _dtype_to_default_stata_fmt( + dtype, + self.data[col], + dta_version=self._dta_version, + force_strl=force_strl, + ) + self.fmtlist.append(fmt) + self.typlist.append( + _dtype_to_stata_type_117(dtype, self.data[col], force_strl) + ) + + +class StataWriterUTF8(StataWriter117): + """ + Stata binary dta file writing in Stata 15 (118) and 16 (119) formats + + DTA 118 and 119 format files support unicode string data (both fixed + and strL) format. Unicode is also supported in value labels, variable + labels and the dataset label. Format 119 is automatically used if the + file contains more than 32,767 variables. + + Parameters + ---------- + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() function. If using a buffer + then the buffer will not be automatically closed after the file + is written. + data : DataFrame + Input to save + convert_dates : dict, default None + Dictionary mapping columns containing datetime types to stata internal + format to use when writing the dates. Options are 'tc', 'td', 'tm', + 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. + Datetime columns that do not have a conversion type specified will be + converted to 'tc'. Raises NotImplementedError if a datetime column has + timezone information + write_index : bool, default True + Write the index to Stata dataset. + byteorder : str, default None + Can be ">", "<", "little", or "big". 
default is `sys.byteorder` + time_stamp : datetime, default None + A datetime to use as file creation date. Default is the current time + data_label : str, default None + A label for the data set. Must be 80 characters or smaller. + variable_labels : dict, default None + Dictionary containing columns as keys and variable labels as values. + Each label must be 80 characters or smaller. + convert_strl : list, default None + List of column names to convert to Stata StrL format. Columns with + more than 2045 characters are automatically written as StrL. + Smaller columns can be converted by including the column name. Using + StrLs can reduce output file size when strings are longer than 8 + characters, and either frequently repeated or sparse. + version : int, default None + The dta version to use. By default, uses the size of data to determine + the version. 118 is used if data.shape[1] <= 32767, and 119 is used + for storing larger DataFrames. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + value_labels : dict of dicts + Dictionary containing columns as keys and dictionaries of column value + to labels as values. The combined length of all labels for a single + variable must be 32,000 characters or smaller. + + .. versionadded:: 1.4.0 + + Returns + ------- + StataWriterUTF8 + The instance has a write_file method, which will write the file to the + given `fname`. + + Raises + ------ + NotImplementedError + * If datetimes contain timezone information + ValueError + * Columns listed in convert_dates are neither datetime64[ns] + nor datetime + * Column dtype is not representable in Stata + * Column listed in convert_dates is not in DataFrame + * Categorical label contains more than 32,000 characters + + Examples + -------- + Using Unicode data and column names + + >>> from pandas.io.stata import StataWriterUTF8 + >>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ']) + >>> writer = StataWriterUTF8('./data_file.dta', data) + >>> writer.write_file() + + Directly write a zip file + >>> compression = {"method": "zip", "archive_name": "data_file.dta"} + >>> writer = StataWriterUTF8('./data_file.zip', data, compression=compression) + >>> writer.write_file() + + Or with long strings stored in strl format + + >>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']], + ... columns=['strls']) + >>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data, + ... 
convert_strl=['strls']) + >>> writer.write_file() + """ + + _encoding: Literal["utf-8"] = "utf-8" + + def __init__( + self, + fname: FilePath | WriteBuffer[bytes], + data: DataFrame, + convert_dates: dict[Hashable, str] | None = None, + write_index: bool = True, + byteorder: str | None = None, + time_stamp: datetime | None = None, + data_label: str | None = None, + variable_labels: dict[Hashable, str] | None = None, + convert_strl: Sequence[Hashable] | None = None, + version: int | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + *, + value_labels: dict[Hashable, dict[float, str]] | None = None, + ) -> None: + if version is None: + version = 118 if data.shape[1] <= 32767 else 119 + elif version not in (118, 119): + raise ValueError("version must be either 118 or 119.") + elif version == 118 and data.shape[1] > 32767: + raise ValueError( + "You must use version 119 for data sets containing more than " + "32,767 variables" + ) + + super().__init__( + fname, + data, + convert_dates=convert_dates, + write_index=write_index, + byteorder=byteorder, + time_stamp=time_stamp, + data_label=data_label, + variable_labels=variable_labels, + value_labels=value_labels, + convert_strl=convert_strl, + compression=compression, + storage_options=storage_options, + ) + # Override version set in StataWriter117 init + self._dta_version = version + + def _validate_variable_name(self, name: str) -> str: + """ + Validate variable names for Stata export. + + Parameters + ---------- + name : str + Variable name + + Returns + ------- + str + The validated name with invalid characters replaced with + underscores. + + Notes + ----- + Stata 118+ support most unicode characters. The only limitation is in + the ascii range where the characters supported are a-z, A-Z, 0-9 and _. + """ + # High code points appear to be acceptable + for c in name: + if ( + ( + ord(c) < 128 + and (c < "A" or c > "Z") + and (c < "a" or c > "z") + and (c < "0" or c > "9") + and c != "_" + ) + or 128 <= ord(c) < 192 + or c in {"×", "÷"} # noqa: RUF001 + ): + name = name.replace(c, "_") + + return name diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/pyproject.toml b/env-llmeval/lib/python3.10/site-packages/pandas/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..db9f055799ab05ad59119c7786310e7fe8d37be5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/pyproject.toml @@ -0,0 +1,801 @@ +[build-system] +# Minimum requirements for the build system to execute. +# See https://github.com/scipy/scipy/pull/12940 for the AIX issue. 
+requires = [ + "meson-python==0.13.1", + "meson==1.2.1", + "wheel", + "Cython==3.0.5", # Note: sync with setup.py, environment.yml and asv.conf.json + # Force numpy higher than 2.0rc1, so that built wheels are compatible + # with both numpy 1 and 2 + "numpy>=2.0.0rc1", + "versioneer[toml]" +] + +build-backend = "mesonpy" + +[project] +name = 'pandas' +dynamic = [ + 'version' +] +description = 'Powerful data structures for data analysis, time series, and statistics' +readme = 'README.md' +authors = [ + { name = 'The Pandas Development Team', email='pandas-dev@python.org' }, +] +license = {file = 'LICENSE'} +requires-python = '>=3.9' +dependencies = [ + "numpy>=1.22.4; python_version<'3.11'", + "numpy>=1.23.2; python_version=='3.11'", + "numpy>=1.26.0; python_version>='3.12'", + "python-dateutil>=2.8.2", + "pytz>=2020.1", + "tzdata>=2022.7" +] +classifiers = [ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: BSD License', + 'Operating System :: OS Independent', + 'Programming Language :: Cython', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', + 'Topic :: Scientific/Engineering' +] + +[project.urls] +homepage = 'https://pandas.pydata.org' +documentation = 'https://pandas.pydata.org/docs/' +repository = 'https://github.com/pandas-dev/pandas' + +[project.entry-points."pandas_plotting_backends"] +matplotlib = "pandas:plotting._matplotlib" + +[project.optional-dependencies] +test = ['hypothesis>=6.46.1', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0'] +pyarrow = ['pyarrow>=10.0.1'] +performance = ['bottleneck>=1.3.6', 'numba>=0.56.4', 'numexpr>=2.8.4'] +computation = ['scipy>=1.10.0', 'xarray>=2022.12.0'] +fss = ['fsspec>=2022.11.0'] +aws = ['s3fs>=2022.11.0'] +gcp = ['gcsfs>=2022.11.0', 'pandas-gbq>=0.19.0'] +excel = ['odfpy>=1.4.1', 'openpyxl>=3.1.0', 'python-calamine>=0.1.7', 'pyxlsb>=1.0.10', 'xlrd>=2.0.1', 'xlsxwriter>=3.0.5'] +parquet = ['pyarrow>=10.0.1'] +feather = ['pyarrow>=10.0.1'] +hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.20.1', + 'tables>=3.8.0'] +spss = ['pyreadstat>=1.2.0'] +postgresql = ['SQLAlchemy>=2.0.0', 'psycopg2>=2.9.6', 'adbc-driver-postgresql>=0.8.0'] +mysql = ['SQLAlchemy>=2.0.0', 'pymysql>=1.0.2'] +sql-other = ['SQLAlchemy>=2.0.0', 'adbc-driver-postgresql>=0.8.0', 'adbc-driver-sqlite>=0.8.0'] +html = ['beautifulsoup4>=4.11.2', 'html5lib>=1.1', 'lxml>=4.9.2'] +xml = ['lxml>=4.9.2'] +plot = ['matplotlib>=3.6.3'] +output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.9.0'] +clipboard = ['PyQt5>=5.15.9', 'qtpy>=2.3.0'] +compression = ['zstandard>=0.19.0'] +consortium-standard = ['dataframe-api-compat>=0.1.7'] +all = ['adbc-driver-postgresql>=0.8.0', + 'adbc-driver-sqlite>=0.8.0', + 'beautifulsoup4>=4.11.2', + # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.21.3', + 'bottleneck>=1.3.6', + 'dataframe-api-compat>=0.1.7', + 'fastparquet>=2022.12.0', + 'fsspec>=2022.11.0', + 'gcsfs>=2022.11.0', + 'html5lib>=1.1', + 'hypothesis>=6.46.1', + 'jinja2>=3.1.2', + 'lxml>=4.9.2', + 'matplotlib>=3.6.3', + 'numba>=0.56.4', + 'numexpr>=2.8.4', + 'odfpy>=1.4.1', + 'openpyxl>=3.1.0', + 'pandas-gbq>=0.19.0', + 'psycopg2>=2.9.6', + 
'pyarrow>=10.0.1', + 'pymysql>=1.0.2', + 'PyQt5>=5.15.9', + 'pyreadstat>=1.2.0', + 'pytest>=7.3.2', + 'pytest-xdist>=2.2.0', + 'python-calamine>=0.1.7', + 'pyxlsb>=1.0.10', + 'qtpy>=2.3.0', + 'scipy>=1.10.0', + 's3fs>=2022.11.0', + 'SQLAlchemy>=2.0.0', + 'tables>=3.8.0', + 'tabulate>=0.9.0', + 'xarray>=2022.12.0', + 'xlrd>=2.0.1', + 'xlsxwriter>=3.0.5', + 'zstandard>=0.19.0'] + +# TODO: Remove after setuptools support is dropped. +[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +include = ["pandas", "pandas.*"] +namespaces = false + +[tool.setuptools.exclude-package-data] +"*" = ["*.c", "*.h"] + +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. +[tool.versioneer] +VCS = "git" +style = "pep440" +versionfile_source = "pandas/_version.py" +versionfile_build = "pandas/_version.py" +tag_prefix = "v" +parentdir_prefix = "pandas-" + +[tool.meson-python.args] +setup = ['--vsenv'] # For Windows + +[tool.cibuildwheel] +skip = "cp36-* cp37-* cp38-* pp* *_i686 *_ppc64le *_s390x" +build-verbosity = "3" +environment = {LDFLAGS="-Wl,--strip-all"} +# TODO: remove this once numpy 2.0 proper releases +# and specify numpy 2.0 as a dependency in [build-system] requires in pyproject.toml +before-build = "pip install numpy==2.0.0rc1" +test-requires = "hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0" +test-command = """ + PANDAS_CI='1' python -c 'import pandas as pd; \ + pd.test(extra_args=["-m not clipboard and not single_cpu and not slow and not network and not db", "-n 2", "--no-strict-data-files"]); \ + pd.test(extra_args=["-m not clipboard and single_cpu and not slow and not network and not db", "--no-strict-data-files"]);' \ + """ + +[tool.cibuildwheel.windows] +# TODO: remove this once numpy 2.0 proper releases +# and specify numpy 2.0 as a dependency in [build-system] requires in pyproject.toml +before-build = "pip install delvewheel numpy==2.0.0rc1" +repair-wheel-command = "delvewheel repair -w {dest_dir} {wheel}" + +[[tool.cibuildwheel.overrides]] +select = "*-musllinux*" +before-test = "apk update && apk add musl-locales" + +[[tool.cibuildwheel.overrides]] +select = "*-win*" +# We test separately for Windows, since we use +# the windowsservercore docker image to check if any dlls are +# missing from the wheel +test-command = "" + +[[tool.cibuildwheel.overrides]] +# Don't strip wheels on macOS. 
+# macOS doesn't support stripping wheels with linker +# https://github.com/MacPython/numpy-wheels/pull/87#issuecomment-624878264 +select = "*-macosx*" +environment = {CFLAGS="-g0"} + +[tool.black] +target-version = ['py39', 'py310'] +required-version = '23.11.0' +exclude = ''' +( + asv_bench/env + | \.egg + | \.git + | \.hg + | \.mypy_cache + | \.nox + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + | setup.py +) +''' + +[tool.ruff] +line-length = 88 +target-version = "py310" +fix = true +unfixable = [] +typing-modules = ["pandas._typing"] + +select = [ + # pyflakes + "F", + # pycodestyle + "E", "W", + # flake8-2020 + "YTT", + # flake8-bugbear + "B", + # flake8-quotes + "Q", + # flake8-debugger + "T10", + # flake8-gettext + "INT", + # pylint + "PL", + # misc lints + "PIE", + # flake8-pyi + "PYI", + # tidy imports + "TID", + # implicit string concatenation + "ISC", + # type-checking imports + "TCH", + # comprehensions + "C4", + # pygrep-hooks + "PGH", + # Ruff-specific rules + "RUF", + # flake8-bandit: exec-builtin + "S102", + # numpy-legacy-random + "NPY002", + # Perflint + "PERF", + # flynt + "FLY", + # flake8-logging-format + "G", + # flake8-future-annotations + "FA", +] + +ignore = [ + ### Intentionally disabled + # space before : (needed for how black formats slicing) + "E203", + # module level import not at top of file + "E402", + # do not assign a lambda expression, use a def + "E731", + # line break before binary operator + # "W503", # not yet implemented + # line break after binary operator + # "W504", # not yet implemented + # controversial + "B006", + # controversial + "B007", + # controversial + "B008", + # setattr is used to side-step mypy + "B009", + # getattr is used to side-step mypy + "B010", + # tests use assert False + "B011", + # tests use comparisons but not their returned value + "B015", + # false positives + "B019", + # Loop control variable overrides iterable it iterates + "B020", + # Function definition does not bind loop variable + "B023", + # Functions defined inside a loop must not use variables redefined in the loop + # "B301", # not yet implemented + # Only works with python >=3.10 + "B905", + # Too many arguments to function call + "PLR0913", + # Too many returns + "PLR0911", + # Too many branches + "PLR0912", + # Too many statements + "PLR0915", + # Redefined loop name + "PLW2901", + # Global statements are discouraged + "PLW0603", + # Docstrings should not be included in stubs + "PYI021", + # Use `typing.NamedTuple` instead of `collections.namedtuple` + "PYI024", + # No builtin `eval()` allowed + "PGH001", + # compare-to-empty-string + "PLC1901", + # while int | float can be shortened to float, the former is more explicit + "PYI041", + # incorrect-dict-iterator, flags valid Series.items usage + "PERF102", + # try-except-in-loop, becomes useless in Python 3.11 + "PERF203", + + + ### TODO: Enable gradually + # Useless statement + "B018", + # Within an except clause, raise exceptions with ... 
+ "B904", + # Magic number + "PLR2004", + # comparison-with-itself + "PLR0124", + # Consider `elif` instead of `else` then `if` to remove indentation level + "PLR5501", + # collection-literal-concatenation + "RUF005", + # pairwise-over-zipped (>=PY310 only) + "RUF007", + # explicit-f-string-type-conversion + "RUF010", + # mutable-class-default + "RUF012" +] + +exclude = [ + "doc/sphinxext/*.py", + "doc/build/*.py", + "doc/temp/*.py", + ".eggs/*.py", + # vendored files + "pandas/util/version/*", + "pandas/io/clipboard/__init__.py", + # exclude asv benchmark environments from linting + "env", +] + +[tool.ruff.per-file-ignores] +# relative imports allowed for asv_bench +"asv_bench/*" = ["TID", "NPY002"] +# to be enabled gradually +"pandas/core/*" = ["PLR5501"] +"pandas/tests/*" = ["B028", "FLY"] +"scripts/*" = ["B028"] +# Keep this one enabled +"pandas/_typing.py" = ["TCH"] + +[tool.pylint.messages_control] +max-line-length = 88 +disable = [ + # intentionally turned off + "bad-mcs-classmethod-argument", + "broad-except", + "c-extension-no-member", + "comparison-with-itself", + "consider-using-enumerate", + "import-error", + "import-outside-toplevel", + "invalid-name", + "invalid-unary-operand-type", + "line-too-long", + "no-else-continue", + "no-else-raise", + "no-else-return", + "no-member", + "no-name-in-module", + "not-an-iterable", + "overridden-final-method", + "pointless-statement", + "redundant-keyword-arg", + "singleton-comparison", + "too-many-ancestors", + "too-many-arguments", + "too-many-boolean-expressions", + "too-many-branches", + "too-many-function-args", + "too-many-instance-attributes", + "too-many-locals", + "too-many-nested-blocks", + "too-many-public-methods", + "too-many-return-statements", + "too-many-statements", + "unexpected-keyword-arg", + "ungrouped-imports", + "unsubscriptable-object", + "unsupported-assignment-operation", + "unsupported-membership-test", + "unused-import", + "use-dict-literal", + "use-implicit-booleaness-not-comparison", + "use-implicit-booleaness-not-len", + "wrong-import-order", + "wrong-import-position", + "redefined-loop-name", + + # misc + "abstract-class-instantiated", + "no-value-for-parameter", + "undefined-variable", + "unpacking-non-sequence", + "used-before-assignment", + + # pylint type "C": convention, for programming standard violation + "missing-class-docstring", + "missing-function-docstring", + "missing-module-docstring", + "superfluous-parens", + "too-many-lines", + "unidiomatic-typecheck", + "unnecessary-dunder-call", + "unnecessary-lambda-assignment", + + # pylint type "R": refactor, for bad code smell + "consider-using-with", + "cyclic-import", + "duplicate-code", + "inconsistent-return-statements", + "redefined-argument-from-local", + "too-few-public-methods", + + # pylint type "W": warning, for python specific problems + "abstract-method", + "arguments-differ", + "arguments-out-of-order", + "arguments-renamed", + "attribute-defined-outside-init", + "broad-exception-raised", + "comparison-with-callable", + "dangerous-default-value", + "deprecated-module", + "eval-used", + "expression-not-assigned", + "fixme", + "global-statement", + "invalid-overridden-method", + "keyword-arg-before-vararg", + "possibly-unused-variable", + "protected-access", + "raise-missing-from", + "redefined-builtin", + "redefined-outer-name", + "self-cls-assignment", + "signature-differs", + "super-init-not-called", + "try-except-raise", + "unnecessary-lambda", + "unused-argument", + "unused-variable", + "using-constant-test" +] + 
+[tool.pytest.ini_options] +# sync minversion with pyproject.toml & install.rst +minversion = "7.3.2" +addopts = "--strict-markers --strict-config --capture=no --durations=30 --junitxml=test-data.xml" +empty_parameter_set_mark = "fail_at_collect" +xfail_strict = true +testpaths = "pandas" +doctest_optionflags = [ + "NORMALIZE_WHITESPACE", + "IGNORE_EXCEPTION_DETAIL", + "ELLIPSIS", +] +filterwarnings = [ + "error:::pandas", + "error::ResourceWarning", + "error::pytest.PytestUnraisableExceptionWarning", + # TODO(PY311-minimum): Specify EncodingWarning + # Ignore 3rd party EncodingWarning but raise on pandas' + "ignore:.*encoding.* argument not specified", + "error:.*encoding.* argument not specified::pandas", + "ignore:.*ssl.SSLSocket:pytest.PytestUnraisableExceptionWarning", + "ignore:.*ssl.SSLSocket:ResourceWarning", + # GH 44844: Can remove once minimum matplotlib version >= 3.7 + "ignore:.*FileIO:pytest.PytestUnraisableExceptionWarning", + "ignore:.*BufferedRandom:ResourceWarning", + "ignore::ResourceWarning:asyncio", + # From plotting doctests + "ignore:More than 20 figures have been opened:RuntimeWarning", + # Will be fixed in numba 0.56: https://github.com/numba/numba/issues/7758 + "ignore:`np.MachAr` is deprecated:DeprecationWarning:numba", + "ignore:.*urllib3:DeprecationWarning:botocore", + "ignore:Setuptools is replacing distutils.:UserWarning:_distutils_hack", + # https://github.com/PyTables/PyTables/issues/822 + "ignore:a closed node found in the registry:UserWarning:tables", + "ignore:`np.object` is a deprecated:DeprecationWarning:tables", + "ignore:tostring:DeprecationWarning:tables", + "ignore:distutils Version classes are deprecated:DeprecationWarning:pandas_datareader", + "ignore:distutils Version classes are deprecated:DeprecationWarning:numexpr", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fastparquet", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec", + # Can be removed once https://github.com/numpy/numpy/pull/24794 is merged + "ignore:.*In the future `np.long` will be defined as.*:FutureWarning", +] +junit_family = "xunit2" +markers = [ + "single_cpu: tests that should run on a single cpu only", + "slow: mark a test as slow", + "network: mark a test as network", + "db: tests requiring a database (mysql or postgres)", + "clipboard: mark a pd.read_clipboard test", + "arm_slow: mark a test as slow for arm64 architecture", + "skip_ubsan: Tests known to fail UBSAN check", +] + +[tool.mypy] +# Import discovery +mypy_path = "typings" +files = ["pandas", "typings"] +namespace_packages = false +explicit_package_bases = false +ignore_missing_imports = true +follow_imports = "normal" +follow_imports_for_stubs = false +no_site_packages = false +no_silence_site_packages = false +# Platform configuration +python_version = "3.11" +platform = "linux-64" +# Disallow dynamic typing +disallow_any_unimported = false # TODO +disallow_any_expr = false # TODO +disallow_any_decorated = false # TODO +disallow_any_explicit = false # TODO +disallow_any_generics = false # TODO +disallow_subclassing_any = false # TODO +# Untyped definitions and calls +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +# None and Optional handling +no_implicit_optional = true +strict_optional = true +# Configuring warnings +warn_redundant_casts = true +warn_unused_ignores = true +warn_no_return = true +warn_return_any = false # TODO +warn_unreachable = false 
# GH#27396 +# Suppressing errors +ignore_errors = false +enable_error_code = "ignore-without-code" +# Miscellaneous strictness flags +allow_untyped_globals = false +allow_redefinition = false +local_partial_types = false +implicit_reexport = true +strict_equality = true +# Configuring error messages +show_error_context = false +show_column_numbers = false +show_error_codes = true + +[[tool.mypy.overrides]] +module = [ + "pandas._config.config", # TODO + "pandas._libs.*", + "pandas._testing.*", # TODO + "pandas.arrays", # TODO + "pandas.compat.numpy.function", # TODO + "pandas.compat._optional", # TODO + "pandas.compat.compressors", # TODO + "pandas.compat.pickle_compat", # TODO + "pandas.core._numba.executor", # TODO + "pandas.core.array_algos.datetimelike_accumulations", # TODO + "pandas.core.array_algos.masked_accumulations", # TODO + "pandas.core.array_algos.masked_reductions", # TODO + "pandas.core.array_algos.putmask", # TODO + "pandas.core.array_algos.quantile", # TODO + "pandas.core.array_algos.replace", # TODO + "pandas.core.array_algos.take", # TODO + "pandas.core.arrays.*", # TODO + "pandas.core.computation.*", # TODO + "pandas.core.dtypes.astype", # TODO + "pandas.core.dtypes.cast", # TODO + "pandas.core.dtypes.common", # TODO + "pandas.core.dtypes.concat", # TODO + "pandas.core.dtypes.dtypes", # TODO + "pandas.core.dtypes.generic", # TODO + "pandas.core.dtypes.inference", # TODO + "pandas.core.dtypes.missing", # TODO + "pandas.core.groupby.categorical", # TODO + "pandas.core.groupby.generic", # TODO + "pandas.core.groupby.grouper", # TODO + "pandas.core.groupby.groupby", # TODO + "pandas.core.groupby.ops", # TODO + "pandas.core.indexers.*", # TODO + "pandas.core.indexes.*", # TODO + "pandas.core.interchange.column", # TODO + "pandas.core.interchange.dataframe_protocol", # TODO + "pandas.core.interchange.from_dataframe", # TODO + "pandas.core.internals.*", # TODO + "pandas.core.methods.*", # TODO + "pandas.core.ops.array_ops", # TODO + "pandas.core.ops.common", # TODO + "pandas.core.ops.invalid", # TODO + "pandas.core.ops.mask_ops", # TODO + "pandas.core.ops.missing", # TODO + "pandas.core.reshape.*", # TODO + "pandas.core.strings.*", # TODO + "pandas.core.tools.*", # TODO + "pandas.core.window.common", # TODO + "pandas.core.window.ewm", # TODO + "pandas.core.window.expanding", # TODO + "pandas.core.window.numba_", # TODO + "pandas.core.window.online", # TODO + "pandas.core.window.rolling", # TODO + "pandas.core.accessor", # TODO + "pandas.core.algorithms", # TODO + "pandas.core.apply", # TODO + "pandas.core.arraylike", # TODO + "pandas.core.base", # TODO + "pandas.core.common", # TODO + "pandas.core.config_init", # TODO + "pandas.core.construction", # TODO + "pandas.core.flags", # TODO + "pandas.core.frame", # TODO + "pandas.core.generic", # TODO + "pandas.core.indexing", # TODO + "pandas.core.missing", # TODO + "pandas.core.nanops", # TODO + "pandas.core.resample", # TODO + "pandas.core.roperator", # TODO + "pandas.core.sample", # TODO + "pandas.core.series", # TODO + "pandas.core.sorting", # TODO + "pandas.errors", # TODO + "pandas.io.clipboard", # TODO + "pandas.io.excel._base", # TODO + "pandas.io.excel._odfreader", # TODO + "pandas.io.excel._odswriter", # TODO + "pandas.io.excel._openpyxl", # TODO + "pandas.io.excel._pyxlsb", # TODO + "pandas.io.excel._xlrd", # TODO + "pandas.io.excel._xlsxwriter", # TODO + "pandas.io.formats.console", # TODO + "pandas.io.formats.css", # TODO + "pandas.io.formats.excel", # TODO + "pandas.io.formats.format", # TODO + 
"pandas.io.formats.info", # TODO + "pandas.io.formats.printing", # TODO + "pandas.io.formats.style", # TODO + "pandas.io.formats.style_render", # TODO + "pandas.io.formats.xml", # TODO + "pandas.io.json.*", # TODO + "pandas.io.parsers.*", # TODO + "pandas.io.sas.sas_xport", # TODO + "pandas.io.sas.sas7bdat", # TODO + "pandas.io.clipboards", # TODO + "pandas.io.common", # TODO + "pandas.io.gbq", # TODO + "pandas.io.html", # TODO + "pandas.io.gbq", # TODO + "pandas.io.parquet", # TODO + "pandas.io.pytables", # TODO + "pandas.io.sql", # TODO + "pandas.io.stata", # TODO + "pandas.io.xml", # TODO + "pandas.plotting.*", # TODO + "pandas.tests.*", + "pandas.tseries.frequencies", # TODO + "pandas.tseries.holiday", # TODO + "pandas.util._decorators", # TODO + "pandas.util._doctools", # TODO + "pandas.util._print_versions", # TODO + "pandas.util._test_decorators", # TODO + "pandas.util._validators", # TODO + "pandas.util", # TODO + "pandas._version", + "pandas.conftest", + "pandas" +] +disallow_untyped_calls = false +disallow_untyped_defs = false +disallow_incomplete_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.*", + "pandas._version", + "pandas.io.clipboard", +] +check_untyped_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.apply.test_series_apply", + "pandas.tests.arithmetic.conftest", + "pandas.tests.arrays.sparse.test_combine_concat", + "pandas.tests.dtypes.test_common", + "pandas.tests.frame.methods.test_to_records", + "pandas.tests.groupby.test_rank", + "pandas.tests.groupby.transform.test_transform", + "pandas.tests.indexes.interval.test_interval", + "pandas.tests.indexing.test_categorical", + "pandas.tests.io.excel.test_writers", + "pandas.tests.reductions.test_reductions", + "pandas.tests.test_expressions", +] +ignore_errors = true + +# To be kept consistent with "Import Formatting" section in contributing.rst +[tool.isort] +known_pre_libs = "pandas._config" +known_pre_core = ["pandas._libs", "pandas._typing", "pandas.util._*", "pandas.compat", "pandas.errors"] +known_dtypes = "pandas.core.dtypes" +known_post_core = ["pandas.tseries", "pandas.io", "pandas.plotting"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY" ,"PRE_LIBS" , "PRE_CORE", "DTYPES", "FIRSTPARTY", "POST_CORE", "LOCALFOLDER"] +profile = "black" +combine_as_imports = true +force_grid_wrap = 2 +force_sort_within_sections = true +skip_glob = "env" +skip = "pandas/__init__.py" + +[tool.pyright] +pythonVersion = "3.11" +typeCheckingMode = "basic" +useLibraryCodeForTypes = false +include = ["pandas", "typings"] +exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version", "pandas/core/_numba/extensions.py"] +# enable subset of "strict" +reportDuplicateImport = true +reportInconsistentConstructor = true +reportInvalidStubStatement = true +reportOverlappingOverload = true +reportPropertyTypeMismatch = true +reportUntypedClassDecorator = true +reportUntypedFunctionDecorator = true +reportUntypedNamedTuple = true +reportUnusedImport = true +disableBytesTypePromotions = true +# disable subset of "basic" +reportGeneralTypeIssues = false +reportMissingModuleSource = false +reportOptionalCall = false +reportOptionalIterable = false +reportOptionalMemberAccess = false +reportOptionalOperand = false +reportOptionalSubscript = false +reportPrivateImportUsage = false +reportUnboundVariable = false + +[tool.coverage.run] +branch = true +omit = ["pandas/_typing.py", "pandas/_version.py"] +plugins = ["Cython.Coverage"] +source = ["pandas"] + +[tool.coverage.report] +ignore_errors = false 
+show_missing = true +omit = ["pandas/_version.py"] +exclude_lines = [ + # Have to re-enable the standard pragma + "pragma: no cover", + # Don't complain about missing debug-only code:s + "def __repr__", + "if self.debug", + # Don't complain if tests don't hit defensive assertion code: + "raise AssertionError", + "raise NotImplementedError", + "AbstractMethodError", + # Don't complain if non-runnable code isn't run: + "if 0:", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] + +[tool.coverage.html] +directory = "coverage_html_report" + +[tool.codespell] +ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere, expec, expecs" +ignore-regex = 'https://([\w/\.])+' diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/testing.py b/env-llmeval/lib/python3.10/site-packages/pandas/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..841b55df48556561904b9144a05f747d889ea621 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/testing.py @@ -0,0 +1,18 @@ +""" +Public testing utility functions. +""" + + +from pandas._testing import ( + assert_extension_array_equal, + assert_frame_equal, + assert_index_equal, + assert_series_equal, +) + +__all__ = [ + "assert_extension_array_equal", + "assert_frame_equal", + "assert_series_equal", + "assert_index_equal", +]
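The pandas/testing.py shim above only re-exports the public assertion helpers defined in pandas._testing. A minimal usage sketch of one of them (the frames and names are illustrative, not part of this diff):

import pandas as pd
from pandas.testing import assert_frame_equal

left = pd.DataFrame({"a": [1, 2, 3]})          # int64 column
right = pd.DataFrame({"a": [1.0, 2.0, 3.0]})   # float64 column

# The strict comparison raises because the dtypes differ ...
try:
    assert_frame_equal(left, right)
except AssertionError:
    pass

# ... while relaxing the dtype check lets the equal values pass.
assert_frame_equal(left, right, check_dtype=False)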