diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cd76b3b38ced45b07c753228b4937821263756b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cf5d21e3e11e9cb26aeb579f4f4c778cacd9424 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b66f120c55240dcd562688715d7bd5b8c0a528f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dceec65b41d16407ac372cb7ed05a63288ea244e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..491f25e81eb4ce377051c30d5f0d261d4e96c392 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed4233df2e9e968963bffa9c5ad34039e00a9fed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..26a872a90e4934c929d168228125f2e43936c774 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/__init__.py @@ -0,0 +1,27 @@ +__all__ = [ + "NaT", + "NaTType", + "OutOfBoundsDatetime", + "Period", + "Timedelta", + "Timestamp", + "iNaT", + "Interval", +] + + +# Below imports needs to happen first to ensure pandas top level +# module gets monkeypatched with the pandas_datetime_CAPI +# see pandas_datetime_exec in pd_datetime.c +import pandas._libs.pandas_parser # isort: skip # type: ignore[reportUnusedImport] +import pandas._libs.pandas_datetime # noqa: F401 # isort: skip # type: ignore[reportUnusedImport] +from pandas._libs.interval import Interval +from pandas._libs.tslibs import ( + NaT, + NaTType, + OutOfBoundsDatetime, + Period, + Timedelta, + Timestamp, + iNaT, +) diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d78fe8a792ed924a96917c04406ad2ea37495037 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/algos.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/algos.pyi new file mode 100644 index 0000000000000000000000000000000000000000..caf5425dfc7b44cbb2fea103e56d4584709eca1e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/algos.pyi @@ -0,0 +1,416 @@ +from typing import Any + +import numpy as np + +from pandas._typing import npt + +class Infinity: + def __eq__(self, other) -> bool: ... + def __ne__(self, other) -> bool: ... + def __lt__(self, other) -> bool: ... + def __le__(self, other) -> bool: ... + def __gt__(self, other) -> bool: ... + def __ge__(self, other) -> bool: ... + +class NegInfinity: + def __eq__(self, other) -> bool: ... + def __ne__(self, other) -> bool: ... + def __lt__(self, other) -> bool: ... + def __le__(self, other) -> bool: ... + def __gt__(self, other) -> bool: ... + def __ge__(self, other) -> bool: ... + +def unique_deltas( + arr: np.ndarray, # const int64_t[:] +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] +def is_lexsorted(list_of_arrays: list[npt.NDArray[np.int64]]) -> bool: ... +def groupsort_indexer( + index: np.ndarray, # const int64_t[:] + ngroups: int, +) -> tuple[ + np.ndarray, # ndarray[int64_t, ndim=1] + np.ndarray, # ndarray[int64_t, ndim=1] +]: ... +def kth_smallest( + arr: np.ndarray, # numeric[:] + k: int, +) -> Any: ... # numeric + +# ---------------------------------------------------------------------- +# Pairwise correlation/covariance + +def nancorr( + mat: npt.NDArray[np.float64], # const float64_t[:, :] + cov: bool = ..., + minp: int | None = ..., +) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2] +def nancorr_spearman( + mat: npt.NDArray[np.float64], # ndarray[float64_t, ndim=2] + minp: int = ..., +) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2] + +# ---------------------------------------------------------------------- + +def validate_limit(nobs: int | None, limit=...) -> int: ... +def get_fill_indexer( + mask: npt.NDArray[np.bool_], + limit: int | None = None, +) -> npt.NDArray[np.intp]: ... +def pad( + old: np.ndarray, # ndarray[numeric_object_t] + new: np.ndarray, # ndarray[numeric_object_t] + limit=..., +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] +def pad_inplace( + values: np.ndarray, # numeric_object_t[:] + mask: np.ndarray, # uint8_t[:] + limit=..., +) -> None: ... +def pad_2d_inplace( + values: np.ndarray, # numeric_object_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] + limit=..., +) -> None: ... +def backfill( + old: np.ndarray, # ndarray[numeric_object_t] + new: np.ndarray, # ndarray[numeric_object_t] + limit=..., +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] +def backfill_inplace( + values: np.ndarray, # numeric_object_t[:] + mask: np.ndarray, # uint8_t[:] + limit=..., +) -> None: ... +def backfill_2d_inplace( + values: np.ndarray, # numeric_object_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] + limit=..., +) -> None: ... 
+def is_monotonic( + arr: np.ndarray, # ndarray[numeric_object_t, ndim=1] + timelike: bool, +) -> tuple[bool, bool, bool]: ... + +# ---------------------------------------------------------------------- +# rank_1d, rank_2d +# ---------------------------------------------------------------------- + +def rank_1d( + values: np.ndarray, # ndarray[numeric_object_t, ndim=1] + labels: np.ndarray | None = ..., # const int64_t[:]=None + is_datetimelike: bool = ..., + ties_method=..., + ascending: bool = ..., + pct: bool = ..., + na_option=..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1] +def rank_2d( + in_arr: np.ndarray, # ndarray[numeric_object_t, ndim=2] + axis: int = ..., + is_datetimelike: bool = ..., + ties_method=..., + ascending: bool = ..., + na_option=..., + pct: bool = ..., +) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1] +def diff_2d( + arr: np.ndarray, # ndarray[diff_t, ndim=2] + out: np.ndarray, # ndarray[out_t, ndim=2] + periods: int, + axis: int, + datetimelike: bool = ..., +) -> None: ... +def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ... +def ensure_object(arr: object) -> npt.NDArray[np.object_]: ... +def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ... +def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ... +def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ... +def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ... +def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ... +def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ... +def take_1d_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_1d_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_2d_axis1_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int8_int8( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int16( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... 
+def take_2d_multi_int32_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int64_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float32_float32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float32_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float64_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_object_object( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_bool_bool( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_bool_object( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int64_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.pyi new file mode 100644 index 0000000000000000000000000000000000000000..86f69c3cdfc75f3101a8e0afe59f9b102d90ad79 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.pyi @@ -0,0 +1,40 @@ +from typing import Sequence + +import numpy as np + +from pandas._typing import ( + AxisInt, + DtypeObj, + Self, + Shape, +) + +class NDArrayBacked: + _dtype: DtypeObj + _ndarray: np.ndarray + def __init__(self, values: np.ndarray, dtype: DtypeObj) -> None: ... + @classmethod + def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ... + def _from_backing_data(self, values: np.ndarray): ... + def __setstate__(self, state): ... + def __len__(self) -> int: ... + @property + def shape(self) -> Shape: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def nbytes(self) -> int: ... + def copy(self, order=...): ... + def delete(self, loc, axis=...): ... + def swapaxes(self, axis1, axis2): ... + def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ... + def reshape(self, *args, **kwargs): ... + def ravel(self, order=...): ... + @property + def T(self): ... + @classmethod + def _concat_same_type( + cls, to_concat: Sequence[Self], axis: AxisInt = ... + ) -> Self: ... 
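Note: the algos.pyi stub above only declares signatures for the compiled take_* helpers; the following is a minimal illustrative sketch (not part of this diff) of how one such fused take function is driven, assuming the compiled pandas._libs.algos extension in this environment matches these stubs:

import numpy as np
from pandas._libs import algos

values = np.array([10, 20, 30, 40], dtype=np.int64)
indexer = np.array([3, 0, 2], dtype=np.intp)   # positions to take; a -1 entry would request fill_value
out = np.empty(len(indexer), dtype=np.int64)   # preallocated output buffer, filled in place
algos.take_1d_int64_int64(values, indexer, out, fill_value=0)
# out now holds [40, 10, 30]; the function returns None, matching the stub above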
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e46592df3eeed2d4da1bad6b918b35cd00e002b0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/groupby.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/groupby.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a494b61fa7e3d70c77a921eb8bfcc269c76db9fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/groupby.pyi @@ -0,0 +1,216 @@ +from typing import Literal + +import numpy as np + +from pandas._typing import npt + +def group_median_float64( + out: np.ndarray, # ndarray[float64_t, ndim=2] + counts: npt.NDArray[np.int64], + values: np.ndarray, # ndarray[float64_t, ndim=2] + labels: npt.NDArray[np.int64], + min_count: int = ..., # Py_ssize_t + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cumprod( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # const float64_t[:, :] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cumsum( + out: np.ndarray, # int64float_t[:, ::1] + values: np.ndarray, # ndarray[int64float_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_shift_indexer( + out: np.ndarray, # int64_t[::1] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + periods: int, +) -> None: ... +def group_fillna_indexer( + out: np.ndarray, # ndarray[intp_t] + labels: np.ndarray, # ndarray[int64_t] + sorted_labels: npt.NDArray[np.intp], + mask: npt.NDArray[np.uint8], + limit: int, # int64_t + dropna: bool, +) -> None: ... +def group_any_all( + out: np.ndarray, # uint8_t[::1] + values: np.ndarray, # const uint8_t[::1] + labels: np.ndarray, # const int64_t[:] + mask: np.ndarray, # const uint8_t[::1] + val_test: Literal["any", "all"], + skipna: bool, + result_mask: np.ndarray | None, +) -> None: ... +def group_sum( + out: np.ndarray, # complexfloatingintuint_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[complexfloatingintuint_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., + min_count: int = ..., + is_datetimelike: bool = ..., +) -> None: ... +def group_prod( + out: np.ndarray, # int64float_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[int64float_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., + min_count: int = ..., +) -> None: ... +def group_var( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., # Py_ssize_t + ddof: int = ..., # int64_t + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + is_datetimelike: bool = ..., + name: str = ..., +) -> None: ... 
+def group_skew( + out: np.ndarray, # float64_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[float64_T, ndim=2] + labels: np.ndarray, # const intp_t[::1] + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... +def group_mean( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., # Py_ssize_t + is_datetimelike: bool = ..., # bint + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_ohlc( + out: np.ndarray, # floatingintuint_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floatingintuint_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_quantile( + out: npt.NDArray[np.float64], + values: np.ndarray, # ndarray[numeric, ndim=1] + labels: npt.NDArray[np.intp], + mask: npt.NDArray[np.uint8], + qs: npt.NDArray[np.float64], # const + starts: npt.NDArray[np.int64], + ends: npt.NDArray[np.int64], + interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], + result_mask: np.ndarray | None, + is_datetimelike: bool, +) -> None: ... +def group_last( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., + min_count: int = ..., # Py_ssize_t + is_datetimelike: bool = ..., + skipna: bool = ..., +) -> None: ... +def group_nth( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., + min_count: int = ..., # int64_t + rank: int = ..., # int64_t + is_datetimelike: bool = ..., + skipna: bool = ..., +) -> None: ... +def group_rank( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + ties_method: Literal["average", "min", "max", "first", "dense"] = ..., + ascending: bool = ..., + pct: bool = ..., + na_option: Literal["keep", "top", "bottom"] = ..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> None: ... +def group_max( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_min( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... 
+def group_idxmin_idxmax( + out: npt.NDArray[np.intp], + counts: npt.NDArray[np.int64], + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: npt.NDArray[np.intp], + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + name: str = ..., + skipna: bool = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cummin( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... +def group_cummax( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8361026e4a87d462e04c53f7f1f8aee8a7f6ffe0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.pyi @@ -0,0 +1,9 @@ +import numpy as np + +from pandas._typing import npt + +def hash_object_array( + arr: npt.NDArray[np.object_], + key: str, + encoding: str = ..., +) -> npt.NDArray[np.uint64]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e86e6dda5b56729438d40fcbe198f09908c2b84a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ffe6c7730bcdcfb8af1407bf55b59170c4093699 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.pyi @@ -0,0 +1,94 @@ +from typing import ( + Iterator, + Sequence, + final, + overload, +) +import weakref + +import numpy as np + +from pandas._typing import ( + ArrayLike, + Self, + npt, +) + +from pandas import Index +from pandas.core.internals.blocks import Block as B + +def slice_len(slc: slice, objlen: int = ...) -> int: ... +def get_concat_blkno_indexers( + blknos_list: list[npt.NDArray[np.intp]], +) -> list[tuple[npt.NDArray[np.intp], BlockPlacement]]: ... +def get_blkno_indexers( + blknos: np.ndarray, # int64_t[:] + group: bool = ..., +) -> list[tuple[int, slice | np.ndarray]]: ... +def get_blkno_placements( + blknos: np.ndarray, + group: bool = ..., +) -> Iterator[tuple[int, BlockPlacement]]: ... +def update_blklocs_and_blknos( + blklocs: npt.NDArray[np.intp], + blknos: npt.NDArray[np.intp], + loc: int, + nblocks: int, +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +@final +class BlockPlacement: + def __init__(self, val: int | slice | np.ndarray) -> None: ... + @property + def indexer(self) -> np.ndarray | slice: ... + @property + def as_array(self) -> np.ndarray: ... + @property + def as_slice(self) -> slice: ... + @property + def is_slice_like(self) -> bool: ... 
+ @overload + def __getitem__( + self, loc: slice | Sequence[int] | npt.NDArray[np.intp] + ) -> BlockPlacement: ... + @overload + def __getitem__(self, loc: int) -> int: ... + def __iter__(self) -> Iterator[int]: ... + def __len__(self) -> int: ... + def delete(self, loc) -> BlockPlacement: ... + def add(self, other) -> BlockPlacement: ... + def append(self, others: list[BlockPlacement]) -> BlockPlacement: ... + def tile_for_unstack(self, factor: int) -> npt.NDArray[np.intp]: ... + +class Block: + _mgr_locs: BlockPlacement + ndim: int + values: ArrayLike + refs: BlockValuesRefs + def __init__( + self, + values: ArrayLike, + placement: BlockPlacement, + ndim: int, + refs: BlockValuesRefs | None = ..., + ) -> None: ... + def slice_block_rows(self, slicer: slice) -> Self: ... + +class BlockManager: + blocks: tuple[B, ...] + axes: list[Index] + _known_consolidated: bool + _is_consolidated: bool + _blknos: np.ndarray + _blklocs: np.ndarray + def __init__( + self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=... + ) -> None: ... + def get_slice(self, slobj: slice, axis: int = ...) -> Self: ... + def _rebuild_blknos_and_blklocs(self) -> None: ... + +class BlockValuesRefs: + referenced_blocks: list[weakref.ref] + def __init__(self, blk: Block | None = ...) -> None: ... + def add_reference(self, blk: Block) -> None: ... + def add_index_reference(self, index: Index) -> None: ... + def has_reference(self) -> bool: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1d4e8c90bc5593eae650319e9ca1b58cbd7eed73 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.pyi @@ -0,0 +1,79 @@ +import numpy as np + +from pandas._typing import npt + +def inner_join( + left: np.ndarray, # const intp_t[:] + right: np.ndarray, # const intp_t[:] + max_groups: int, + sort: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def left_outer_join( + left: np.ndarray, # const intp_t[:] + right: np.ndarray, # const intp_t[:] + max_groups: int, + sort: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def full_outer_join( + left: np.ndarray, # const intp_t[:] + right: np.ndarray, # const intp_t[:] + max_groups: int, +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def ffill_indexer( + indexer: np.ndarray, # const intp_t[:] +) -> npt.NDArray[np.intp]: ... +def left_join_indexer_unique( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> npt.NDArray[np.intp]: ... +def left_join_indexer( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> tuple[ + np.ndarray, # np.ndarray[join_t] + npt.NDArray[np.intp], + npt.NDArray[np.intp], +]: ... +def inner_join_indexer( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> tuple[ + np.ndarray, # np.ndarray[join_t] + npt.NDArray[np.intp], + npt.NDArray[np.intp], +]: ... +def outer_join_indexer( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> tuple[ + np.ndarray, # np.ndarray[join_t] + npt.NDArray[np.intp], + npt.NDArray[np.intp], +]: ... 
+def asof_join_backward_on_X_by_Y( + left_values: np.ndarray, # ndarray[numeric_t] + right_values: np.ndarray, # ndarray[numeric_t] + left_by_values: np.ndarray, # const int64_t[:] + right_by_values: np.ndarray, # const int64_t[:] + allow_exact_matches: bool = ..., + tolerance: np.number | float | None = ..., + use_hashtable: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def asof_join_forward_on_X_by_Y( + left_values: np.ndarray, # ndarray[numeric_t] + right_values: np.ndarray, # ndarray[numeric_t] + left_by_values: np.ndarray, # const int64_t[:] + right_by_values: np.ndarray, # const int64_t[:] + allow_exact_matches: bool = ..., + tolerance: np.number | float | None = ..., + use_hashtable: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def asof_join_nearest_on_X_by_Y( + left_values: np.ndarray, # ndarray[numeric_t] + right_values: np.ndarray, # ndarray[numeric_t] + left_by_values: np.ndarray, # const int64_t[:] + right_by_values: np.ndarray, # const int64_t[:] + allow_exact_matches: bool = ..., + tolerance: np.number | float | None = ..., + use_hashtable: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bc4fe68573b94290cfba2e66950c1b6d45ccf0dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.pyi @@ -0,0 +1,23 @@ +from typing import ( + Any, + Callable, +) + +def ujson_dumps( + obj: Any, + ensure_ascii: bool = ..., + double_precision: int = ..., + indent: int = ..., + orient: str = ..., + date_unit: str = ..., + iso_dates: bool = ..., + default_handler: None + | Callable[[Any], str | float | bool | list | dict | None] = ..., +) -> str: ... +def ujson_loads( + s: str, + precise_float: bool = ..., + numpy: bool = ..., + dtype: None = ..., + labelled: bool = ..., +) -> Any: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ce0f37eb9e7825e828bd4f7d6a400a9e89853f17 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b9fd970e68f5bd1d635737f65228f4ac7bc43080 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.pyi @@ -0,0 +1,213 @@ +# TODO(npdtypes): Many types specified here can be made more specific/accurate; +# the more specific versions are specified in comments +from decimal import Decimal +from typing import ( + Any, + Callable, + Final, + Generator, + Hashable, + Literal, + TypeAlias, + overload, +) + +import numpy as np + +from pandas._libs.interval import Interval +from pandas._libs.tslibs import Period +from pandas._typing import ( + ArrayLike, + DtypeObj, + TypeGuard, + npt, +) + +# placeholder until we can specify np.ndarray[object, ndim=2] +ndarray_obj_2d = np.ndarray + +from enum import Enum + +class _NoDefault(Enum): + no_default = ... 
+ +no_default: Final = _NoDefault.no_default +NoDefault: TypeAlias = Literal[_NoDefault.no_default] + +i8max: int +u8max: int + +def is_np_dtype(dtype: object, kinds: str | None = ...) -> TypeGuard[np.dtype]: ... +def item_from_zerodim(val: object) -> object: ... +def infer_dtype(value: object, skipna: bool = ...) -> str: ... +def is_iterator(obj: object) -> bool: ... +def is_scalar(val: object) -> bool: ... +def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ... +def is_pyarrow_array(obj: object) -> bool: ... +def is_period(val: object) -> TypeGuard[Period]: ... +def is_interval(obj: object) -> TypeGuard[Interval]: ... +def is_decimal(obj: object) -> TypeGuard[Decimal]: ... +def is_complex(obj: object) -> TypeGuard[complex]: ... +def is_bool(obj: object) -> TypeGuard[bool | np.bool_]: ... +def is_integer(obj: object) -> TypeGuard[int | np.integer]: ... +def is_int_or_none(obj) -> bool: ... +def is_float(obj: object) -> TypeGuard[float]: ... +def is_interval_array(values: np.ndarray) -> bool: ... +def is_datetime64_array(values: np.ndarray, skipna: bool = True) -> bool: ... +def is_timedelta_or_timedelta64_array( + values: np.ndarray, skipna: bool = True +) -> bool: ... +def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ... +def is_time_array(values: np.ndarray, skipna: bool = ...): ... +def is_date_array(values: np.ndarray, skipna: bool = ...): ... +def is_datetime_array(values: np.ndarray, skipna: bool = ...): ... +def is_string_array(values: np.ndarray, skipna: bool = ...): ... +def is_float_array(values: np.ndarray): ... +def is_integer_array(values: np.ndarray, skipna: bool = ...): ... +def is_bool_array(values: np.ndarray, skipna: bool = ...): ... +def fast_multiget( + mapping: dict, + keys: np.ndarray, # object[:] + default=..., +) -> np.ndarray: ... +def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ... +def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ... +def map_infer( + arr: np.ndarray, + f: Callable[[Any], Any], + convert: bool = ..., + ignore_na: bool = ..., +) -> np.ndarray: ... +@overload +def maybe_convert_objects( + objects: npt.NDArray[np.object_], + *, + try_float: bool = ..., + safe: bool = ..., + convert_numeric: bool = ..., + convert_non_numeric: Literal[False] = ..., + convert_to_nullable_dtype: Literal[False] = ..., + dtype_if_all_nat: DtypeObj | None = ..., +) -> npt.NDArray[np.object_ | np.number]: ... +@overload +def maybe_convert_objects( + objects: npt.NDArray[np.object_], + *, + try_float: bool = ..., + safe: bool = ..., + convert_numeric: bool = ..., + convert_non_numeric: bool = ..., + convert_to_nullable_dtype: Literal[True] = ..., + dtype_if_all_nat: DtypeObj | None = ..., +) -> ArrayLike: ... +@overload +def maybe_convert_objects( + objects: npt.NDArray[np.object_], + *, + try_float: bool = ..., + safe: bool = ..., + convert_numeric: bool = ..., + convert_non_numeric: bool = ..., + convert_to_nullable_dtype: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., +) -> ArrayLike: ... +@overload +def maybe_convert_numeric( + values: npt.NDArray[np.object_], + na_values: set, + convert_empty: bool = ..., + coerce_numeric: bool = ..., + convert_to_masked_nullable: Literal[False] = ..., +) -> tuple[np.ndarray, None]: ... +@overload +def maybe_convert_numeric( + values: npt.NDArray[np.object_], + na_values: set, + convert_empty: bool = ..., + coerce_numeric: bool = ..., + *, + convert_to_masked_nullable: Literal[True], +) -> tuple[np.ndarray, np.ndarray]: ... 
+ +# TODO: restrict `arr`? +def ensure_string_array( + arr, + na_value: object = ..., + convert_na_value: bool = ..., + copy: bool = ..., + skipna: bool = ..., +) -> npt.NDArray[np.object_]: ... +def convert_nans_to_NA( + arr: npt.NDArray[np.object_], +) -> npt.NDArray[np.object_]: ... +def fast_zip(ndarrays: list) -> npt.NDArray[np.object_]: ... + +# TODO: can we be more specific about rows? +def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ... +def tuples_to_object_array( + tuples: npt.NDArray[np.object_], +) -> ndarray_obj_2d: ... + +# TODO: can we be more specific about rows? +def to_object_array(rows: object, min_width: int = ...) -> ndarray_obj_2d: ... +def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ... +def maybe_booleans_to_slice( + mask: npt.NDArray[np.uint8], +) -> slice | npt.NDArray[np.uint8]: ... +def maybe_indices_to_slice( + indices: npt.NDArray[np.intp], + max_len: int, +) -> slice | npt.NDArray[np.intp]: ... +def is_all_arraylike(obj: list) -> bool: ... + +# ----------------------------------------------------------------- +# Functions which in reality take memoryviews + +def memory_usage_of_objects(arr: np.ndarray) -> int: ... # object[:] # np.int64 +def map_infer_mask( + arr: np.ndarray, + f: Callable[[Any], Any], + mask: np.ndarray, # const uint8_t[:] + convert: bool = ..., + na_value: Any = ..., + dtype: np.dtype = ..., +) -> np.ndarray: ... +def indices_fast( + index: npt.NDArray[np.intp], + labels: np.ndarray, # const int64_t[:] + keys: list, + sorted_labels: list[npt.NDArray[np.int64]], +) -> dict[Hashable, npt.NDArray[np.intp]]: ... +def generate_slices( + labels: np.ndarray, ngroups: int # const intp_t[:] +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ... +def count_level_2d( + mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True], + labels: np.ndarray, # const intp_t[:] + max_bin: int, +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=2] +def get_level_sorter( + codes: np.ndarray, # const int64_t[:] + starts: np.ndarray, # const intp_t[:] +) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1] +def generate_bins_dt64( + values: npt.NDArray[np.int64], + binner: np.ndarray, # const int64_t[:] + closed: object = ..., + hasnans: bool = ..., +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] +def array_equivalent_object( + left: npt.NDArray[np.object_], + right: npt.NDArray[np.object_], +) -> bool: ... +def has_infs(arr: np.ndarray) -> bool: ... # const floating[:] +def has_only_ints_or_nan(arr: np.ndarray) -> bool: ... # const floating[:] +def get_reverse_indexer( + indexer: np.ndarray, # const intp_t[:] + length: int, +) -> npt.NDArray[np.intp]: ... +def is_bool_list(obj: list) -> bool: ... +def dtypes_all_equal(types: list[DtypeObj]) -> bool: ... +def is_range_indexer( + left: np.ndarray, n: int # np.ndarray[np.int64, ndim=1] +) -> bool: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..282dcee3ed6cfdd5cd83e5c78a69c422831b4cac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.pyi @@ -0,0 +1,16 @@ +import numpy as np +from numpy import typing as npt + +class NAType: + def __new__(cls, *args, **kwargs): ... + +NA: NAType + +def is_matching_na( + left: object, right: object, nan_matches_none: bool = ... +) -> bool: ... +def isposinf_scalar(val: object) -> bool: ... +def isneginf_scalar(val: object) -> bool: ... 
+def checknull(val: object, inf_as_na: bool = ...) -> bool: ... +def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ... +def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7584e2a2aa5d6fcbc654f5ed8d8002dd5c73a9e6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..488b5c6d67af94423a38fee42bfc9fee0e1658f0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_parser.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_parser.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..aea1affb0ecc379af78b95ddfff7609e45c98c3e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_parser.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5cc832c2bec571931b1a3d6f0edaf6671a6e33de Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.pyi new file mode 100644 index 0000000000000000000000000000000000000000..aaa44a0cf47bf8635727ea9f354227de72bbff29 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.pyi @@ -0,0 +1,27 @@ +from typing import ( + Sequence, + overload, +) + +from pandas._typing import ( + AnyArrayLike, + DataFrame, + Index, + Series, +) + +# note: this is a lie to make type checkers happy (they special +# case property). cache_readonly uses attribute names similar to +# property (fget) but it does not provide fset and fdel. +cache_readonly = property + +class AxisProperty: + axis: int + def __init__(self, axis: int = ..., doc: str = ...) -> None: ... + @overload + def __get__(self, obj: DataFrame | Series, type) -> Index: ... + @overload + def __get__(self, obj: None, type) -> AxisProperty: ... + def __set__( + self, obj: DataFrame | Series, value: AnyArrayLike | Sequence + ) -> None: ... 
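Note: properties.pyi above deliberately types cache_readonly as a plain property so type checkers accept it; at runtime it is a Cython caching descriptor. A minimal illustrative sketch of typical usage (not part of this diff, assuming pandas._libs.properties.cache_readonly behaves as a caching read-only attribute):

from pandas._libs.properties import cache_readonly

class Sized:
    def __init__(self, values):
        self._values = list(values)

    @cache_readonly
    def n(self):
        # computed on first access, then served from the per-instance cache
        return len(self._values)

obj = Sized(range(5))
obj.n  # 5, computed once; later accesses reuse the cached value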
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cb45535e3895f11fe23d8fdedcd59fa25e31af65 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.pyi new file mode 100644 index 0000000000000000000000000000000000000000..110687fcd0c313c45e8b025083fa5790fb9913b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.pyi @@ -0,0 +1,16 @@ +import numpy as np + +from pandas._typing import npt + +def unstack( + values: np.ndarray, # reshape_t[:, :] + mask: np.ndarray, # const uint8_t[:] + stride: int, + length: int, + width: int, + new_values: np.ndarray, # reshape_t[:, :] + new_mask: np.ndarray, # uint8_t[:, :] +) -> None: ... +def explode( + values: npt.NDArray[np.object_], +) -> tuple[npt.NDArray[np.object_], npt.NDArray[np.int64]]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..81b0d4c39e2065678c0c27429327f4b3e53d3b5d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5d65e2b56b5916ed1e76e1409e4f75c652ee8fc9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.pyi @@ -0,0 +1,7 @@ +from pandas.io.sas.sas7bdat import SAS7BDATReader + +class Parser: + def __init__(self, parser: SAS7BDATReader) -> None: ... + def read(self, nrows: int) -> None: ... + +def get_subheader_index(signature: bytes) -> int: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..73a73d427e68f00e342078ae9c959523329bf9f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.pyi new file mode 100644 index 0000000000000000000000000000000000000000..536265b25425e9e7ecabd37f095115442a7cb209 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.pyi @@ -0,0 +1,51 @@ +from typing import Sequence + +import numpy as np + +from pandas._typing import ( + Self, + npt, +) + +class SparseIndex: + length: int + npoints: int + def __init__(self) -> None: ... + @property + def ngaps(self) -> int: ... + @property + def nbytes(self) -> int: ... + @property + def indices(self) -> npt.NDArray[np.int32]: ... + def equals(self, other) -> bool: ... + def lookup(self, index: int) -> np.int32: ... + def lookup_array(self, indexer: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]: ... 
+ def to_int_index(self) -> IntIndex: ... + def to_block_index(self) -> BlockIndex: ... + def intersect(self, y_: SparseIndex) -> Self: ... + def make_union(self, y_: SparseIndex) -> Self: ... + +class IntIndex(SparseIndex): + indices: npt.NDArray[np.int32] + def __init__( + self, length: int, indices: Sequence[int], check_integrity: bool = ... + ) -> None: ... + +class BlockIndex(SparseIndex): + nblocks: int + blocs: np.ndarray + blengths: np.ndarray + def __init__( + self, length: int, blocs: np.ndarray, blengths: np.ndarray + ) -> None: ... + + # Override to have correct parameters + def intersect(self, other: SparseIndex) -> Self: ... + def make_union(self, y: SparseIndex) -> Self: ... + +def make_mask_object_ndarray( + arr: npt.NDArray[np.object_], fill_value +) -> npt.NDArray[np.bool_]: ... +def get_blocks( + indices: npt.NDArray[np.int32], +) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5e979af95276ddf8161f8e8a518bbfdd53f2c4ca Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..01da496975f512b204defb06fcd19a36e235f97a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.pyi @@ -0,0 +1,12 @@ +def assert_dict_equal(a, b, compare_keys: bool = ...): ... +def assert_almost_equal( + a, + b, + rtol: float = ..., + atol: float = ..., + check_dtype: bool = ..., + obj=..., + lobj=..., + robj=..., + index_values=..., +): ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8be58e91eabd8e8dde4e9006ab15611cb10c503 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7c4a670e692a6c329f6272e382e6fafd8203c927 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7b41856525dadf79a2bf4b29c7ddebfedaa880db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.pyi @@ -0,0 +1,20 @@ +import numpy as np + +from pandas._typing import ArrayLike + +def write_csv_rows( + data: list[ArrayLike], + data_index: np.ndarray, + nlevels: int, + cols: np.ndarray, + writer: object, # _csv.writer +) -> None: ... +def convert_json_to_lines(arr: str) -> str: ... 
+def max_len_string_array( + arr: np.ndarray, # pandas_string[:] +) -> int: ... +def word_len(val: object) -> int: ... +def string_array_replace_from_nan_rep( + arr: np.ndarray, # np.ndarray[object, ndim=1] + nan_rep: object, +) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..361998db8e38bdd5a56cb4e20fa9c80e2f8b1af7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__init__.py @@ -0,0 +1,638 @@ +from __future__ import annotations + +from decimal import Decimal +import operator +import os +from sys import byteorder +from typing import ( + TYPE_CHECKING, + Callable, + ContextManager, + cast, +) +import warnings + +import numpy as np + +from pandas._config.localization import ( + can_set_locale, + get_locales, + set_locale, +) + +from pandas.compat import pa_version_under10p1 + +from pandas.core.dtypes.common import is_string_dtype + +import pandas as pd +from pandas import ( + ArrowDtype, + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, +) +from pandas._testing._io import ( + round_trip_localpath, + round_trip_pathlib, + round_trip_pickle, + write_to_compressed, +) +from pandas._testing._warnings import ( + assert_produces_warning, + maybe_produces_warning, +) +from pandas._testing.asserters import ( + assert_almost_equal, + assert_attr_equal, + assert_categorical_equal, + assert_class_equal, + assert_contains_all, + assert_copy, + assert_datetime_array_equal, + assert_dict_equal, + assert_equal, + assert_extension_array_equal, + assert_frame_equal, + assert_index_equal, + assert_indexing_slices_equivalent, + assert_interval_array_equal, + assert_is_sorted, + assert_is_valid_plot_return_object, + assert_metadata_equivalent, + assert_numpy_array_equal, + assert_period_array_equal, + assert_series_equal, + assert_sp_array_equal, + assert_timedelta_array_equal, + raise_assert_detail, +) +from pandas._testing.compat import ( + get_dtype, + get_obj, +) +from pandas._testing.contexts import ( + assert_cow_warning, + decompress_file, + ensure_clean, + raises_chained_assignment_error, + set_timezone, + use_numexpr, + with_csv_dialect, +) +from pandas.core.arrays import ( + BaseMaskedArray, + ExtensionArray, + NumpyExtensionArray, +) +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.construction import extract_array + +if TYPE_CHECKING: + from pandas._typing import ( + Dtype, + NpDtype, + ) + + from pandas.core.arrays import ArrowExtensionArray + +UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"] +UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"] +SIGNED_INT_NUMPY_DTYPES: list[NpDtype] = [int, "int8", "int16", "int32", "int64"] +SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"] +ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES +ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES +ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES] + +FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"] +FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"] +ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES] + +COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"] +STRING_DTYPES: list[Dtype] = [str, "str", "U"] + +DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", 
"M8[ns]"] +TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"] + +BOOL_DTYPES: list[Dtype] = [bool, "bool"] +BYTES_DTYPES: list[Dtype] = [bytes, "bytes"] +OBJECT_DTYPES: list[Dtype] = [object, "object"] + +ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES +ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES +ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES] +ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES] + +ALL_NUMPY_DTYPES = ( + ALL_REAL_NUMPY_DTYPES + + COMPLEX_DTYPES + + STRING_DTYPES + + DATETIME64_DTYPES + + TIMEDELTA64_DTYPES + + BOOL_DTYPES + + OBJECT_DTYPES + + BYTES_DTYPES +) + +NARROW_NP_DTYPES = [ + np.float16, + np.float32, + np.int8, + np.int16, + np.int32, + np.uint8, + np.uint16, + np.uint32, +] + +PYTHON_DATA_TYPES = [ + str, + int, + float, + complex, + list, + tuple, + range, + dict, + set, + frozenset, + bool, + bytes, + bytearray, + memoryview, +] + +ENDIAN = {"little": "<", "big": ">"}[byteorder] + +NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")] +NP_NAT_OBJECTS = [ + cls("NaT", unit) + for cls in [np.datetime64, np.timedelta64] + for unit in [ + "Y", + "M", + "W", + "D", + "h", + "m", + "s", + "ms", + "us", + "ns", + "ps", + "fs", + "as", + ] +] + +if not pa_version_under10p1: + import pyarrow as pa + + UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()] + SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()] + ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES + ALL_INT_PYARROW_DTYPES_STR_REPR = [ + str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES + ] + + # pa.float16 doesn't seem supported + # https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86 + FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()] + FLOAT_PYARROW_DTYPES_STR_REPR = [ + str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES + ] + DECIMAL_PYARROW_DTYPES = [pa.decimal128(7, 3)] + STRING_PYARROW_DTYPES = [pa.string()] + BINARY_PYARROW_DTYPES = [pa.binary()] + + TIME_PYARROW_DTYPES = [ + pa.time32("s"), + pa.time32("ms"), + pa.time64("us"), + pa.time64("ns"), + ] + DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()] + DATETIME_PYARROW_DTYPES = [ + pa.timestamp(unit=unit, tz=tz) + for unit in ["s", "ms", "us", "ns"] + for tz in [None, "UTC", "US/Pacific", "US/Eastern"] + ] + TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]] + + BOOL_PYARROW_DTYPES = [pa.bool_()] + + # TODO: Add container like pyarrow types: + # https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions + ALL_PYARROW_DTYPES = ( + ALL_INT_PYARROW_DTYPES + + FLOAT_PYARROW_DTYPES + + DECIMAL_PYARROW_DTYPES + + STRING_PYARROW_DTYPES + + BINARY_PYARROW_DTYPES + + TIME_PYARROW_DTYPES + + DATE_PYARROW_DTYPES + + DATETIME_PYARROW_DTYPES + + TIMEDELTA_PYARROW_DTYPES + + BOOL_PYARROW_DTYPES + ) + ALL_REAL_PYARROW_DTYPES_STR_REPR = ( + ALL_INT_PYARROW_DTYPES_STR_REPR + FLOAT_PYARROW_DTYPES_STR_REPR + ) +else: + FLOAT_PYARROW_DTYPES_STR_REPR = [] + ALL_INT_PYARROW_DTYPES_STR_REPR = [] + ALL_PYARROW_DTYPES = [] + ALL_REAL_PYARROW_DTYPES_STR_REPR = [] + +ALL_REAL_NULLABLE_DTYPES = ( + FLOAT_NUMPY_DTYPES + ALL_REAL_EXTENSION_DTYPES + ALL_REAL_PYARROW_DTYPES_STR_REPR +) + +arithmetic_dunder_methods = [ + "__add__", + "__radd__", + "__sub__", + "__rsub__", + "__mul__", + "__rmul__", + "__floordiv__", + "__rfloordiv__", + "__truediv__", + "__rtruediv__", + "__pow__", + 
"__rpow__", + "__mod__", + "__rmod__", +] + +comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"] + + +# ----------------------------------------------------------------------------- +# Comparators + + +def box_expected(expected, box_cls, transpose: bool = True): + """ + Helper function to wrap the expected output of a test in a given box_class. + + Parameters + ---------- + expected : np.ndarray, Index, Series + box_cls : {Index, Series, DataFrame} + + Returns + ------- + subclass of box_cls + """ + if box_cls is pd.array: + if isinstance(expected, RangeIndex): + # pd.array would return an IntegerArray + expected = NumpyExtensionArray(np.asarray(expected._values)) + else: + expected = pd.array(expected, copy=False) + elif box_cls is Index: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning) + expected = Index(expected) + elif box_cls is Series: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning) + expected = Series(expected) + elif box_cls is DataFrame: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning) + expected = Series(expected).to_frame() + if transpose: + # for vector operations, we need a DataFrame to be a single-row, + # not a single-column, in order to operate against non-DataFrame + # vectors of the same length. But convert to two rows to avoid + # single-row special cases in datetime arithmetic + expected = expected.T + expected = pd.concat([expected] * 2, ignore_index=True) + elif box_cls is np.ndarray or box_cls is np.array: + expected = np.array(expected) + elif box_cls is to_array: + expected = to_array(expected) + else: + raise NotImplementedError(box_cls) + return expected + + +def to_array(obj): + """ + Similar to pd.array, but does not cast numpy dtypes to nullable dtypes. + """ + # temporary implementation until we get pd.array in place + dtype = getattr(obj, "dtype", None) + + if dtype is None: + return np.asarray(obj) + + return extract_array(obj, extract_numpy=True) + + +class SubclassedSeries(Series): + _metadata = ["testattr", "name"] + + @property + def _constructor(self): + # For testing, those properties return a generic callable, and not + # the actual class. In this case that is equivalent, but it is to + # ensure we don't rely on the property returning a class + # See https://github.com/pandas-dev/pandas/pull/46018 and + # https://github.com/pandas-dev/pandas/issues/32638 and linked issues + return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs) + + @property + def _constructor_expanddim(self): + return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs) + + +class SubclassedDataFrame(DataFrame): + _metadata = ["testattr"] + + @property + def _constructor(self): + return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs) + + @property + def _constructor_sliced(self): + return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs) + + +def convert_rows_list_to_csv_str(rows_list: list[str]) -> str: + """ + Convert list of CSV rows to single CSV-formatted string for current OS. + + This method is used for creating expected value of to_csv() method. + + Parameters + ---------- + rows_list : List[str] + Each element represents the row of csv. + + Returns + ------- + str + Expected output of to_csv() in current OS. 
+ """ + sep = os.linesep + return sep.join(rows_list) + sep + + +def external_error_raised(expected_exception: type[Exception]) -> ContextManager: + """ + Helper function to mark pytest.raises that have an external error message. + + Parameters + ---------- + expected_exception : Exception + Expected error to raise. + + Returns + ------- + Callable + Regular `pytest.raises` function with `match` equal to `None`. + """ + import pytest + + return pytest.raises(expected_exception, match=None) + + +cython_table = pd.core.common._cython_table.items() + + +def get_cython_table_params(ndframe, func_names_and_expected): + """ + Combine frame, functions from com._cython_table + keys and expected result. + + Parameters + ---------- + ndframe : DataFrame or Series + func_names_and_expected : Sequence of two items + The first item is a name of a NDFrame method ('sum', 'prod') etc. + The second item is the expected return value. + + Returns + ------- + list + List of three items (DataFrame, function, expected result) + """ + results = [] + for func_name, expected in func_names_and_expected: + results.append((ndframe, func_name, expected)) + results += [ + (ndframe, func, expected) + for func, name in cython_table + if name == func_name + ] + return results + + +def get_op_from_name(op_name: str) -> Callable: + """ + The operator function for a given op name. + + Parameters + ---------- + op_name : str + The op name, in form of "add" or "__add__". + + Returns + ------- + function + A function performing the operation. + """ + short_opname = op_name.strip("_") + try: + op = getattr(operator, short_opname) + except AttributeError: + # Assume it is the reverse operator + rop = getattr(operator, short_opname[1:]) + op = lambda x, y: rop(y, x) + + return op + + +# ----------------------------------------------------------------------------- +# Indexing test helpers + + +def getitem(x): + return x + + +def setitem(x): + return x + + +def loc(x): + return x.loc + + +def iloc(x): + return x.iloc + + +def at(x): + return x.at + + +def iat(x): + return x.iat + + +# ----------------------------------------------------------------------------- + +_UNITS = ["s", "ms", "us", "ns"] + + +def get_finest_unit(left: str, right: str): + """ + Find the higher of two datetime64 units. + """ + if _UNITS.index(left) >= _UNITS.index(right): + return left + return right + + +def shares_memory(left, right) -> bool: + """ + Pandas-compat for np.shares_memory. + """ + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + return np.shares_memory(left, right) + elif isinstance(left, np.ndarray): + # Call with reversed args to get to unpacking logic below. 
+ return shares_memory(right, left) + + if isinstance(left, RangeIndex): + return False + if isinstance(left, MultiIndex): + return shares_memory(left._codes, right) + if isinstance(left, (Index, Series)): + return shares_memory(left._values, right) + + if isinstance(left, NDArrayBackedExtensionArray): + return shares_memory(left._ndarray, right) + if isinstance(left, pd.core.arrays.SparseArray): + return shares_memory(left.sp_values, right) + if isinstance(left, pd.core.arrays.IntervalArray): + return shares_memory(left._left, right) or shares_memory(left._right, right) + + if ( + isinstance(left, ExtensionArray) + and is_string_dtype(left.dtype) + and left.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] + ): + # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669 + left = cast("ArrowExtensionArray", left) + if ( + isinstance(right, ExtensionArray) + and is_string_dtype(right.dtype) + and right.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] + ): + right = cast("ArrowExtensionArray", right) + left_pa_data = left._pa_array + right_pa_data = right._pa_array + left_buf1 = left_pa_data.chunk(0).buffers()[1] + right_buf1 = right_pa_data.chunk(0).buffers()[1] + return left_buf1 == right_buf1 + + if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray): + # By convention, we'll say these share memory if they share *either* + # the _data or the _mask + return np.shares_memory(left._data, right._data) or np.shares_memory( + left._mask, right._mask + ) + + if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1: + arr = left._mgr.arrays[0] + return shares_memory(arr, right) + + raise NotImplementedError(type(left), type(right)) + + +__all__ = [ + "ALL_INT_EA_DTYPES", + "ALL_INT_NUMPY_DTYPES", + "ALL_NUMPY_DTYPES", + "ALL_REAL_NUMPY_DTYPES", + "assert_almost_equal", + "assert_attr_equal", + "assert_categorical_equal", + "assert_class_equal", + "assert_contains_all", + "assert_copy", + "assert_datetime_array_equal", + "assert_dict_equal", + "assert_equal", + "assert_extension_array_equal", + "assert_frame_equal", + "assert_index_equal", + "assert_indexing_slices_equivalent", + "assert_interval_array_equal", + "assert_is_sorted", + "assert_is_valid_plot_return_object", + "assert_metadata_equivalent", + "assert_numpy_array_equal", + "assert_period_array_equal", + "assert_produces_warning", + "assert_series_equal", + "assert_sp_array_equal", + "assert_timedelta_array_equal", + "assert_cow_warning", + "at", + "BOOL_DTYPES", + "box_expected", + "BYTES_DTYPES", + "can_set_locale", + "COMPLEX_DTYPES", + "convert_rows_list_to_csv_str", + "DATETIME64_DTYPES", + "decompress_file", + "ENDIAN", + "ensure_clean", + "external_error_raised", + "FLOAT_EA_DTYPES", + "FLOAT_NUMPY_DTYPES", + "get_cython_table_params", + "get_dtype", + "getitem", + "get_locales", + "get_finest_unit", + "get_obj", + "get_op_from_name", + "iat", + "iloc", + "loc", + "maybe_produces_warning", + "NARROW_NP_DTYPES", + "NP_NAT_OBJECTS", + "NULL_OBJECTS", + "OBJECT_DTYPES", + "raise_assert_detail", + "raises_chained_assignment_error", + "round_trip_localpath", + "round_trip_pathlib", + "round_trip_pickle", + "setitem", + "set_locale", + "set_timezone", + "shares_memory", + "SIGNED_INT_EA_DTYPES", + "SIGNED_INT_NUMPY_DTYPES", + "STRING_DTYPES", + "SubclassedDataFrame", + "SubclassedSeries", + "TIMEDELTA64_DTYPES", + "to_array", + "UNSIGNED_INT_EA_DTYPES", + "UNSIGNED_INT_NUMPY_DTYPES", + "use_numexpr", + "with_csv_dialect", + "write_to_compressed", 
+] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f66588787c57d2245c1302433ebbdc707732963 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7346b9e0d846a573c74b53da705f749a07085e46 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87c98914e03aeb54ad35d22c9ccf576375a338f1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8142267fd84dbba72bd438916bf89beecef62bad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..793ab2930a8ca8641ea30afa75c57674b90a5338 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc725c74a5dd77fb611ef4eb2f877e40cfc56e0a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa5cb03935fc02efbc8d53db5fee0d241ac00e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py new file mode 100644 index 0000000000000000000000000000000000000000..084ca9c306d192a2543108249dbc345d1259be01 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py @@ -0,0 +1,93 @@ +""" +Hypothesis data generator helpers. 
+""" +from datetime import datetime + +from hypothesis import strategies as st +from hypothesis.extra.dateutil import timezones as dateutil_timezones +from hypothesis.extra.pytz import timezones as pytz_timezones + +from pandas.compat import is_platform_windows + +import pandas as pd + +from pandas.tseries.offsets import ( + BMonthBegin, + BMonthEnd, + BQuarterBegin, + BQuarterEnd, + BYearBegin, + BYearEnd, + MonthBegin, + MonthEnd, + QuarterBegin, + QuarterEnd, + YearBegin, + YearEnd, +) + +OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3) + +OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3) + +OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3) + +OPTIONAL_DICTS = st.lists( + st.one_of(st.none(), st.dictionaries(st.text(), st.integers())), + max_size=10, + min_size=3, +) + +OPTIONAL_LISTS = st.lists( + st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)), + max_size=10, + min_size=3, +) + +OPTIONAL_ONE_OF_ALL = st.one_of( + OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT +) + +if is_platform_windows(): + DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1)) +else: + DATETIME_NO_TZ = st.datetimes() + +DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes( + min_value=pd.Timestamp( + 1900, 1, 1 + ).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues] + max_value=pd.Timestamp( + 1900, 1, 1 + ).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues] + timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()), +) + +DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes( + min_value=pd.Timestamp.min.to_pydatetime(warn=False), + max_value=pd.Timestamp.max.to_pydatetime(warn=False), +) + +INT_NEG_999_TO_POS_999 = st.integers(-999, 999) + +# The strategy for each type is registered in conftest.py, as they don't carry +# enough runtime information (e.g. type hints) to infer how to build them. +YQM_OFFSET = st.one_of( + *map( + st.from_type, + [ + MonthBegin, + MonthEnd, + BMonthBegin, + BMonthEnd, + QuarterBegin, + QuarterEnd, + BQuarterBegin, + BQuarterEnd, + YearBegin, + YearEnd, + BYearBegin, + BYearEnd, + ], + ) +) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_io.py b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_io.py new file mode 100644 index 0000000000000000000000000000000000000000..95977edb600ade42a8f8a1fada2b5085cee1da56 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_io.py @@ -0,0 +1,170 @@ +from __future__ import annotations + +import gzip +import io +import pathlib +import tarfile +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import uuid +import zipfile + +from pandas.compat import ( + get_bz2_file, + get_lzma_file, +) +from pandas.compat._optional import import_optional_dependency + +import pandas as pd +from pandas._testing.contexts import ensure_clean + +if TYPE_CHECKING: + from pandas._typing import ( + FilePath, + ReadPickleBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + +# ------------------------------------------------------------------ +# File-IO + + +def round_trip_pickle( + obj: Any, path: FilePath | ReadPickleBuffer | None = None +) -> DataFrame | Series: + """ + Pickle an object and then read it again. + + Parameters + ---------- + obj : any object + The object to pickle and then re-read. 
+ path : str, path object or file-like object, default None + The path where the pickled object is written and then read. + + Returns + ------- + pandas object + The original object that was pickled and then re-read. + """ + _path = path + if _path is None: + _path = f"__{uuid.uuid4()}__.pickle" + with ensure_clean(_path) as temp_path: + pd.to_pickle(obj, temp_path) + return pd.read_pickle(temp_path) + + +def round_trip_pathlib(writer, reader, path: str | None = None): + """ + Write an object to file specified by a pathlib.Path and read it back + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + Path = pathlib.Path + if path is None: + path = "___pathlib___" + with ensure_clean(path) as path: + writer(Path(path)) # type: ignore[arg-type] + obj = reader(Path(path)) # type: ignore[arg-type] + return obj + + +def round_trip_localpath(writer, reader, path: str | None = None): + """ + Write an object to file specified by a py.path LocalPath and read it back. + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + import pytest + + LocalPath = pytest.importorskip("py.path").local + if path is None: + path = "___localpath___" + with ensure_clean(path) as path: + writer(LocalPath(path)) + obj = reader(LocalPath(path)) + return obj + + +def write_to_compressed(compression, path, data, dest: str = "test") -> None: + """ + Write data to a compressed file. + + Parameters + ---------- + compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'} + The compression type to use. + path : str + The file path to write the data. + data : str + The data to write. + dest : str, default "test" + The destination file (for ZIP only) + + Raises + ------ + ValueError : An invalid compression value was passed in. + """ + args: tuple[Any, ...] 
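A usage sketch (not part of the patch) for the round-trip helpers above:

    import pandas as pd
    import pandas._testing as tm

    df = pd.DataFrame({"a": [1, 2]})
    tm.assert_frame_equal(tm.round_trip_pickle(df), df)

    # writer is a bound pandas method, reader a callable taking the path
    result = tm.round_trip_pathlib(df.to_csv, lambda p: pd.read_csv(p, index_col=0))
    tm.assert_frame_equal(result, df)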
= (data,) + mode = "wb" + method = "write" + compress_method: Callable + + if compression == "zip": + compress_method = zipfile.ZipFile + mode = "w" + args = (dest, data) + method = "writestr" + elif compression == "tar": + compress_method = tarfile.TarFile + mode = "w" + file = tarfile.TarInfo(name=dest) + bytes = io.BytesIO(data) + file.size = len(data) + args = (file, bytes) + method = "addfile" + elif compression == "gzip": + compress_method = gzip.GzipFile + elif compression == "bz2": + compress_method = get_bz2_file() + elif compression == "zstd": + compress_method = import_optional_dependency("zstandard").open + elif compression == "xz": + compress_method = get_lzma_file() + else: + raise ValueError(f"Unrecognized compression type: {compression}") + + with compress_method(path, mode=mode) as f: + getattr(f, method)(*args) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_warnings.py b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a287942f2dac5ddbaf49168db280ec2ba3f2c4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/_warnings.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +from contextlib import ( + contextmanager, + nullcontext, +) +import inspect +import re +import sys +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) +import warnings + +from pandas.compat import PY311 + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Sequence, + ) + + +@contextmanager +def assert_produces_warning( + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning, + filter_level: Literal[ + "error", "ignore", "always", "default", "module", "once" + ] = "always", + check_stacklevel: bool = True, + raise_on_extra_warnings: bool = True, + match: str | None = None, +) -> Generator[list[warnings.WarningMessage], None, None]: + """ + Context manager for running code expected to either raise a specific warning, + multiple specific warnings, or not raise any warnings. Verifies that the code + raises the expected warning(s), and that it does not raise any other unexpected + warnings. It is basically a wrapper around ``warnings.catch_warnings``. + + Parameters + ---------- + expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning + The type of Exception raised. ``exception.Warning`` is the base + class for all warnings. To raise multiple types of exceptions, + pass them as a tuple. To check that no warning is returned, + specify ``False`` or ``None``. + filter_level : str or None, default "always" + Specifies whether warnings are ignored, displayed, or turned + into errors. + Valid values are: + + * "error" - turns matching warnings into exceptions + * "ignore" - discard the warning + * "always" - always emit a warning + * "default" - print the warning the first time it is generated + from each location + * "module" - print the warning the first time it is generated + from each module + * "once" - print the warning the first time it is generated + + check_stacklevel : bool, default True + If True, displays the line that called the function containing + the warning to show were the function is called. Otherwise, the + line that implements the function is displayed. + raise_on_extra_warnings : bool, default True + Whether extra warnings not of the type `expected_warning` should + cause the test to fail. + match : str, optional + Match warning message. 
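Sketch (not part of the patch): write_to_compressed pairs naturally with ensure_clean in read-side compression tests; gzip/bz2/xz/zstd expect bytes, while "zip" and "tar" also use the dest member name.

    import pandas as pd
    import pandas._testing as tm

    with tm.ensure_clean("tmp.csv.gz") as path:
        tm.write_to_compressed("gzip", path, b"a,b\n1,2\n")
        df = pd.read_csv(path, compression="gzip")   # columns a, b; one row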
+ + Examples + -------- + >>> import warnings + >>> with assert_produces_warning(): + ... warnings.warn(UserWarning()) + ... + >>> with assert_produces_warning(False): + ... warnings.warn(RuntimeWarning()) + ... + Traceback (most recent call last): + ... + AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. + >>> with assert_produces_warning(UserWarning): + ... warnings.warn(RuntimeWarning()) + Traceback (most recent call last): + ... + AssertionError: Did not see expected warning of class 'UserWarning'. + + ..warn:: This is *not* thread-safe. + """ + __tracebackhide__ = True + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter(filter_level) + try: + yield w + finally: + if expected_warning: + expected_warning = cast(type[Warning], expected_warning) + _assert_caught_expected_warning( + caught_warnings=w, + expected_warning=expected_warning, + match=match, + check_stacklevel=check_stacklevel, + ) + if raise_on_extra_warnings: + _assert_caught_no_extra_warnings( + caught_warnings=w, + expected_warning=expected_warning, + ) + + +def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs): + """ + Return a context manager that possibly checks a warning based on the condition + """ + if condition: + return assert_produces_warning(warning, **kwargs) + else: + return nullcontext() + + +def _assert_caught_expected_warning( + *, + caught_warnings: Sequence[warnings.WarningMessage], + expected_warning: type[Warning], + match: str | None, + check_stacklevel: bool, +) -> None: + """Assert that there was the expected warning among the caught warnings.""" + saw_warning = False + matched_message = False + unmatched_messages = [] + + for actual_warning in caught_warnings: + if issubclass(actual_warning.category, expected_warning): + saw_warning = True + + if check_stacklevel: + _assert_raised_with_correct_stacklevel(actual_warning) + + if match is not None: + if re.search(match, str(actual_warning.message)): + matched_message = True + else: + unmatched_messages.append(actual_warning.message) + + if not saw_warning: + raise AssertionError( + f"Did not see expected warning of class " + f"{repr(expected_warning.__name__)}" + ) + + if match and not matched_message: + raise AssertionError( + f"Did not see warning {repr(expected_warning.__name__)} " + f"matching '{match}'. The emitted warning messages are " + f"{unmatched_messages}" + ) + + +def _assert_caught_no_extra_warnings( + *, + caught_warnings: Sequence[warnings.WarningMessage], + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None, +) -> None: + """Assert that no extra warnings apart from the expected ones are caught.""" + extra_warnings = [] + + for actual_warning in caught_warnings: + if _is_unexpected_warning(actual_warning, expected_warning): + # GH#38630 pytest.filterwarnings does not suppress these. 
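Sketch (not part of the patch): maybe_produces_warning, defined above, is handy in parametrized tests where only some cases are expected to warn; expects_warning is a hypothetical flag.

    import warnings
    import pandas._testing as tm

    expects_warning = True  # hypothetical per-parameter flag
    with tm.maybe_produces_warning(FutureWarning, expects_warning, match="deprecated"):
        if expects_warning:
            warnings.warn("deprecated usage", FutureWarning)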
+ if actual_warning.category == ResourceWarning: + # GH 44732: Don't make the CI flaky by filtering SSL-related + # ResourceWarning from dependencies + if "unclosed bool: + """Check if the actual warning issued is unexpected.""" + if actual_warning and not expected_warning: + return True + expected_warning = cast(type[Warning], expected_warning) + return bool(not issubclass(actual_warning.category, expected_warning)) + + +def _assert_raised_with_correct_stacklevel( + actual_warning: warnings.WarningMessage, +) -> None: + # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow + frame = inspect.currentframe() + for _ in range(4): + frame = frame.f_back # type: ignore[union-attr] + try: + caller_filename = inspect.getfile(frame) # type: ignore[arg-type] + finally: + # See note in + # https://docs.python.org/3/library/inspect.html#inspect.Traceback + del frame + msg = ( + "Warning not set with correct stacklevel. " + f"File where warning is raised: {actual_warning.filename} != " + f"{caller_filename}. Warning message: {actual_warning.message}" + ) + assert actual_warning.filename == caller_filename, msg diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/asserters.py b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/asserters.py new file mode 100644 index 0000000000000000000000000000000000000000..41d2a7344a4edf2e05664eb599b0049d2c696e4c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/asserters.py @@ -0,0 +1,1435 @@ +from __future__ import annotations + +import operator +from typing import ( + TYPE_CHECKING, + Literal, + NoReturn, + cast, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.missing import is_matching_na +from pandas._libs.sparse import SparseIndex +import pandas._libs.testing as _testing +from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions + +from pandas.core.dtypes.common import ( + is_bool, + is_float_dtype, + is_integer_dtype, + is_number, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + NumpyEADtype, +) +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + IntervalDtype, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, +) +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays.string_ import StringDtype +from pandas.core.indexes.api import safe_sort_index + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +def assert_almost_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = "equiv", + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + **kwargs, +) -> None: + """ + Check that the left and right objects are approximately equal. + + By approximately equal, we refer to objects that are numbers or that + contain numbers which may be equivalent to specific levels of precision. + + Parameters + ---------- + left : object + right : object + check_dtype : bool or {'equiv'}, default 'equiv' + Check dtype if both a and b are the same type. If 'equiv' is passed in, + then `RangeIndex` and `Index` with int64 dtype are also considered + equivalent when doing type checking. 
+ rtol : float, default 1e-5 + Relative tolerance. + atol : float, default 1e-8 + Absolute tolerance. + """ + if isinstance(left, Index): + assert_index_equal( + left, + right, + check_exact=False, + exact=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, Series): + assert_series_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, DataFrame): + assert_frame_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + else: + # Other sequences. + if check_dtype: + if is_number(left) and is_number(right): + # Do not compare numeric classes, like np.float64 and float. + pass + elif is_bool(left) and is_bool(right): + # Do not compare bool classes, like np.bool_ and bool. + pass + else: + if isinstance(left, np.ndarray) or isinstance(right, np.ndarray): + obj = "numpy array" + else: + obj = "Input" + assert_class_equal(left, right, obj=obj) + + # if we have "equiv", this becomes True + _testing.assert_almost_equal( + left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs + ) + + +def _check_isinstance(left, right, cls) -> None: + """ + Helper method for our assert_* methods that ensures that + the two objects being compared have the right type before + proceeding with the comparison. + + Parameters + ---------- + left : The first object being compared. + right : The second object being compared. + cls : The class type to check against. + + Raises + ------ + AssertionError : Either `left` or `right` is not an instance of `cls`. + """ + cls_name = cls.__name__ + + if not isinstance(left, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(left)} instead" + ) + if not isinstance(right, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(right)} instead" + ) + + +def assert_dict_equal(left, right, compare_keys: bool = True) -> None: + _check_isinstance(left, right, dict) + _testing.assert_dict_equal(left, right, compare_keys=compare_keys) + + +def assert_index_equal( + left: Index, + right: Index, + exact: bool | str = "equiv", + check_names: bool = True, + check_exact: bool = True, + check_categorical: bool = True, + check_order: bool = True, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + obj: str = "Index", +) -> None: + """ + Check that left and right Index are equal. + + Parameters + ---------- + left : Index + right : Index + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + check_names : bool, default True + Whether to check the names attribute. + check_exact : bool, default True + Whether to compare number exactly. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_order : bool, default True + Whether to compare the order of index entries as well as their values. + If True, both indexes must contain the same elements, in the same order. + If False, both indexes must contain the same elements, but in any order. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Index' + Specify object name being compared, internally used to show appropriate + assertion message. 
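A short sketch (not part of the patch) of assert_almost_equal, which dispatches to the Index/Series/DataFrame asserters with check_exact=False and the tolerances described above:

    import pandas as pd
    import pandas._testing as tm

    tm.assert_almost_equal(0.1 + 0.2, 0.3)   # passes within the default rtol/atol
    tm.assert_almost_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0 + 1e-7]))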
+ + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Index([1, 2, 3]) + >>> b = pd.Index([1, 2, 3]) + >>> tm.assert_index_equal(a, b) + """ + __tracebackhide__ = True + + def _check_types(left, right, obj: str = "Index") -> None: + if not exact: + return + + assert_class_equal(left, right, exact=exact, obj=obj) + assert_attr_equal("inferred_type", left, right, obj=obj) + + # Skip exact dtype checking when `check_categorical` is False + if isinstance(left.dtype, CategoricalDtype) and isinstance( + right.dtype, CategoricalDtype + ): + if check_categorical: + assert_attr_equal("dtype", left, right, obj=obj) + assert_index_equal(left.categories, right.categories, exact=exact) + return + + assert_attr_equal("dtype", left, right, obj=obj) + + # instance validation + _check_isinstance(left, right, Index) + + # class / dtype comparison + _check_types(left, right, obj=obj) + + # level comparison + if left.nlevels != right.nlevels: + msg1 = f"{obj} levels are different" + msg2 = f"{left.nlevels}, {left}" + msg3 = f"{right.nlevels}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # length comparison + if len(left) != len(right): + msg1 = f"{obj} length are different" + msg2 = f"{len(left)}, {left}" + msg3 = f"{len(right)}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # If order doesn't matter then sort the index entries + if not check_order: + left = safe_sort_index(left) + right = safe_sort_index(right) + + # MultiIndex special comparison for little-friendly error messages + if isinstance(left, MultiIndex): + right = cast(MultiIndex, right) + + for level in range(left.nlevels): + lobj = f"MultiIndex level [{level}]" + try: + # try comparison on levels/codes to avoid densifying MultiIndex + assert_index_equal( + left.levels[level], + right.levels[level], + exact=exact, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + rtol=rtol, + atol=atol, + obj=lobj, + ) + assert_numpy_array_equal(left.codes[level], right.codes[level]) + except AssertionError: + llevel = left.get_level_values(level) + rlevel = right.get_level_values(level) + + assert_index_equal( + llevel, + rlevel, + exact=exact, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + rtol=rtol, + atol=atol, + obj=lobj, + ) + # get_level_values may change dtype + _check_types(left.levels[level], right.levels[level], obj=obj) + + # skip exact index checking when `check_categorical` is False + elif check_exact and check_categorical: + if not left.equals(right): + mismatch = left._values != right._values + + if not isinstance(mismatch, np.ndarray): + mismatch = cast("ExtensionArray", mismatch).fillna(True) + + diff = np.sum(mismatch.astype(int)) * 100.0 / len(left) + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right) + else: + # if we have "equiv", this becomes True + exact_bool = bool(exact) + _testing.assert_almost_equal( + left.values, + right.values, + rtol=rtol, + atol=atol, + check_dtype=exact_bool, + obj=obj, + lobj=left, + robj=right, + ) + + # metadata comparison + if check_names: + assert_attr_equal("names", left, right, obj=obj) + if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex): + assert_attr_equal("dtype", left, right, obj=obj) + if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex): + assert_interval_array_equal(left._values, right._values) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or 
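Sketch (not part of the patch): with check_order=False, assert_index_equal sorts both sides first, so only membership has to match.

    import pandas as pd
    import pandas._testing as tm

    tm.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([3, 2, 1]), check_order=False)  # passes
    # tm.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([3, 2, 1]))  # would raise: order differs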
isinstance( + right.dtype, CategoricalDtype + ): + assert_categorical_equal(left._values, right._values, obj=f"{obj} category") + + +def assert_class_equal( + left, right, exact: bool | str = True, obj: str = "Input" +) -> None: + """ + Checks classes are equal. + """ + __tracebackhide__ = True + + def repr_class(x): + if isinstance(x, Index): + # return Index as it is to include values in the error message + return x + + return type(x).__name__ + + def is_class_equiv(idx: Index) -> bool: + """Classes that are a RangeIndex (sub-)instance or exactly an `Index` . + + This only checks class equivalence. There is a separate check that the + dtype is int64. + """ + return type(idx) is Index or isinstance(idx, RangeIndex) + + if type(left) == type(right): + return + + if exact == "equiv": + if is_class_equiv(left) and is_class_equiv(right): + return + + msg = f"{obj} classes are different" + raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) + + +def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None: + """ + Check attributes are equal. Both objects must have attribute. + + Parameters + ---------- + attr : str + Attribute name being compared. + left : object + right : object + obj : str, default 'Attributes' + Specify object name being compared, internally used to show appropriate + assertion message + """ + __tracebackhide__ = True + + left_attr = getattr(left, attr) + right_attr = getattr(right, attr) + + if left_attr is right_attr or is_matching_na(left_attr, right_attr): + # e.g. both np.nan, both NaT, both pd.NA, ... + return None + + try: + result = left_attr == right_attr + except TypeError: + # datetimetz on rhs may raise TypeError + result = False + if (left_attr is pd.NA) ^ (right_attr is pd.NA): + result = False + elif not isinstance(result, bool): + result = result.all() + + if not result: + msg = f'Attribute "{attr}" are different' + raise_assert_detail(obj, msg, left_attr, right_attr) + return None + + +def assert_is_valid_plot_return_object(objs) -> None: + from matplotlib.artist import Artist + from matplotlib.axes import Axes + + if isinstance(objs, (Series, np.ndarray)): + if isinstance(objs, Series): + objs = objs._values + for el in objs.ravel(): + msg = ( + "one of 'objs' is not a matplotlib Axes instance, " + f"type encountered {repr(type(el).__name__)}" + ) + assert isinstance(el, (Axes, dict)), msg + else: + msg = ( + "objs is neither an ndarray of Artist instances nor a single " + "ArtistArtist instance, tuple, or dict, 'objs' is a " + f"{repr(type(objs).__name__)}" + ) + assert isinstance(objs, (Artist, tuple, dict)), msg + + +def assert_is_sorted(seq) -> None: + """Assert that the sequence is sorted.""" + if isinstance(seq, (Index, Series)): + seq = seq.values + # sorting does not change precisions + if isinstance(seq, np.ndarray): + assert_numpy_array_equal(seq, np.sort(np.array(seq))) + else: + assert_extension_array_equal(seq, seq[seq.argsort()]) + + +def assert_categorical_equal( + left, + right, + check_dtype: bool = True, + check_category_order: bool = True, + obj: str = "Categorical", +) -> None: + """ + Test that Categoricals are equivalent. + + Parameters + ---------- + left : Categorical + right : Categorical + check_dtype : bool, default True + Check that integer dtype of the codes are the same. + check_category_order : bool, default True + Whether the order of the categories should be compared, which + implies identical integer codes. If False, only the resulting + values are compared. 
The ordered attribute is + checked regardless. + obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message. + """ + _check_isinstance(left, right, Categorical) + + exact: bool | str + if isinstance(left.categories, RangeIndex) or isinstance( + right.categories, RangeIndex + ): + exact = "equiv" + else: + # We still want to require exact matches for Index + exact = True + + if check_category_order: + assert_index_equal( + left.categories, right.categories, obj=f"{obj}.categories", exact=exact + ) + assert_numpy_array_equal( + left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes" + ) + else: + try: + lc = left.categories.sort_values() + rc = right.categories.sort_values() + except TypeError: + # e.g. '<' not supported between instances of 'int' and 'str' + lc, rc = left.categories, right.categories + assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact) + assert_index_equal( + left.categories.take(left.codes), + right.categories.take(right.codes), + obj=f"{obj}.values", + exact=exact, + ) + + assert_attr_equal("ordered", left, right, obj=obj) + + +def assert_interval_array_equal( + left, right, exact: bool | Literal["equiv"] = "equiv", obj: str = "IntervalArray" +) -> None: + """ + Test that two IntervalArrays are equivalent. + + Parameters + ---------- + left, right : IntervalArray + The IntervalArrays to compare. + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + obj : str, default 'IntervalArray' + Specify object name being compared, internally used to show appropriate + assertion message + """ + _check_isinstance(left, right, IntervalArray) + + kwargs = {} + if left._left.dtype.kind in "mM": + # We have a DatetimeArray or TimedeltaArray + kwargs["check_freq"] = False + + assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs) + assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs) + + assert_attr_equal("closed", left, right, obj=obj) + + +def assert_period_array_equal(left, right, obj: str = "PeriodArray") -> None: + _check_isinstance(left, right, PeriodArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_datetime_array_equal( + left, right, obj: str = "DatetimeArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, DatetimeArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + assert_attr_equal("tz", left, right, obj=obj) + + +def assert_timedelta_array_equal( + left, right, obj: str = "TimedeltaArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, TimedeltaArray) + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + + +def raise_assert_detail( + obj, message, left, right, diff=None, first_diff=None, index_values=None +) -> NoReturn: + __tracebackhide__ = True + + msg = f"""{obj} are different + +{message}""" + + if isinstance(index_values, Index): + index_values = np.asarray(index_values) + + if isinstance(index_values, np.ndarray): + msg += f"\n[index]: {pprint_thing(index_values)}" + + if isinstance(left, 
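Sketch (not part of the patch): with check_category_order=False only the resulting values (and the ordered flag) have to match, not the category layout.

    import pandas as pd
    import pandas._testing as tm

    left = pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"])
    right = pd.Categorical(["a", "b", "a"], categories=["c", "b", "a"])
    tm.assert_categorical_equal(left, right, check_category_order=False)   # passes
    # tm.assert_categorical_equal(left, right)  # would raise: category order differs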
np.ndarray): + left = pprint_thing(left) + elif isinstance(left, (CategoricalDtype, NumpyEADtype, StringDtype)): + left = repr(left) + + if isinstance(right, np.ndarray): + right = pprint_thing(right) + elif isinstance(right, (CategoricalDtype, NumpyEADtype, StringDtype)): + right = repr(right) + + msg += f""" +[left]: {left} +[right]: {right}""" + + if diff is not None: + msg += f"\n[diff]: {diff}" + + if first_diff is not None: + msg += f"\n{first_diff}" + + raise AssertionError(msg) + + +def assert_numpy_array_equal( + left, + right, + strict_nan: bool = False, + check_dtype: bool | Literal["equiv"] = True, + err_msg=None, + check_same=None, + obj: str = "numpy array", + index_values=None, +) -> None: + """ + Check that 'np.ndarray' is equivalent. + + Parameters + ---------- + left, right : numpy.ndarray or iterable + The two arrays to be compared. + strict_nan : bool, default False + If True, consider NaN and None to be different. + check_dtype : bool, default True + Check dtype if both a and b are np.ndarray. + err_msg : str, default None + If provided, used as assertion message. + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area. + obj : str, default 'numpy array' + Specify object name being compared, internally used to show appropriate + assertion message. + index_values : Index | numpy.ndarray, default None + optional index (shared by both left and right), used in output. + """ + __tracebackhide__ = True + + # instance validation + # Show a detailed error message when classes are different + assert_class_equal(left, right, obj=obj) + # both classes must be an np.ndarray + _check_isinstance(left, right, np.ndarray) + + def _get_base(obj): + return obj.base if getattr(obj, "base", None) is not None else obj + + left_base = _get_base(left) + right_base = _get_base(right) + + if check_same == "same": + if left_base is not right_base: + raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}") + elif check_same == "copy": + if left_base is right_base: + raise AssertionError(f"{repr(left_base)} is {repr(right_base)}") + + def _raise(left, right, err_msg) -> NoReturn: + if err_msg is None: + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shapes are different", left.shape, right.shape + ) + + diff = 0 + for left_arr, right_arr in zip(left, right): + # count up differences + if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan): + diff += 1 + + diff = diff * 100.0 / left.size + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right, index_values=index_values) + + raise AssertionError(err_msg) + + # compare shape and values + if not array_equivalent(left, right, strict_nan=strict_nan): + _raise(left, right, err_msg) + + if check_dtype: + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_extension_array_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + index_values=None, + check_exact: bool | lib.NoDefault = lib.no_default, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "ExtensionArray", +) -> None: + """ + Check that left and right ExtensionArrays are equal. + + Parameters + ---------- + left, right : ExtensionArray + The two arrays to compare. + check_dtype : bool, default True + Whether to check if the ExtensionArray dtypes are identical. 
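Sketch (not part of the patch): assert_numpy_array_equal checks shape, values and (by default) dtype, and check_same can additionally verify view/copy semantics.

    import numpy as np
    import pandas._testing as tm

    a = np.array([1, 2, 3])
    tm.assert_numpy_array_equal(a, a.copy())                      # passes
    tm.assert_numpy_array_equal(a, a[:], check_same="same")       # passes: same base memory
    tm.assert_numpy_array_equal(a, a.copy(), check_same="copy")   # passes: distinct memory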
+ index_values : Index | numpy.ndarray, default None + Optional index (shared by both left and right), used in output. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'ExtensionArray' + Specify object name being compared, internally used to show appropriate + assertion message. + + .. versionadded:: 2.0.0 + + Notes + ----- + Missing values are checked separately from valid values. + A mask of missing values is computed for each and checked to match. + The remaining all-valid values are cast to object dtype and checked. + + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Series([1, 2, 3, 4]) + >>> b, c = a.array, a.array + >>> tm.assert_extension_array_equal(b, c) + """ + if ( + check_exact is lib.no_default + and rtol is lib.no_default + and atol is lib.no_default + ): + check_exact = ( + is_numeric_dtype(left.dtype) + and not is_float_dtype(left.dtype) + or is_numeric_dtype(right.dtype) + and not is_float_dtype(right.dtype) + ) + elif check_exact is lib.no_default: + check_exact = False + + rtol = rtol if rtol is not lib.no_default else 1.0e-5 + atol = atol if atol is not lib.no_default else 1.0e-8 + + assert isinstance(left, ExtensionArray), "left is not an ExtensionArray" + assert isinstance(right, ExtensionArray), "right is not an ExtensionArray" + if check_dtype: + assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") + + if ( + isinstance(left, DatetimeLikeArrayMixin) + and isinstance(right, DatetimeLikeArrayMixin) + and type(right) == type(left) + ): + # GH 52449 + if not check_dtype and left.dtype.kind in "mM": + if not isinstance(left.dtype, np.dtype): + l_unit = cast(DatetimeTZDtype, left.dtype).unit + else: + l_unit = np.datetime_data(left.dtype)[0] + if not isinstance(right.dtype, np.dtype): + r_unit = cast(DatetimeTZDtype, right.dtype).unit + else: + r_unit = np.datetime_data(right.dtype)[0] + if ( + l_unit != r_unit + and compare_mismatched_resolutions( + left._ndarray, right._ndarray, operator.eq + ).all() + ): + return + # Avoid slow object-dtype comparisons + # np.asarray for case where we have a np.MaskedArray + assert_numpy_array_equal( + np.asarray(left.asi8), + np.asarray(right.asi8), + index_values=index_values, + obj=obj, + ) + return + + left_na = np.asarray(left.isna()) + right_na = np.asarray(right.isna()) + assert_numpy_array_equal( + left_na, right_na, obj=f"{obj} NA mask", index_values=index_values + ) + + left_valid = left[~left_na].to_numpy(dtype=object) + right_valid = right[~right_na].to_numpy(dtype=object) + if check_exact: + assert_numpy_array_equal( + left_valid, right_valid, obj=obj, index_values=index_values + ) + else: + _testing.assert_almost_equal( + left_valid, + right_valid, + check_dtype=bool(check_dtype), + rtol=rtol, + atol=atol, + obj=obj, + index_values=index_values, + ) + + +# This could be refactored to use the NDFrame.equals method +def assert_series_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + check_index_type: bool | Literal["equiv"] = "equiv", + check_series_type: bool = True, + check_names: bool = True, + check_exact: bool | lib.NoDefault = lib.no_default, + check_datetimelike_compat: bool = False, + check_categorical: 
bool = True, + check_category_order: bool = True, + check_freq: bool = True, + check_flags: bool = True, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "Series", + *, + check_index: bool = True, + check_like: bool = False, +) -> None: + """ + Check that left and right Series are equal. + + Parameters + ---------- + left : Series + right : Series + check_dtype : bool, default True + Whether to check the Series dtype is identical. + check_index_type : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. + check_series_type : bool, default True + Whether to check the Series class is identical. + check_names : bool, default True + Whether to check the Series and Index names attribute. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + check_datetimelike_compat : bool, default False + Compare datetime-like which is comparable ignoring dtype. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_category_order : bool, default True + Whether to compare category order of internal Categoricals. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Series' + Specify object name being compared, internally used to show appropriate + assertion message. + check_index : bool, default True + Whether to check index equivalence. If False, then compare only values. + + .. versionadded:: 1.3.0 + check_like : bool, default False + If True, ignore the order of the index. Must be False if check_index is False. + Note: same labels must be with the same data. + + .. 
versionadded:: 1.5.0 + + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Series([1, 2, 3, 4]) + >>> b = pd.Series([1, 2, 3, 4]) + >>> tm.assert_series_equal(a, b) + """ + __tracebackhide__ = True + check_exact_index = False if check_exact is lib.no_default else check_exact + if ( + check_exact is lib.no_default + and rtol is lib.no_default + and atol is lib.no_default + ): + check_exact = ( + is_numeric_dtype(left.dtype) + and not is_float_dtype(left.dtype) + or is_numeric_dtype(right.dtype) + and not is_float_dtype(right.dtype) + ) + elif check_exact is lib.no_default: + check_exact = False + + rtol = rtol if rtol is not lib.no_default else 1.0e-5 + atol = atol if atol is not lib.no_default else 1.0e-8 + + if not check_index and check_like: + raise ValueError("check_like must be False if check_index is False") + + # instance validation + _check_isinstance(left, right, Series) + + if check_series_type: + assert_class_equal(left, right, obj=obj) + + # length comparison + if len(left) != len(right): + msg1 = f"{len(left)}, {left.index}" + msg2 = f"{len(right)}, {right.index}" + raise_assert_detail(obj, "Series length are different", msg1, msg2) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + if check_index: + # GH #38183 + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=check_exact_index, + check_categorical=check_categorical, + check_order=not check_like, + rtol=rtol, + atol=atol, + obj=f"{obj}.index", + ) + + if check_like: + left = left.reindex_like(right) + + if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)): + lidx = left.index + ridx = right.index + assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq) + + if check_dtype: + # We want to skip exact dtype checking when `check_categorical` + # is False. We'll still raise if only one is a `Categorical`, + # regardless of `check_categorical` + if ( + isinstance(left.dtype, CategoricalDtype) + and isinstance(right.dtype, CategoricalDtype) + and not check_categorical + ): + pass + else: + assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") + if check_exact: + left_values = left._values + right_values = right._values + # Only check exact if dtype is numeric + if isinstance(left_values, ExtensionArray) and isinstance( + right_values, ExtensionArray + ): + assert_extension_array_equal( + left_values, + right_values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + else: + # convert both to NumPy if not, check_dtype would raise earlier + lv, rv = left_values, right_values + if isinstance(left_values, ExtensionArray): + lv = left_values.to_numpy() + if isinstance(right_values, ExtensionArray): + rv = right_values.to_numpy() + assert_numpy_array_equal( + lv, + rv, + check_dtype=check_dtype, + obj=str(obj), + index_values=left.index, + ) + elif check_datetimelike_compat and ( + needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype) + ): + # we want to check only if we have compat dtypes + # e.g. integer and M|m are NOT compat, but we can simply check + # the values in that case + + # datetimelike may have different objects (e.g. datetime.datetime + # vs Timestamp) but will compare equal + if not Index(left._values).equals(Index(right._values)): + msg = ( + f"[datetimelike_compat=True] {left._values} " + f"is not equal to {right._values}." 
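Sketch (not part of the patch): check_like=True reindexes left to match right, so only the pairing of labels to values matters, not the index order.

    import pandas as pd
    import pandas._testing as tm

    a = pd.Series([1, 2], index=["x", "y"])
    b = pd.Series([2, 1], index=["y", "x"])
    tm.assert_series_equal(a, b, check_like=True)   # passes
    # tm.assert_series_equal(a, b)                  # would raise: index order differs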
+ ) + raise AssertionError(msg) + elif isinstance(left.dtype, IntervalDtype) and isinstance( + right.dtype, IntervalDtype + ): + assert_interval_array_equal(left.array, right.array) + elif isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=left.index, + ) + elif isinstance(left.dtype, ExtensionDtype) and isinstance( + right.dtype, ExtensionDtype + ): + assert_extension_array_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + elif is_extension_array_dtype_and_needs_i8_conversion( + left.dtype, right.dtype + ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype): + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype): + # DatetimeArray or TimedeltaArray + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + else: + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=left.index, + ) + + # metadata comparison + if check_names: + assert_attr_equal("name", left, right, obj=obj) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + assert_categorical_equal( + left._values, + right._values, + obj=f"{obj} category", + check_category_order=check_category_order, + ) + + +# This could be refactored to use the NDFrame.equals method +def assert_frame_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + check_index_type: bool | Literal["equiv"] = "equiv", + check_column_type: bool | Literal["equiv"] = "equiv", + check_frame_type: bool = True, + check_names: bool = True, + by_blocks: bool = False, + check_exact: bool | lib.NoDefault = lib.no_default, + check_datetimelike_compat: bool = False, + check_categorical: bool = True, + check_like: bool = False, + check_freq: bool = True, + check_flags: bool = True, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "DataFrame", +) -> None: + """ + Check that left and right DataFrame are equal. + + This function is intended to compare two DataFrames and output any + differences. It is mostly intended for use in unit tests. + Additional parameters allow varying the strictness of the + equality checks performed. + + Parameters + ---------- + left : DataFrame + First DataFrame to compare. + right : DataFrame + Second DataFrame to compare. + check_dtype : bool, default True + Whether to check the DataFrame dtype is identical. + check_index_type : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. + check_column_type : bool or {'equiv'}, default 'equiv' + Whether to check the columns class, dtype and inferred_type + are identical. Is passed as the ``exact`` argument of + :func:`assert_index_equal`. + check_frame_type : bool, default True + Whether to check the DataFrame class is identical. 
+ check_names : bool, default True + Whether to check that the `names` attribute for both the `index` + and `column` attributes of the DataFrame is identical. + by_blocks : bool, default False + Specify how to compare internal data. If False, compare by columns. + If True, compare by blocks. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + check_datetimelike_compat : bool, default False + Compare datetime-like which is comparable ignoring dtype. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_like : bool, default False + If True, ignore the order of index & columns. + Note: index labels must match their respective rows + (same as in columns) - same labels must be with the same data. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'DataFrame' + Specify object name being compared, internally used to show appropriate + assertion message. + + See Also + -------- + assert_series_equal : Equivalent method for asserting Series equality. + DataFrame.equals : Check DataFrame equality. + + Examples + -------- + This example shows comparing two DataFrames that are equal + but with columns of differing dtypes. + + >>> from pandas.testing import assert_frame_equal + >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) + >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}) + + df1 equals itself. + + >>> assert_frame_equal(df1, df1) + + df1 differs from df2 as column 'b' is of a different type. + + >>> assert_frame_equal(df1, df2) + Traceback (most recent call last): + ... + AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different + + Attribute "dtype" are different + [left]: int64 + [right]: float64 + + Ignore differing dtypes in columns with check_dtype. 
+ + >>> assert_frame_equal(df1, df2, check_dtype=False) + """ + __tracebackhide__ = True + _rtol = rtol if rtol is not lib.no_default else 1.0e-5 + _atol = atol if atol is not lib.no_default else 1.0e-8 + _check_exact = check_exact if check_exact is not lib.no_default else False + + # instance validation + _check_isinstance(left, right, DataFrame) + + if check_frame_type: + assert isinstance(left, type(right)) + # assert_class_equal(left, right, obj=obj) + + # shape comparison + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}" + ) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + # index comparison + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=_check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=_rtol, + atol=_atol, + obj=f"{obj}.index", + ) + + # column comparison + assert_index_equal( + left.columns, + right.columns, + exact=check_column_type, + check_names=check_names, + check_exact=_check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=_rtol, + atol=_atol, + obj=f"{obj}.columns", + ) + + if check_like: + left = left.reindex_like(right) + + # compare by blocks + if by_blocks: + rblocks = right._to_dict_of_blocks() + lblocks = left._to_dict_of_blocks() + for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): + assert dtype in lblocks + assert dtype in rblocks + assert_frame_equal( + lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj + ) + + # compare by columns + else: + for i, col in enumerate(left.columns): + # We have already checked that columns match, so we can do + # fast location-based lookups + lcol = left._ixs(i, axis=1) + rcol = right._ixs(i, axis=1) + + # GH #38183 + # use check_index=False, because we do not want to run + # assert_index_equal for each column, + # as we already checked it for the whole dataframe before. + assert_series_equal( + lcol, + rcol, + check_dtype=check_dtype, + check_index_type=check_index_type, + check_exact=check_exact, + check_names=check_names, + check_datetimelike_compat=check_datetimelike_compat, + check_categorical=check_categorical, + check_freq=check_freq, + obj=f'{obj}.iloc[:, {i}] (column name="{col}")', + rtol=rtol, + atol=atol, + check_index=False, + check_flags=False, + ) + + +def assert_equal(left, right, **kwargs) -> None: + """ + Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. + + Parameters + ---------- + left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray + The two items to be compared. + **kwargs + All keyword arguments are passed through to the underlying assert method. 
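Illustrative sketch of the check_like path of assert_frame_equal defined above (not part of the diffed sources): with check_like=True the index and column order checks are skipped and left is reindexed like right before the values are compared.

import pandas as pd
import pandas.testing as tm

df1 = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
df2 = df1.loc[["y", "x"]]  # same labels and data, rows in a different order

tm.assert_frame_equal(df1, df2, check_like=True)  # passes
# tm.assert_frame_equal(df1, df2)  # would raise: index order differs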
+ """ + __tracebackhide__ = True + + if isinstance(left, Index): + assert_index_equal(left, right, **kwargs) + if isinstance(left, (DatetimeIndex, TimedeltaIndex)): + assert left.freq == right.freq, (left.freq, right.freq) + elif isinstance(left, Series): + assert_series_equal(left, right, **kwargs) + elif isinstance(left, DataFrame): + assert_frame_equal(left, right, **kwargs) + elif isinstance(left, IntervalArray): + assert_interval_array_equal(left, right, **kwargs) + elif isinstance(left, PeriodArray): + assert_period_array_equal(left, right, **kwargs) + elif isinstance(left, DatetimeArray): + assert_datetime_array_equal(left, right, **kwargs) + elif isinstance(left, TimedeltaArray): + assert_timedelta_array_equal(left, right, **kwargs) + elif isinstance(left, ExtensionArray): + assert_extension_array_equal(left, right, **kwargs) + elif isinstance(left, np.ndarray): + assert_numpy_array_equal(left, right, **kwargs) + elif isinstance(left, str): + assert kwargs == {} + assert left == right + else: + assert kwargs == {} + assert_almost_equal(left, right) + + +def assert_sp_array_equal(left, right) -> None: + """ + Check that the left and right SparseArray are equal. + + Parameters + ---------- + left : SparseArray + right : SparseArray + """ + _check_isinstance(left, right, pd.arrays.SparseArray) + + assert_numpy_array_equal(left.sp_values, right.sp_values) + + # SparseIndex comparison + assert isinstance(left.sp_index, SparseIndex) + assert isinstance(right.sp_index, SparseIndex) + + left_index = left.sp_index + right_index = right.sp_index + + if not left_index.equals(right_index): + raise_assert_detail( + "SparseArray.index", "index are not equal", left_index, right_index + ) + else: + # Just ensure a + pass + + assert_attr_equal("fill_value", left, right) + assert_attr_equal("dtype", left, right) + assert_numpy_array_equal(left.to_dense(), right.to_dense()) + + +def assert_contains_all(iterable, dic) -> None: + for k in iterable: + assert k in dic, f"Did not contain item: {repr(k)}" + + +def assert_copy(iter1, iter2, **eql_kwargs) -> None: + """ + iter1, iter2: iterables that produce elements + comparable with assert_almost_equal + + Checks that the elements are equal, but not + the same object. (Does not check that items + in sequences are also not the same object) + """ + for elem1, elem2 in zip(iter1, iter2): + assert_almost_equal(elem1, elem2, **eql_kwargs) + msg = ( + f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be " + "different objects, but they were the same object." + ) + assert elem1 is not elem2, msg + + +def is_extension_array_dtype_and_needs_i8_conversion( + left_dtype: DtypeObj, right_dtype: DtypeObj +) -> bool: + """ + Checks that we have the combination of an ExtensionArraydtype and + a dtype that should be converted to int64 + + Returns + ------- + bool + + Related to issue #37609 + """ + return isinstance(left_dtype, ExtensionDtype) and needs_i8_conversion(right_dtype) + + +def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None: + """ + Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable, + ser[l_slc]. + """ + expected = ser.iloc[i_slc] + + assert_series_equal(ser.loc[l_slc], expected) + + if not is_integer_dtype(ser.index): + # For integer indices, .loc and plain getitem are position-based. 
+ assert_series_equal(ser[l_slc], expected) + + +def assert_metadata_equivalent( + left: DataFrame | Series, right: DataFrame | Series | None = None +) -> None: + """ + Check that ._metadata attributes are equivalent. + """ + for attr in left._metadata: + val = getattr(left, attr, None) + if right is None: + assert val is None + else: + assert val == getattr(right, attr, None) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/compat.py b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..cc352ba7b8f2f5a5548d4d5749d3b48ac838aced --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/compat.py @@ -0,0 +1,29 @@ +""" +Helpers for sharing tests between DataFrame/Series +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas import DataFrame + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +def get_dtype(obj) -> DtypeObj: + if isinstance(obj, DataFrame): + # Note: we are assuming only one column + return obj.dtypes.iat[0] + else: + return obj.dtype + + +def get_obj(df: DataFrame, klass): + """ + For sharing tests using frame_or_series, either return the DataFrame + unchanged or return it's first column as a Series. + """ + if klass is DataFrame: + return df + return df._ixs(0, axis=1) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_testing/contexts.py b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/contexts.py new file mode 100644 index 0000000000000000000000000000000000000000..eb6e4a917889aef221b2fc08eb2723c4fe568e04 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_testing/contexts.py @@ -0,0 +1,257 @@ +from __future__ import annotations + +from contextlib import contextmanager +import os +from pathlib import Path +import tempfile +from typing import ( + IO, + TYPE_CHECKING, + Any, +) +import uuid + +from pandas._config import using_copy_on_write + +from pandas.compat import PYPY +from pandas.errors import ChainedAssignmentError + +from pandas import set_option + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from collections.abc import Generator + + from pandas._typing import ( + BaseBuffer, + CompressionOptions, + FilePath, + ) + + +@contextmanager +def decompress_file( + path: FilePath | BaseBuffer, compression: CompressionOptions +) -> Generator[IO[bytes], None, None]: + """ + Open a compressed file and return a file object. + + Parameters + ---------- + path : str + The path where the file is read from. + + compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None} + Name of the decompression to use + + Returns + ------- + file object + """ + with get_handle(path, "rb", compression=compression, is_text=False) as handle: + yield handle.handle + + +@contextmanager +def set_timezone(tz: str) -> Generator[None, None, None]: + """ + Context manager for temporarily setting a timezone. + + Parameters + ---------- + tz : str + A string representing a valid timezone. + + Examples + -------- + >>> from datetime import datetime + >>> from dateutil.tz import tzlocal + >>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP + 'IST' + + >>> with set_timezone('US/Eastern'): + ... tzlocal().tzname(datetime(2021, 1, 1)) + ... 
+ 'EST' + """ + import time + + def setTZ(tz) -> None: + if tz is None: + try: + del os.environ["TZ"] + except KeyError: + pass + else: + os.environ["TZ"] = tz + time.tzset() + + orig_tz = os.environ.get("TZ") + setTZ(tz) + try: + yield + finally: + setTZ(orig_tz) + + +@contextmanager +def ensure_clean( + filename=None, return_filelike: bool = False, **kwargs: Any +) -> Generator[Any, None, None]: + """ + Gets a temporary path and agrees to remove on close. + + This implementation does not use tempfile.mkstemp to avoid having a file handle. + If the code using the returned path wants to delete the file itself, windows + requires that no program has a file handle to it. + + Parameters + ---------- + filename : str (optional) + suffix of the created file. + return_filelike : bool (default False) + if True, returns a file-like which is *always* cleaned. Necessary for + savefig and other functions which want to append extensions. + **kwargs + Additional keywords are passed to open(). + + """ + folder = Path(tempfile.gettempdir()) + + if filename is None: + filename = "" + filename = str(uuid.uuid4()) + filename + path = folder / filename + + path.touch() + + handle_or_str: str | IO = str(path) + encoding = kwargs.pop("encoding", None) + if return_filelike: + kwargs.setdefault("mode", "w+b") + if encoding is None and "b" not in kwargs["mode"]: + encoding = "utf-8" + handle_or_str = open(path, encoding=encoding, **kwargs) + + try: + yield handle_or_str + finally: + if not isinstance(handle_or_str, str): + handle_or_str.close() + if path.is_file(): + path.unlink() + + +@contextmanager +def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]: + """ + Context manager to temporarily register a CSV dialect for parsing CSV. + + Parameters + ---------- + name : str + The name of the dialect. + kwargs : mapping + The parameters for the dialect. + + Raises + ------ + ValueError : the name of the dialect conflicts with a builtin one. + + See Also + -------- + csv : Python's CSV library. 
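Illustrative sketch for the ensure_clean context manager defined above (not part of the diffed sources; the file name is hypothetical): it yields a unique temporary path whose name ends with the given suffix and removes the file when the block exits, even on error.

import pandas as pd
from pandas._testing.contexts import ensure_clean

with ensure_clean("example.csv") as path:
    pd.DataFrame({"a": [1, 2]}).to_csv(path, index=False)
    print(pd.read_csv(path))
# the temporary file has been deleted at this point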
+ """ + import csv + + _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} + + if name in _BUILTIN_DIALECTS: + raise ValueError("Cannot override builtin dialect.") + + csv.register_dialect(name, **kwargs) + try: + yield + finally: + csv.unregister_dialect(name) + + +@contextmanager +def use_numexpr(use, min_elements=None) -> Generator[None, None, None]: + from pandas.core.computation import expressions as expr + + if min_elements is None: + min_elements = expr._MIN_ELEMENTS + + olduse = expr.USE_NUMEXPR + oldmin = expr._MIN_ELEMENTS + set_option("compute.use_numexpr", use) + expr._MIN_ELEMENTS = min_elements + try: + yield + finally: + expr._MIN_ELEMENTS = oldmin + set_option("compute.use_numexpr", olduse) + + +def raises_chained_assignment_error(warn=True, extra_warnings=(), extra_match=()): + from pandas._testing import assert_produces_warning + + if not warn: + from contextlib import nullcontext + + return nullcontext() + + if PYPY and not extra_warnings: + from contextlib import nullcontext + + return nullcontext() + elif PYPY and extra_warnings: + return assert_produces_warning( + extra_warnings, + match="|".join(extra_match), + ) + else: + if using_copy_on_write(): + warning = ChainedAssignmentError + match = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment" + ) + else: + warning = FutureWarning # type: ignore[assignment] + # TODO update match + match = "ChainedAssignmentError" + if extra_warnings: + warning = (warning, *extra_warnings) # type: ignore[assignment] + return assert_produces_warning( + warning, + match="|".join((match, *extra_match)), + ) + + +def assert_cow_warning(warn=True, match=None, **kwargs): + """ + Assert that a warning is raised in the CoW warning mode. + + Parameters + ---------- + warn : bool, default True + By default, check that a warning is raised. Can be turned off by passing False. + match : str + The warning message to match against, if different from the default. + kwargs + Passed through to assert_produces_warning + """ + from pandas._testing import assert_produces_warning + + if not warn: + from contextlib import nullcontext + + return nullcontext() + + if not match: + match = "Setting a value on a view" + + return assert_produces_warning(FutureWarning, match=match, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/arrays/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/arrays/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a11755275d00e070bea6ab73a881b98d0b976551 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/arrays/__init__.py @@ -0,0 +1,53 @@ +""" +All of pandas' ExtensionArrays. + +See :ref:`extending.extension-types` for more. +""" +from pandas.core.arrays import ( + ArrowExtensionArray, + ArrowStringArray, + BooleanArray, + Categorical, + DatetimeArray, + FloatingArray, + IntegerArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + StringArray, + TimedeltaArray, +) + +__all__ = [ + "ArrowExtensionArray", + "ArrowStringArray", + "BooleanArray", + "Categorical", + "DatetimeArray", + "FloatingArray", + "IntegerArray", + "IntervalArray", + "NumpyExtensionArray", + "PeriodArray", + "SparseArray", + "StringArray", + "TimedeltaArray", +] + + +def __getattr__(name: str) -> type[NumpyExtensionArray]: + if name == "PandasArray": + # GH#53694 + import warnings + + from pandas.util._exceptions import find_stack_level + + warnings.warn( + "PandasArray has been renamed NumpyExtensionArray. 
Use that " + "instead. This alias will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return NumpyExtensionArray + raise AttributeError(f"module 'pandas.arrays' has no attribute '{name}'") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8f3cb57898e5c1ebb446b9a72d286dfc9c9e0dc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aa45ccbce0d812d87e3df5153db5bbb4dedad81 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57fddf3d0eaf8b034462066e444eefbb39ae765e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_calamine.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_calamine.py new file mode 100644 index 0000000000000000000000000000000000000000..5259469f7a569a1913aa49635b3c14e89a18d157 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_calamine.py @@ -0,0 +1,121 @@ +from __future__ import annotations + +from datetime import ( + date, + datetime, + time, + timedelta, +) +from typing import ( + TYPE_CHECKING, + Any, + Union, +) + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +import pandas as pd +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from python_calamine import ( + CalamineSheet, + CalamineWorkbook, + ) + + from pandas._typing import ( + FilePath, + NaTType, + ReadBuffer, + Scalar, + StorageOptions, + ) + +_CellValue = Union[int, float, str, bool, time, date, datetime, timedelta] + + +class CalamineReader(BaseExcelReader["CalamineWorkbook"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using calamine engine (xlsx/xls/xlsb/ods). + + Parameters + ---------- + filepath_or_buffer : str, path to be parsed or + an open readable stream. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
+ """ + import_optional_dependency("python_calamine") + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[CalamineWorkbook]: + from python_calamine import CalamineWorkbook + + return CalamineWorkbook + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any + ) -> CalamineWorkbook: + from python_calamine import load_workbook + + return load_workbook(filepath_or_buffer, **engine_kwargs) + + @property + def sheet_names(self) -> list[str]: + from python_calamine import SheetTypeEnum + + return [ + sheet.name + for sheet in self.book.sheets_metadata + if sheet.typ == SheetTypeEnum.WorkSheet + ] + + def get_sheet_by_name(self, name: str) -> CalamineSheet: + self.raise_if_bad_sheet_by_name(name) + return self.book.get_sheet_by_name(name) + + def get_sheet_by_index(self, index: int) -> CalamineSheet: + self.raise_if_bad_sheet_by_index(index) + return self.book.get_sheet_by_index(index) + + def get_sheet_data( + self, sheet: CalamineSheet, file_rows_needed: int | None = None + ) -> list[list[Scalar | NaTType | time]]: + def _convert_cell(value: _CellValue) -> Scalar | NaTType | time: + if isinstance(value, float): + val = int(value) + if val == value: + return val + else: + return value + elif isinstance(value, date): + return pd.Timestamp(value) + elif isinstance(value, timedelta): + return pd.Timedelta(value) + elif isinstance(value, time): + return value + + return value + + rows: list[list[_CellValue]] = sheet.to_python( + skip_empty_area=False, nrows=file_rows_needed + ) + data = [[_convert_cell(cell) for cell in row] for row in rows] + + return data diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py new file mode 100644 index 0000000000000000000000000000000000000000..bc7dca2d95b6b434279f8290fdf929e737f75459 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py @@ -0,0 +1,357 @@ +from __future__ import annotations + +from collections import defaultdict +import datetime +import json +from typing import ( + TYPE_CHECKING, + Any, + DefaultDict, + cast, + overload, +) + +from pandas.io.excel._base import ExcelWriter +from pandas.io.excel._util import ( + combine_kwargs, + validate_freeze_panes, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ExcelWriterIfSheetExists, + FilePath, + StorageOptions, + WriteExcelBuffer, + ) + + from pandas.io.formats.excel import ExcelCell + + +class ODSWriter(ExcelWriter): + _engine = "odf" + _supported_extensions = (".ods",) + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format=None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + **kwargs, + ) -> None: + from odf.opendocument import OpenDocumentSpreadsheet + + if mode == "a": + raise ValueError("Append mode is not supported with odf!") + + engine_kwargs = combine_kwargs(engine_kwargs, kwargs) + self._book = OpenDocumentSpreadsheet(**engine_kwargs) + + super().__init__( + path, + mode=mode, + storage_options=storage_options, + if_sheet_exists=if_sheet_exists, + engine_kwargs=engine_kwargs, + ) + + self._style_dict: dict[str, str] = {} + + @property + def book(self): + """ + Book instance 
of class odf.opendocument.OpenDocumentSpreadsheet. + + This attribute can be used to access engine-specific features. + """ + return self._book + + @property + def sheets(self) -> dict[str, Any]: + """Mapping of sheet names to sheet objects.""" + from odf.table import Table + + result = { + sheet.getAttribute("name"): sheet + for sheet in self.book.getElementsByType(Table) + } + return result + + def _save(self) -> None: + """ + Save workbook to disk. + """ + for sheet in self.sheets.values(): + self.book.spreadsheet.addElement(sheet) + self.book.save(self._handles.handle) + + def _write_cells( + self, + cells: list[ExcelCell], + sheet_name: str | None = None, + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + ) -> None: + """ + Write the frame cells using odf + """ + from odf.table import ( + Table, + TableCell, + TableRow, + ) + from odf.text import P + + sheet_name = self._get_sheet_name(sheet_name) + assert sheet_name is not None + + if sheet_name in self.sheets: + wks = self.sheets[sheet_name] + else: + wks = Table(name=sheet_name) + self.book.spreadsheet.addElement(wks) + + if validate_freeze_panes(freeze_panes): + freeze_panes = cast(tuple[int, int], freeze_panes) + self._create_freeze_panes(sheet_name, freeze_panes) + + for _ in range(startrow): + wks.addElement(TableRow()) + + rows: DefaultDict = defaultdict(TableRow) + col_count: DefaultDict = defaultdict(int) + + for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)): + # only add empty cells if the row is still empty + if not col_count[cell.row]: + for _ in range(startcol): + rows[cell.row].addElement(TableCell()) + + # fill with empty cells if needed + for _ in range(cell.col - col_count[cell.row]): + rows[cell.row].addElement(TableCell()) + col_count[cell.row] += 1 + + pvalue, tc = self._make_table_cell(cell) + rows[cell.row].addElement(tc) + col_count[cell.row] += 1 + p = P(text=pvalue) + tc.addElement(p) + + # add all rows to the sheet + if len(rows) > 0: + for row_nr in range(max(rows.keys()) + 1): + wks.addElement(rows[row_nr]) + + def _make_table_cell_attributes(self, cell) -> dict[str, int | str]: + """Convert cell attributes to OpenDocument attributes + + Parameters + ---------- + cell : ExcelCell + Spreadsheet cell data + + Returns + ------- + attributes : Dict[str, Union[int, str]] + Dictionary with attributes and attribute values + """ + attributes: dict[str, int | str] = {} + style_name = self._process_style(cell.style) + if style_name is not None: + attributes["stylename"] = style_name + if cell.mergestart is not None and cell.mergeend is not None: + attributes["numberrowsspanned"] = max(1, cell.mergestart) + attributes["numbercolumnsspanned"] = cell.mergeend + return attributes + + def _make_table_cell(self, cell) -> tuple[object, Any]: + """Convert cell data to an OpenDocument spreadsheet cell + + Parameters + ---------- + cell : ExcelCell + Spreadsheet cell data + + Returns + ------- + pvalue, cell : Tuple[str, TableCell] + Display value, Cell value + """ + from odf.table import TableCell + + attributes = self._make_table_cell_attributes(cell) + val, fmt = self._value_with_fmt(cell.val) + pvalue = value = val + if isinstance(val, bool): + value = str(val).lower() + pvalue = str(val).upper() + return ( + pvalue, + TableCell( + valuetype="boolean", + booleanvalue=value, + attributes=attributes, + ), + ) + elif isinstance(val, datetime.datetime): + # Fast formatting + value = val.isoformat() + # Slow but locale-dependent + pvalue = val.strftime("%c") + return ( + 
pvalue, + TableCell(valuetype="date", datevalue=value, attributes=attributes), + ) + elif isinstance(val, datetime.date): + # Fast formatting + value = f"{val.year}-{val.month:02d}-{val.day:02d}" + # Slow but locale-dependent + pvalue = val.strftime("%x") + return ( + pvalue, + TableCell(valuetype="date", datevalue=value, attributes=attributes), + ) + elif isinstance(val, str): + return ( + pvalue, + TableCell( + valuetype="string", + stringvalue=value, + attributes=attributes, + ), + ) + else: + return ( + pvalue, + TableCell( + valuetype="float", + value=value, + attributes=attributes, + ), + ) + + @overload + def _process_style(self, style: dict[str, Any]) -> str: + ... + + @overload + def _process_style(self, style: None) -> None: + ... + + def _process_style(self, style: dict[str, Any] | None) -> str | None: + """Convert a style dictionary to a OpenDocument style sheet + + Parameters + ---------- + style : Dict + Style dictionary + + Returns + ------- + style_key : str + Unique style key for later reference in sheet + """ + from odf.style import ( + ParagraphProperties, + Style, + TableCellProperties, + TextProperties, + ) + + if style is None: + return None + style_key = json.dumps(style) + if style_key in self._style_dict: + return self._style_dict[style_key] + name = f"pd{len(self._style_dict)+1}" + self._style_dict[style_key] = name + odf_style = Style(name=name, family="table-cell") + if "font" in style: + font = style["font"] + if font.get("bold", False): + odf_style.addElement(TextProperties(fontweight="bold")) + if "borders" in style: + borders = style["borders"] + for side, thickness in borders.items(): + thickness_translation = {"thin": "0.75pt solid #000000"} + odf_style.addElement( + TableCellProperties( + attributes={f"border{side}": thickness_translation[thickness]} + ) + ) + if "alignment" in style: + alignment = style["alignment"] + horizontal = alignment.get("horizontal") + if horizontal: + odf_style.addElement(ParagraphProperties(textalign=horizontal)) + vertical = alignment.get("vertical") + if vertical: + odf_style.addElement(TableCellProperties(verticalalign=vertical)) + self.book.styles.addElement(odf_style) + return name + + def _create_freeze_panes( + self, sheet_name: str, freeze_panes: tuple[int, int] + ) -> None: + """ + Create freeze panes in the sheet. 
+ + Parameters + ---------- + sheet_name : str + Name of the spreadsheet + freeze_panes : tuple of (int, int) + Freeze pane location x and y + """ + from odf.config import ( + ConfigItem, + ConfigItemMapEntry, + ConfigItemMapIndexed, + ConfigItemMapNamed, + ConfigItemSet, + ) + + config_item_set = ConfigItemSet(name="ooo:view-settings") + self.book.settings.addElement(config_item_set) + + config_item_map_indexed = ConfigItemMapIndexed(name="Views") + config_item_set.addElement(config_item_map_indexed) + + config_item_map_entry = ConfigItemMapEntry() + config_item_map_indexed.addElement(config_item_map_entry) + + config_item_map_named = ConfigItemMapNamed(name="Tables") + config_item_map_entry.addElement(config_item_map_named) + + config_item_map_entry = ConfigItemMapEntry(name=sheet_name) + config_item_map_named.addElement(config_item_map_entry) + + config_item_map_entry.addElement( + ConfigItem(name="HorizontalSplitMode", type="short", text="2") + ) + config_item_map_entry.addElement( + ConfigItem(name="VerticalSplitMode", type="short", text="2") + ) + config_item_map_entry.addElement( + ConfigItem( + name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0]) + ) + ) + config_item_map_entry.addElement( + ConfigItem( + name="VerticalSplitPosition", type="int", text=str(freeze_panes[1]) + ) + ) + config_item_map_entry.addElement( + ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0])) + ) + config_item_map_entry.addElement( + ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1])) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py new file mode 100644 index 0000000000000000000000000000000000000000..c546443868a62aed062bf3fd41d80933e4fbc59e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py @@ -0,0 +1,639 @@ +from __future__ import annotations + +import mmap +from typing import ( + TYPE_CHECKING, + Any, + cast, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import ( + BaseExcelReader, + ExcelWriter, +) +from pandas.io.excel._util import ( + combine_kwargs, + validate_freeze_panes, +) + +if TYPE_CHECKING: + from openpyxl import Workbook + from openpyxl.descriptors.serialisable import Serialisable + + from pandas._typing import ( + ExcelWriterIfSheetExists, + FilePath, + ReadBuffer, + Scalar, + StorageOptions, + WriteExcelBuffer, + ) + + +class OpenpyxlWriter(ExcelWriter): + _engine = "openpyxl" + _supported_extensions = (".xlsx", ".xlsm") + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format: str | None = None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + **kwargs, + ) -> None: + # Use the openpyxl module as the Excel writer. 
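Illustrative sketch for the ODSWriter defined above (not part of the diffed sources; the output path is hypothetical and odfpy must be installed): the writer is selected through the "odf" engine, and a freeze_panes tuple is translated into the ODF view settings built by _create_freeze_panes.

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df.to_excel("report.ods", engine="odf", freeze_panes=(1, 0))  # freeze the header row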
+ from openpyxl.workbook import Workbook + + engine_kwargs = combine_kwargs(engine_kwargs, kwargs) + + super().__init__( + path, + mode=mode, + storage_options=storage_options, + if_sheet_exists=if_sheet_exists, + engine_kwargs=engine_kwargs, + ) + + # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from + # the file and later write to it + if "r+" in self._mode: # Load from existing workbook + from openpyxl import load_workbook + + try: + self._book = load_workbook(self._handles.handle, **engine_kwargs) + except TypeError: + self._handles.handle.close() + raise + self._handles.handle.seek(0) + else: + # Create workbook object with default optimized_write=True. + try: + self._book = Workbook(**engine_kwargs) + except TypeError: + self._handles.handle.close() + raise + + if self.book.worksheets: + self.book.remove(self.book.worksheets[0]) + + @property + def book(self) -> Workbook: + """ + Book instance of class openpyxl.workbook.Workbook. + + This attribute can be used to access engine-specific features. + """ + return self._book + + @property + def sheets(self) -> dict[str, Any]: + """Mapping of sheet names to sheet objects.""" + result = {name: self.book[name] for name in self.book.sheetnames} + return result + + def _save(self) -> None: + """ + Save workbook to disk. + """ + self.book.save(self._handles.handle) + if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap): + # truncate file to the written content + self._handles.handle.truncate() + + @classmethod + def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]: + """ + Convert a style_dict to a set of kwargs suitable for initializing + or updating-on-copy an openpyxl v2 style object. + + Parameters + ---------- + style_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'font' + 'fill' + 'border' ('borders') + 'alignment' + 'number_format' + 'protection' + + Returns + ------- + style_kwargs : dict + A dict with the same, normalized keys as ``style_dict`` but each + value has been replaced with a native openpyxl style object of the + appropriate class. + """ + _style_key_map = {"borders": "border"} + + style_kwargs: dict[str, Serialisable] = {} + for k, v in style_dict.items(): + k = _style_key_map.get(k, k) + _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None) + new_v = _conv_to_x(v) + if new_v: + style_kwargs[k] = new_v + + return style_kwargs + + @classmethod + def _convert_to_color(cls, color_spec): + """ + Convert ``color_spec`` to an openpyxl v2 Color object. + + Parameters + ---------- + color_spec : str, dict + A 32-bit ARGB hex string, or a dict with zero or more of the + following keys. + 'rgb' + 'indexed' + 'auto' + 'theme' + 'tint' + 'index' + 'type' + + Returns + ------- + color : openpyxl.styles.Color + """ + from openpyxl.styles import Color + + if isinstance(color_spec, str): + return Color(color_spec) + else: + return Color(**color_spec) + + @classmethod + def _convert_to_font(cls, font_dict): + """ + Convert ``font_dict`` to an openpyxl v2 Font object. + + Parameters + ---------- + font_dict : dict + A dict with zero or more of the following keys (or their synonyms). 
+ 'name' + 'size' ('sz') + 'bold' ('b') + 'italic' ('i') + 'underline' ('u') + 'strikethrough' ('strike') + 'color' + 'vertAlign' ('vertalign') + 'charset' + 'scheme' + 'family' + 'outline' + 'shadow' + 'condense' + + Returns + ------- + font : openpyxl.styles.Font + """ + from openpyxl.styles import Font + + _font_key_map = { + "sz": "size", + "b": "bold", + "i": "italic", + "u": "underline", + "strike": "strikethrough", + "vertalign": "vertAlign", + } + + font_kwargs = {} + for k, v in font_dict.items(): + k = _font_key_map.get(k, k) + if k == "color": + v = cls._convert_to_color(v) + font_kwargs[k] = v + + return Font(**font_kwargs) + + @classmethod + def _convert_to_stop(cls, stop_seq): + """ + Convert ``stop_seq`` to a list of openpyxl v2 Color objects, + suitable for initializing the ``GradientFill`` ``stop`` parameter. + + Parameters + ---------- + stop_seq : iterable + An iterable that yields objects suitable for consumption by + ``_convert_to_color``. + + Returns + ------- + stop : list of openpyxl.styles.Color + """ + return map(cls._convert_to_color, stop_seq) + + @classmethod + def _convert_to_fill(cls, fill_dict: dict[str, Any]): + """ + Convert ``fill_dict`` to an openpyxl v2 Fill object. + + Parameters + ---------- + fill_dict : dict + A dict with one or more of the following keys (or their synonyms), + 'fill_type' ('patternType', 'patterntype') + 'start_color' ('fgColor', 'fgcolor') + 'end_color' ('bgColor', 'bgcolor') + or one or more of the following keys (or their synonyms). + 'type' ('fill_type') + 'degree' + 'left' + 'right' + 'top' + 'bottom' + 'stop' + + Returns + ------- + fill : openpyxl.styles.Fill + """ + from openpyxl.styles import ( + GradientFill, + PatternFill, + ) + + _pattern_fill_key_map = { + "patternType": "fill_type", + "patterntype": "fill_type", + "fgColor": "start_color", + "fgcolor": "start_color", + "bgColor": "end_color", + "bgcolor": "end_color", + } + + _gradient_fill_key_map = {"fill_type": "type"} + + pfill_kwargs = {} + gfill_kwargs = {} + for k, v in fill_dict.items(): + pk = _pattern_fill_key_map.get(k) + gk = _gradient_fill_key_map.get(k) + if pk in ["start_color", "end_color"]: + v = cls._convert_to_color(v) + if gk == "stop": + v = cls._convert_to_stop(v) + if pk: + pfill_kwargs[pk] = v + elif gk: + gfill_kwargs[gk] = v + else: + pfill_kwargs[k] = v + gfill_kwargs[k] = v + + try: + return PatternFill(**pfill_kwargs) + except TypeError: + return GradientFill(**gfill_kwargs) + + @classmethod + def _convert_to_side(cls, side_spec): + """ + Convert ``side_spec`` to an openpyxl v2 Side object. + + Parameters + ---------- + side_spec : str, dict + A string specifying the border style, or a dict with zero or more + of the following keys (or their synonyms). + 'style' ('border_style') + 'color' + + Returns + ------- + side : openpyxl.styles.Side + """ + from openpyxl.styles import Side + + _side_key_map = {"border_style": "style"} + + if isinstance(side_spec, str): + return Side(style=side_spec) + + side_kwargs = {} + for k, v in side_spec.items(): + k = _side_key_map.get(k, k) + if k == "color": + v = cls._convert_to_color(v) + side_kwargs[k] = v + + return Side(**side_kwargs) + + @classmethod + def _convert_to_border(cls, border_dict): + """ + Convert ``border_dict`` to an openpyxl v2 Border object. + + Parameters + ---------- + border_dict : dict + A dict with zero or more of the following keys (or their synonyms). 
+ 'left' + 'right' + 'top' + 'bottom' + 'diagonal' + 'diagonal_direction' + 'vertical' + 'horizontal' + 'diagonalUp' ('diagonalup') + 'diagonalDown' ('diagonaldown') + 'outline' + + Returns + ------- + border : openpyxl.styles.Border + """ + from openpyxl.styles import Border + + _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"} + + border_kwargs = {} + for k, v in border_dict.items(): + k = _border_key_map.get(k, k) + if k == "color": + v = cls._convert_to_color(v) + if k in ["left", "right", "top", "bottom", "diagonal"]: + v = cls._convert_to_side(v) + border_kwargs[k] = v + + return Border(**border_kwargs) + + @classmethod + def _convert_to_alignment(cls, alignment_dict): + """ + Convert ``alignment_dict`` to an openpyxl v2 Alignment object. + + Parameters + ---------- + alignment_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'horizontal' + 'vertical' + 'text_rotation' + 'wrap_text' + 'shrink_to_fit' + 'indent' + Returns + ------- + alignment : openpyxl.styles.Alignment + """ + from openpyxl.styles import Alignment + + return Alignment(**alignment_dict) + + @classmethod + def _convert_to_number_format(cls, number_format_dict): + """ + Convert ``number_format_dict`` to an openpyxl v2.1.0 number format + initializer. + + Parameters + ---------- + number_format_dict : dict + A dict with zero or more of the following keys. + 'format_code' : str + + Returns + ------- + number_format : str + """ + return number_format_dict["format_code"] + + @classmethod + def _convert_to_protection(cls, protection_dict): + """ + Convert ``protection_dict`` to an openpyxl v2 Protection object. + + Parameters + ---------- + protection_dict : dict + A dict with zero or more of the following keys. + 'locked' + 'hidden' + + Returns + ------- + """ + from openpyxl.styles import Protection + + return Protection(**protection_dict) + + def _write_cells( + self, + cells, + sheet_name: str | None = None, + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + ) -> None: + # Write the frame cells using openpyxl. + sheet_name = self._get_sheet_name(sheet_name) + + _style_cache: dict[str, dict[str, Serialisable]] = {} + + if sheet_name in self.sheets and self._if_sheet_exists != "new": + if "r+" in self._mode: + if self._if_sheet_exists == "replace": + old_wks = self.sheets[sheet_name] + target_index = self.book.index(old_wks) + del self.book[sheet_name] + wks = self.book.create_sheet(sheet_name, target_index) + elif self._if_sheet_exists == "error": + raise ValueError( + f"Sheet '{sheet_name}' already exists and " + f"if_sheet_exists is set to 'error'." + ) + elif self._if_sheet_exists == "overlay": + wks = self.sheets[sheet_name] + else: + raise ValueError( + f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. " + "Valid options are 'error', 'new', 'replace' and 'overlay'." 
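Illustrative sketch for the style converters above (not part of the diffed sources; _convert_to_style_kwargs is a private helper and openpyxl must be installed): a nested style dict is normalized into native openpyxl style objects keyed by attribute name.

from pandas.io.excel._openpyxl import OpenpyxlWriter

style = {
    "font": {"bold": True, "color": "00FF0000"},
    "borders": {"bottom": {"style": "thin", "color": "FF000000"}},
    "alignment": {"horizontal": "center"},
}
kwargs = OpenpyxlWriter._convert_to_style_kwargs(style)
# kwargs now maps "font"/"border"/"alignment" to Font/Border/Alignment objects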
+ ) + else: + wks = self.sheets[sheet_name] + else: + wks = self.book.create_sheet() + wks.title = sheet_name + + if validate_freeze_panes(freeze_panes): + freeze_panes = cast(tuple[int, int], freeze_panes) + wks.freeze_panes = wks.cell( + row=freeze_panes[0] + 1, column=freeze_panes[1] + 1 + ) + + for cell in cells: + xcell = wks.cell( + row=startrow + cell.row + 1, column=startcol + cell.col + 1 + ) + xcell.value, fmt = self._value_with_fmt(cell.val) + if fmt: + xcell.number_format = fmt + + style_kwargs: dict[str, Serialisable] | None = {} + if cell.style: + key = str(cell.style) + style_kwargs = _style_cache.get(key) + if style_kwargs is None: + style_kwargs = self._convert_to_style_kwargs(cell.style) + _style_cache[key] = style_kwargs + + if style_kwargs: + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + + if cell.mergestart is not None and cell.mergeend is not None: + wks.merge_cells( + start_row=startrow + cell.row + 1, + start_column=startcol + cell.col + 1, + end_column=startcol + cell.mergeend + 1, + end_row=startrow + cell.mergestart + 1, + ) + + # When cells are merged only the top-left cell is preserved + # The behaviour of the other cells in a merged range is + # undefined + if style_kwargs: + first_row = startrow + cell.row + 1 + last_row = startrow + cell.mergestart + 1 + first_col = startcol + cell.col + 1 + last_col = startcol + cell.mergeend + 1 + + for row in range(first_row, last_row + 1): + for col in range(first_col, last_col + 1): + if row == first_row and col == first_col: + # Ignore first cell. It is already handled. + continue + xcell = wks.cell(column=col, row=row) + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + + +class OpenpyxlReader(BaseExcelReader["Workbook"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using openpyxl engine. + + Parameters + ---------- + filepath_or_buffer : str, path object or Workbook + Object to be parsed. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
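Illustrative sketch for the if_sheet_exists handling in _write_cells above (not part of the diffed sources; book.xlsx is a hypothetical, pre-existing file): in append mode, "overlay" writes into the existing sheet while "replace" recreates it at its old position.

import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
with pd.ExcelWriter("book.xlsx", mode="a", engine="openpyxl",
                    if_sheet_exists="overlay") as writer:
    df.to_excel(writer, sheet_name="Sheet1", startrow=5, index=False)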
+ """ + import_optional_dependency("openpyxl") + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[Workbook]: + from openpyxl import Workbook + + return Workbook + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ) -> Workbook: + from openpyxl import load_workbook + + default_kwargs = {"read_only": True, "data_only": True, "keep_links": False} + + return load_workbook( + filepath_or_buffer, + **(default_kwargs | engine_kwargs), + ) + + @property + def sheet_names(self) -> list[str]: + return [sheet.title for sheet in self.book.worksheets] + + def get_sheet_by_name(self, name: str): + self.raise_if_bad_sheet_by_name(name) + return self.book[name] + + def get_sheet_by_index(self, index: int): + self.raise_if_bad_sheet_by_index(index) + return self.book.worksheets[index] + + def _convert_cell(self, cell) -> Scalar: + from openpyxl.cell.cell import ( + TYPE_ERROR, + TYPE_NUMERIC, + ) + + if cell.value is None: + return "" # compat with xlrd + elif cell.data_type == TYPE_ERROR: + return np.nan + elif cell.data_type == TYPE_NUMERIC: + val = int(cell.value) + if val == cell.value: + return val + return float(cell.value) + + return cell.value + + def get_sheet_data( + self, sheet, file_rows_needed: int | None = None + ) -> list[list[Scalar]]: + if self.book.read_only: + sheet.reset_dimensions() + + data: list[list[Scalar]] = [] + last_row_with_data = -1 + for row_number, row in enumerate(sheet.rows): + converted_row = [self._convert_cell(cell) for cell in row] + while converted_row and converted_row[-1] == "": + # trim trailing empty elements + converted_row.pop() + if converted_row: + last_row_with_data = row_number + data.append(converted_row) + if file_rows_needed is not None and len(data) >= file_rows_needed: + break + + # Trim trailing empty rows + data = data[: last_row_with_data + 1] + + if len(data) > 0: + # extend rows to max width + max_width = max(len(data_row) for data_row in data) + if min(len(data_row) for data_row in data) < max_width: + empty_cell: list[Scalar] = [""] + data = [ + data_row + (max_width - len(data_row)) * empty_cell + for data_row in data + ] + + return data diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/csvs.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/csvs.py new file mode 100644 index 0000000000000000000000000000000000000000..50503e862ef433901f40715987c2105f6f16263a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/csvs.py @@ -0,0 +1,330 @@ +""" +Module for formatting output data into CSV files. 
+""" + +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + Iterator, + Sequence, +) +import csv as csvlib +import os +from typing import ( + TYPE_CHECKING, + Any, + cast, +) + +import numpy as np + +from pandas._libs import writers as libwriters +from pandas._typing import SequenceNotStr +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.generic import ( + ABCDatetimeIndex, + ABCIndex, + ABCMultiIndex, + ABCPeriodIndex, +) +from pandas.core.dtypes.missing import notna + +from pandas.core.indexes.api import Index + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + FilePath, + FloatFormatType, + IndexLabel, + StorageOptions, + WriteBuffer, + npt, + ) + + from pandas.io.formats.format import DataFrameFormatter + + +_DEFAULT_CHUNKSIZE_CELLS = 100_000 + + +class CSVFormatter: + cols: npt.NDArray[np.object_] + + def __init__( + self, + formatter: DataFrameFormatter, + path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "", + sep: str = ",", + cols: Sequence[Hashable] | None = None, + index_label: IndexLabel | None = None, + mode: str = "w", + encoding: str | None = None, + errors: str = "strict", + compression: CompressionOptions = "infer", + quoting: int | None = None, + lineterminator: str | None = "\n", + chunksize: int | None = None, + quotechar: str | None = '"', + date_format: str | None = None, + doublequote: bool = True, + escapechar: str | None = None, + storage_options: StorageOptions | None = None, + ) -> None: + self.fmt = formatter + + self.obj = self.fmt.frame + + self.filepath_or_buffer = path_or_buf + self.encoding = encoding + self.compression: CompressionOptions = compression + self.mode = mode + self.storage_options = storage_options + + self.sep = sep + self.index_label = self._initialize_index_label(index_label) + self.errors = errors + self.quoting = quoting or csvlib.QUOTE_MINIMAL + self.quotechar = self._initialize_quotechar(quotechar) + self.doublequote = doublequote + self.escapechar = escapechar + self.lineterminator = lineterminator or os.linesep + self.date_format = date_format + self.cols = self._initialize_columns(cols) + self.chunksize = self._initialize_chunksize(chunksize) + + @property + def na_rep(self) -> str: + return self.fmt.na_rep + + @property + def float_format(self) -> FloatFormatType | None: + return self.fmt.float_format + + @property + def decimal(self) -> str: + return self.fmt.decimal + + @property + def header(self) -> bool | SequenceNotStr[str]: + return self.fmt.header + + @property + def index(self) -> bool: + return self.fmt.index + + def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel: + if index_label is not False: + if index_label is None: + return self._get_index_label_from_obj() + elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)): + # given a string for a DF with Index + return [index_label] + return index_label + + def _get_index_label_from_obj(self) -> Sequence[Hashable]: + if isinstance(self.obj.index, ABCMultiIndex): + return self._get_index_label_multiindex() + else: + return self._get_index_label_flat() + + def _get_index_label_multiindex(self) -> Sequence[Hashable]: + return [name or "" for name in self.obj.index.names] + + def _get_index_label_flat(self) -> Sequence[Hashable]: + index_label = self.obj.index.name + return [""] if index_label is None else [index_label] + + def _initialize_quotechar(self, quotechar: str | None) -> str | 
None: + if self.quoting != csvlib.QUOTE_NONE: + # prevents crash in _csv + return quotechar + return None + + @property + def has_mi_columns(self) -> bool: + return bool(isinstance(self.obj.columns, ABCMultiIndex)) + + def _initialize_columns( + self, cols: Iterable[Hashable] | None + ) -> npt.NDArray[np.object_]: + # validate mi options + if self.has_mi_columns: + if cols is not None: + msg = "cannot specify cols with a MultiIndex on the columns" + raise TypeError(msg) + + if cols is not None: + if isinstance(cols, ABCIndex): + cols = cols._get_values_for_csv(**self._number_format) + else: + cols = list(cols) + self.obj = self.obj.loc[:, cols] + + # update columns to include possible multiplicity of dupes + # and make sure cols is just a list of labels + new_cols = self.obj.columns + return new_cols._get_values_for_csv(**self._number_format) + + def _initialize_chunksize(self, chunksize: int | None) -> int: + if chunksize is None: + return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1 + return int(chunksize) + + @property + def _number_format(self) -> dict[str, Any]: + """Dictionary used for storing number formatting settings.""" + return { + "na_rep": self.na_rep, + "float_format": self.float_format, + "date_format": self.date_format, + "quoting": self.quoting, + "decimal": self.decimal, + } + + @cache_readonly + def data_index(self) -> Index: + data_index = self.obj.index + if ( + isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex)) + and self.date_format is not None + ): + data_index = Index( + [x.strftime(self.date_format) if notna(x) else "" for x in data_index] + ) + elif isinstance(data_index, ABCMultiIndex): + data_index = data_index.remove_unused_levels() + return data_index + + @property + def nlevels(self) -> int: + if self.index: + return getattr(self.data_index, "nlevels", 1) + else: + return 0 + + @property + def _has_aliases(self) -> bool: + return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex)) + + @property + def _need_to_save_header(self) -> bool: + return bool(self._has_aliases or self.header) + + @property + def write_cols(self) -> SequenceNotStr[Hashable]: + if self._has_aliases: + assert not isinstance(self.header, bool) + if len(self.header) != len(self.cols): + raise ValueError( + f"Writing {len(self.cols)} cols but got {len(self.header)} aliases" + ) + return self.header + else: + # self.cols is an ndarray derived from Index._get_values_for_csv, + # so its entries are strings, i.e. hashable + return cast(SequenceNotStr[Hashable], self.cols) + + @property + def encoded_labels(self) -> list[Hashable]: + encoded_labels: list[Hashable] = [] + + if self.index and self.index_label: + assert isinstance(self.index_label, Sequence) + encoded_labels = list(self.index_label) + + if not self.has_mi_columns or self._has_aliases: + encoded_labels += list(self.write_cols) + + return encoded_labels + + def save(self) -> None: + """ + Create the writer & save. 
+ """ + # apply compression and byte/text conversion + with get_handle( + self.filepath_or_buffer, + self.mode, + encoding=self.encoding, + errors=self.errors, + compression=self.compression, + storage_options=self.storage_options, + ) as handles: + # Note: self.encoding is irrelevant here + self.writer = csvlib.writer( + handles.handle, + lineterminator=self.lineterminator, + delimiter=self.sep, + quoting=self.quoting, + doublequote=self.doublequote, + escapechar=self.escapechar, + quotechar=self.quotechar, + ) + + self._save() + + def _save(self) -> None: + if self._need_to_save_header: + self._save_header() + self._save_body() + + def _save_header(self) -> None: + if not self.has_mi_columns or self._has_aliases: + self.writer.writerow(self.encoded_labels) + else: + for row in self._generate_multiindex_header_rows(): + self.writer.writerow(row) + + def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]: + columns = self.obj.columns + for i in range(columns.nlevels): + # we need at least 1 index column to write our col names + col_line = [] + if self.index: + # name is the first column + col_line.append(columns.names[i]) + + if isinstance(self.index_label, list) and len(self.index_label) > 1: + col_line.extend([""] * (len(self.index_label) - 1)) + + col_line.extend(columns._get_level_values(i)) + yield col_line + + # Write out the index line if it's not empty. + # Otherwise, we will print out an extraneous + # blank line between the mi and the data rows. + if self.encoded_labels and set(self.encoded_labels) != {""}: + yield self.encoded_labels + [""] * len(columns) + + def _save_body(self) -> None: + nrows = len(self.data_index) + chunks = (nrows // self.chunksize) + 1 + for i in range(chunks): + start_i = i * self.chunksize + end_i = min(start_i + self.chunksize, nrows) + if start_i >= end_i: + break + self._save_chunk(start_i, end_i) + + def _save_chunk(self, start_i: int, end_i: int) -> None: + # create the data for a chunk + slicer = slice(start_i, end_i) + df = self.obj.iloc[slicer] + + res = df._get_values_for_csv(**self._number_format) + data = list(res._iter_column_arrays()) + + ix = self.data_index[slicer]._get_values_for_csv(**self._number_format) + libwriters.write_csv_rows( + data, + ix, + self.nlevels, + self.cols, + self.writer, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f4e7a62834b57c151189cdd2994a55d1ad9f7de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__init__.py @@ -0,0 +1,15 @@ +from pandas.io.json._json import ( + read_json, + to_json, + ujson_dumps, + ujson_loads, +) +from pandas.io.json._table_schema import build_table_schema + +__all__ = [ + "ujson_dumps", + "ujson_loads", + "read_json", + "to_json", + "build_table_schema", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8ef30d1e0b47526e5258bbb1b2178c11251a461 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53cad2436daa63dc3ea667f27ded0a56f03c7fe5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9aceb12976cd29b00c5f61b5fbf07546fcc81db Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e302a333ce4bf3fd9b9863d06914326994e8ed46 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_json.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_json.py new file mode 100644 index 0000000000000000000000000000000000000000..9414f4521502999881238cf0b6e0dcb5c7ab1ad5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_json.py @@ -0,0 +1,1505 @@ +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from collections import abc +from io import StringIO +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Literal, + TypeVar, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.json import ( + ujson_dumps, + ujson_loads, +) +from pandas._libs.tslibs import iNaT +from pandas.compat._optional import import_optional_dependency +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + ensure_str, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import PeriodDtype + +from pandas import ( + ArrowDtype, + DataFrame, + Index, + MultiIndex, + Series, + isna, + notna, + to_datetime, +) +from pandas.core.reshape.concat import concat +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + IOHandles, + dedup_names, + extension_to_compression, + file_exists, + get_handle, + is_fsspec_url, + is_potential_multi_index, + is_url, + stringify_path, +) +from pandas.io.json._normalize import convert_to_line_delimits +from pandas.io.json._table_schema import ( + build_table_schema, + parse_table_schema, +) +from pandas.io.parsers.readers import validate_integer + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + ) + from types import TracebackType + + from pandas._typing import ( + CompressionOptions, + DtypeArg, + DtypeBackend, + FilePath, + IndexLabel, + JSONEngine, + JSONSerializable, + ReadBuffer, + Self, + StorageOptions, + WriteBuffer, + ) + + from pandas.core.generic import NDFrame + +FrameSeriesStrT = TypeVar("FrameSeriesStrT", bound=Literal["frame", "series"]) + + +# interface to/from +@overload +def to_json( 
+ path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes], + obj: NDFrame, + orient: str | None = ..., + date_format: str = ..., + double_precision: int = ..., + force_ascii: bool = ..., + date_unit: str = ..., + default_handler: Callable[[Any], JSONSerializable] | None = ..., + lines: bool = ..., + compression: CompressionOptions = ..., + index: bool | None = ..., + indent: int = ..., + storage_options: StorageOptions = ..., + mode: Literal["a", "w"] = ..., +) -> None: + ... + + +@overload +def to_json( + path_or_buf: None, + obj: NDFrame, + orient: str | None = ..., + date_format: str = ..., + double_precision: int = ..., + force_ascii: bool = ..., + date_unit: str = ..., + default_handler: Callable[[Any], JSONSerializable] | None = ..., + lines: bool = ..., + compression: CompressionOptions = ..., + index: bool | None = ..., + indent: int = ..., + storage_options: StorageOptions = ..., + mode: Literal["a", "w"] = ..., +) -> str: + ... + + +def to_json( + path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] | None, + obj: NDFrame, + orient: str | None = None, + date_format: str = "epoch", + double_precision: int = 10, + force_ascii: bool = True, + date_unit: str = "ms", + default_handler: Callable[[Any], JSONSerializable] | None = None, + lines: bool = False, + compression: CompressionOptions = "infer", + index: bool | None = None, + indent: int = 0, + storage_options: StorageOptions | None = None, + mode: Literal["a", "w"] = "w", +) -> str | None: + if orient in ["records", "values"] and index is True: + raise ValueError( + "'index=True' is only valid when 'orient' is 'split', 'table', " + "'index', or 'columns'." + ) + elif orient in ["index", "columns"] and index is False: + raise ValueError( + "'index=False' is only valid when 'orient' is 'split', 'table', " + "'records', or 'values'." + ) + elif index is None: + # will be ignored for orient='records' and 'values' + index = True + + if lines and orient != "records": + raise ValueError("'lines' keyword only valid when 'orient' is records") + + if mode not in ["a", "w"]: + msg = ( + f"mode={mode} is not a valid option." + "Only 'w' and 'a' are currently supported." 
+ ) + raise ValueError(msg) + + if mode == "a" and (not lines or orient != "records"): + msg = ( + "mode='a' (append) is only supported when " + "lines is True and orient is 'records'" + ) + raise ValueError(msg) + + if orient == "table" and isinstance(obj, Series): + obj = obj.to_frame(name=obj.name or "values") + + writer: type[Writer] + if orient == "table" and isinstance(obj, DataFrame): + writer = JSONTableWriter + elif isinstance(obj, Series): + writer = SeriesWriter + elif isinstance(obj, DataFrame): + writer = FrameWriter + else: + raise NotImplementedError("'obj' should be a Series or a DataFrame") + + s = writer( + obj, + orient=orient, + date_format=date_format, + double_precision=double_precision, + ensure_ascii=force_ascii, + date_unit=date_unit, + default_handler=default_handler, + index=index, + indent=indent, + ).write() + + if lines: + s = convert_to_line_delimits(s) + + if path_or_buf is not None: + # apply compression and byte/text conversion + with get_handle( + path_or_buf, mode, compression=compression, storage_options=storage_options + ) as handles: + handles.handle.write(s) + else: + return s + return None + + +class Writer(ABC): + _default_orient: str + + def __init__( + self, + obj: NDFrame, + orient: str | None, + date_format: str, + double_precision: int, + ensure_ascii: bool, + date_unit: str, + index: bool, + default_handler: Callable[[Any], JSONSerializable] | None = None, + indent: int = 0, + ) -> None: + self.obj = obj + + if orient is None: + orient = self._default_orient + + self.orient = orient + self.date_format = date_format + self.double_precision = double_precision + self.ensure_ascii = ensure_ascii + self.date_unit = date_unit + self.default_handler = default_handler + self.index = index + self.indent = indent + + self.is_copy = None + self._format_axes() + + def _format_axes(self) -> None: + raise AbstractMethodError(self) + + def write(self) -> str: + iso_dates = self.date_format == "iso" + return ujson_dumps( + self.obj_to_write, + orient=self.orient, + double_precision=self.double_precision, + ensure_ascii=self.ensure_ascii, + date_unit=self.date_unit, + iso_dates=iso_dates, + default_handler=self.default_handler, + indent=self.indent, + ) + + @property + @abstractmethod + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + """Object to write in JSON format.""" + + +class SeriesWriter(Writer): + _default_orient = "index" + + @property + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + if not self.index and self.orient == "split": + return {"name": self.obj.name, "data": self.obj.values} + else: + return self.obj + + def _format_axes(self) -> None: + if not self.obj.index.is_unique and self.orient == "index": + raise ValueError(f"Series index must be unique for orient='{self.orient}'") + + +class FrameWriter(Writer): + _default_orient = "columns" + + @property + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + if not self.index and self.orient == "split": + obj_to_write = self.obj.to_dict(orient="split") + del obj_to_write["index"] + else: + obj_to_write = self.obj + return obj_to_write + + def _format_axes(self) -> None: + """ + Try to format axes if they are datelike. + """ + if not self.obj.index.is_unique and self.orient in ("index", "columns"): + raise ValueError( + f"DataFrame index must be unique for orient='{self.orient}'." 
+ ) + if not self.obj.columns.is_unique and self.orient in ( + "index", + "columns", + "records", + ): + raise ValueError( + f"DataFrame columns must be unique for orient='{self.orient}'." + ) + + +class JSONTableWriter(FrameWriter): + _default_orient = "records" + + def __init__( + self, + obj, + orient: str | None, + date_format: str, + double_precision: int, + ensure_ascii: bool, + date_unit: str, + index: bool, + default_handler: Callable[[Any], JSONSerializable] | None = None, + indent: int = 0, + ) -> None: + """ + Adds a `schema` attribute with the Table Schema, resets + the index (can't do in caller, because the schema inference needs + to know what the index is, forces orient to records, and forces + date_format to 'iso'. + """ + super().__init__( + obj, + orient, + date_format, + double_precision, + ensure_ascii, + date_unit, + index, + default_handler=default_handler, + indent=indent, + ) + + if date_format != "iso": + msg = ( + "Trying to write with `orient='table'` and " + f"`date_format='{date_format}'`. Table Schema requires dates " + "to be formatted with `date_format='iso'`" + ) + raise ValueError(msg) + + self.schema = build_table_schema(obj, index=self.index) + + # NotImplemented on a column MultiIndex + if obj.ndim == 2 and isinstance(obj.columns, MultiIndex): + raise NotImplementedError( + "orient='table' is not supported for MultiIndex columns" + ) + + # TODO: Do this timedelta properly in objToJSON.c See GH #15137 + if ( + (obj.ndim == 1) + and (obj.name in set(obj.index.names)) + or len(obj.columns.intersection(obj.index.names)) + ): + msg = "Overlapping names between the index and columns" + raise ValueError(msg) + + obj = obj.copy() + timedeltas = obj.select_dtypes(include=["timedelta"]).columns + if len(timedeltas): + obj[timedeltas] = obj[timedeltas].map(lambda x: x.isoformat()) + # Convert PeriodIndex to datetimes before serializing + if isinstance(obj.index.dtype, PeriodDtype): + obj.index = obj.index.to_timestamp() + + # exclude index from obj if index=False + if not self.index: + self.obj = obj.reset_index(drop=True) + else: + self.obj = obj.reset_index(drop=False) + self.date_format = "iso" + self.orient = "records" + self.index = index + + @property + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + return {"schema": self.schema, "data": self.obj} + + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["frame"] = ..., + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> JsonReader[Literal["frame"]]: + ... 
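# Editor's illustrative sketch (not part of this module): how the read_json
# overloads above resolve at a call site. With lines=True and a chunksize,
# read_json returns a JsonReader to iterate over; without a chunksize it
# returns a DataFrame (or a Series when typ="series"). The payload and the
# helper name below are the editor's own.
def _read_json_overloads_demo() -> None:
    from io import StringIO

    import pandas as pd

    payload = '{"a": 1, "b": 2}\n{"a": 3, "b": 4}\n'

    # chunksize given -> JsonReader, usable as a context manager and iterator
    with pd.read_json(StringIO(payload), lines=True, chunksize=1) as reader:
        chunks = list(reader)  # each chunk is a 1-row DataFrame
    assert len(chunks) == 2

    # no chunksize -> the whole input is parsed into a single DataFrame
    df = pd.read_json(StringIO(payload), lines=True)
    assert list(df.columns) == ["a", "b"]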
+ + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["series"], + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> JsonReader[Literal["series"]]: + ... + + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["series"], + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> Series: + ... + + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["frame"] = ..., + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> DataFrame: + ... + + +@doc( + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] % "path_or_buf", +) +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = None, + typ: Literal["frame", "series"] = "frame", + dtype: DtypeArg | None = None, + convert_axes: bool | None = None, + convert_dates: bool | list[str] = True, + keep_default_dates: bool = True, + precise_float: bool = False, + date_unit: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + lines: bool = False, + chunksize: int | None = None, + compression: CompressionOptions = "infer", + nrows: int | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + engine: JSONEngine = "ujson", +) -> DataFrame | Series | JsonReader: + """ + Convert a JSON string to pandas object. + + Parameters + ---------- + path_or_buf : a valid JSON str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.json``. + + If you want to pass in a path object, pandas accepts any + ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, + such as a file handle (e.g. via builtin ``open`` function) + or ``StringIO``. 
+ + .. deprecated:: 2.1.0 + Passing json literal strings is deprecated. + + orient : str, optional + Indication of expected JSON string format. + Compatible JSON strings can be produced by ``to_json()`` with a + corresponding orient value. + The set of possible orients is: + + - ``'split'`` : dict like + ``{{index -> [index], columns -> [columns], data -> [values]}}`` + - ``'records'`` : list like + ``[{{column -> value}}, ... , {{column -> value}}]`` + - ``'index'`` : dict like ``{{index -> {{column -> value}}}}`` + - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}`` + - ``'values'`` : just the values array + - ``'table'`` : dict like ``{{'schema': {{schema}}, 'data': {{data}}}}`` + + The allowed and default values depend on the value + of the `typ` parameter. + + * when ``typ == 'series'``, + + - allowed orients are ``{{'split','records','index'}}`` + - default is ``'index'`` + - The Series index must be unique for orient ``'index'``. + + * when ``typ == 'frame'``, + + - allowed orients are ``{{'split','records','index', + 'columns','values', 'table'}}`` + - default is ``'columns'`` + - The DataFrame index must be unique for orients ``'index'`` and + ``'columns'``. + - The DataFrame columns must be unique for orients ``'index'``, + ``'columns'``, and ``'records'``. + + typ : {{'frame', 'series'}}, default 'frame' + The type of object to recover. + + dtype : bool or dict, default None + If True, infer dtypes; if a dict of column to dtype, then use those; + if False, then don't infer dtypes at all, applies only to the data. + + For all ``orient`` values except ``'table'``, default is True. + + convert_axes : bool, default None + Try to convert the axes to the proper dtypes. + + For all ``orient`` values except ``'table'``, default is True. + + convert_dates : bool or list of str, default True + If True then default datelike columns may be converted (depending on + keep_default_dates). + If False, no dates will be converted. + If a list of column names, then those columns will be converted and + default datelike columns may also be converted (depending on + keep_default_dates). + + keep_default_dates : bool, default True + If parsing dates (convert_dates is not False), then try to parse the + default datelike columns. + A column label is datelike if + + * it ends with ``'_at'``, + + * it ends with ``'_time'``, + + * it begins with ``'timestamp'``, + + * it is ``'modified'``, or + + * it is ``'date'``. + + precise_float : bool, default False + Set to enable usage of higher precision (strtod) function when + decoding string to double values. Default (False) is to use fast but + less precise builtin functionality. + + date_unit : str, default None + The timestamp unit to detect if converting dates. The default behaviour + is to try and detect the correct precision, but if this is not desired + then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, + milliseconds, microseconds or nanoseconds respectively. + + encoding : str, default is 'utf-8' + The encoding to use to decode py3 bytes. + + encoding_errors : str, optional, default "strict" + How encoding errors are treated. `List of possible values + `_ . + + .. versionadded:: 1.3.0 + + lines : bool, default False + Read the file as a json object per line. + + chunksize : int, optional + Return JsonReader object for iteration. + See the `line-delimited json docs + `_ + for more information on ``chunksize``. + This can only be passed if `lines=True`. + If this is None, the file will be read into memory all at once. 
+ {decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + nrows : int, optional + The number of lines from the line-delimited jsonfile that has to be read. + This can only be passed if `lines=True`. + If this is None, all the rows will be returned. + + {storage_options} + + dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + engine : {{"ujson", "pyarrow"}}, default "ujson" + Parser engine to use. The ``"pyarrow"`` engine is only available when + ``lines=True``. + + .. versionadded:: 2.0 + + Returns + ------- + Series, DataFrame, or pandas.api.typing.JsonReader + A JsonReader is returned when ``chunksize`` is not ``0`` or ``None``. + Otherwise, the type returned depends on the value of ``typ``. + + See Also + -------- + DataFrame.to_json : Convert a DataFrame to a JSON string. + Series.to_json : Convert a Series to a JSON string. + json_normalize : Normalize semi-structured JSON data into a flat table. + + Notes + ----- + Specific to ``orient='table'``, if a :class:`DataFrame` with a literal + :class:`Index` name of `index` gets written with :func:`to_json`, the + subsequent read operation will incorrectly set the :class:`Index` name to + ``None``. This is because `index` is also used by :func:`DataFrame.to_json` + to denote a missing :class:`Index` name, and the subsequent + :func:`read_json` operation cannot distinguish between the two. The same + limitation is encountered with a :class:`MultiIndex` and any names + beginning with ``'level_'``. + + Examples + -------- + >>> from io import StringIO + >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], + ... index=['row 1', 'row 2'], + ... columns=['col 1', 'col 2']) + + Encoding/decoding a Dataframe using ``'split'`` formatted JSON: + + >>> df.to_json(orient='split') + '\ +{{\ +"columns":["col 1","col 2"],\ +"index":["row 1","row 2"],\ +"data":[["a","b"],["c","d"]]\ +}}\ +' + >>> pd.read_json(StringIO(_), orient='split') + col 1 col 2 + row 1 a b + row 2 c d + + Encoding/decoding a Dataframe using ``'index'`` formatted JSON: + + >>> df.to_json(orient='index') + '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}' + + >>> pd.read_json(StringIO(_), orient='index') + col 1 col 2 + row 1 a b + row 2 c d + + Encoding/decoding a Dataframe using ``'records'`` formatted JSON. + Note that index labels are not preserved with this encoding. + + >>> df.to_json(orient='records') + '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]' + >>> pd.read_json(StringIO(_), orient='records') + col 1 col 2 + 0 a b + 1 c d + + Encoding with Table Schema + + >>> df.to_json(orient='table') + '\ +{{"schema":{{"fields":[\ +{{"name":"index","type":"string"}},\ +{{"name":"col 1","type":"string"}},\ +{{"name":"col 2","type":"string"}}],\ +"primaryKey":["index"],\ +"pandas_version":"1.4.0"}},\ +"data":[\ +{{"index":"row 1","col 1":"a","col 2":"b"}},\ +{{"index":"row 2","col 1":"c","col 2":"d"}}]\ +}}\ +' + + The following example uses ``dtype_backend="numpy_nullable"`` + + >>> data = '''{{"index": {{"0": 0, "1": 1}}, + ... "a": {{"0": 1, "1": null}}, + ... "b": {{"0": 2.5, "1": 4.5}}, + ... "c": {{"0": true, "1": false}}, + ... "d": {{"0": "a", "1": "b"}}, + ... 
"e": {{"0": 1577.2, "1": 1577.1}}}}''' + >>> pd.read_json(StringIO(data), dtype_backend="numpy_nullable") + index a b c d e + 0 0 1 2.5 True a 1577.2 + 1 1 4.5 False b 1577.1 + """ + if orient == "table" and dtype: + raise ValueError("cannot pass both dtype and orient='table'") + if orient == "table" and convert_axes: + raise ValueError("cannot pass both convert_axes and orient='table'") + + check_dtype_backend(dtype_backend) + + if dtype is None and orient != "table": + # error: Incompatible types in assignment (expression has type "bool", variable + # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable, + # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object]]], None]") + dtype = True # type: ignore[assignment] + if convert_axes is None and orient != "table": + convert_axes = True + + json_reader = JsonReader( + path_or_buf, + orient=orient, + typ=typ, + dtype=dtype, + convert_axes=convert_axes, + convert_dates=convert_dates, + keep_default_dates=keep_default_dates, + precise_float=precise_float, + date_unit=date_unit, + encoding=encoding, + lines=lines, + chunksize=chunksize, + compression=compression, + nrows=nrows, + storage_options=storage_options, + encoding_errors=encoding_errors, + dtype_backend=dtype_backend, + engine=engine, + ) + + if chunksize: + return json_reader + else: + return json_reader.read() + + +class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]): + """ + JsonReader provides an interface for reading in a JSON file. + + If initialized with ``lines=True`` and ``chunksize``, can be iterated over + ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the + whole document. + """ + + def __init__( + self, + filepath_or_buffer, + orient, + typ: FrameSeriesStrT, + dtype, + convert_axes: bool | None, + convert_dates, + keep_default_dates: bool, + precise_float: bool, + date_unit, + encoding, + lines: bool, + chunksize: int | None, + compression: CompressionOptions, + nrows: int | None, + storage_options: StorageOptions | None = None, + encoding_errors: str | None = "strict", + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + engine: JSONEngine = "ujson", + ) -> None: + self.orient = orient + self.typ = typ + self.dtype = dtype + self.convert_axes = convert_axes + self.convert_dates = convert_dates + self.keep_default_dates = keep_default_dates + self.precise_float = precise_float + self.date_unit = date_unit + self.encoding = encoding + self.engine = engine + self.compression = compression + self.storage_options = storage_options + self.lines = lines + self.chunksize = chunksize + self.nrows_seen = 0 + self.nrows = nrows + self.encoding_errors = encoding_errors + self.handles: IOHandles[str] | None = None + self.dtype_backend = dtype_backend + + if self.engine not in {"pyarrow", "ujson"}: + raise ValueError( + f"The engine type {self.engine} is currently not supported." 
+ ) + if self.chunksize is not None: + self.chunksize = validate_integer("chunksize", self.chunksize, 1) + if not self.lines: + raise ValueError("chunksize can only be passed if lines=True") + if self.engine == "pyarrow": + raise ValueError( + "currently pyarrow engine doesn't support chunksize parameter" + ) + if self.nrows is not None: + self.nrows = validate_integer("nrows", self.nrows, 0) + if not self.lines: + raise ValueError("nrows can only be passed if lines=True") + if ( + isinstance(filepath_or_buffer, str) + and not self.lines + and "\n" in filepath_or_buffer + ): + warnings.warn( + "Passing literal json to 'read_json' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self.engine == "pyarrow": + if not self.lines: + raise ValueError( + "currently pyarrow engine only supports " + "the line-delimited JSON format" + ) + self.data = filepath_or_buffer + elif self.engine == "ujson": + data = self._get_data_from_filepath(filepath_or_buffer) + self.data = self._preprocess_data(data) + + def _preprocess_data(self, data): + """ + At this point, the data either has a `read` attribute (e.g. a file + object or a StringIO) or is a string that is a JSON document. + + If self.chunksize, we prepare the data for the `__next__` method. + Otherwise, we read it into memory for the `read` method. + """ + if hasattr(data, "read") and not (self.chunksize or self.nrows): + with self: + data = data.read() + if not hasattr(data, "read") and (self.chunksize or self.nrows): + data = StringIO(data) + + return data + + def _get_data_from_filepath(self, filepath_or_buffer): + """ + The function read_json accepts three input types: + 1. filepath (string-like) + 2. file-like object (e.g. open file object, StringIO) + 3. JSON string + + This method turns (1) into (2) to simplify the rest of the processing. + It returns input types (2) and (3) unchanged. + + It raises FileNotFoundError if the input is a string ending in + one of .json, .json.gz, .json.bz2, etc. but no such file exists. + """ + # if it is a string but the file does not exist, it might be a JSON string + filepath_or_buffer = stringify_path(filepath_or_buffer) + if ( + not isinstance(filepath_or_buffer, str) + or is_url(filepath_or_buffer) + or is_fsspec_url(filepath_or_buffer) + or file_exists(filepath_or_buffer) + ): + self.handles = get_handle( + filepath_or_buffer, + "r", + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + errors=self.encoding_errors, + ) + filepath_or_buffer = self.handles.handle + elif ( + isinstance(filepath_or_buffer, str) + and filepath_or_buffer.lower().endswith( + (".json",) + tuple(f".json{c}" for c in extension_to_compression) + ) + and not file_exists(filepath_or_buffer) + ): + raise FileNotFoundError(f"File {filepath_or_buffer} does not exist") + else: + warnings.warn( + "Passing literal json to 'read_json' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return filepath_or_buffer + + def _combine_lines(self, lines) -> str: + """ + Combines a list of JSON objects into one JSON object. + """ + return ( + f'[{",".join([line for line in (line.strip() for line in lines) if line])}]' + ) + + @overload + def read(self: JsonReader[Literal["frame"]]) -> DataFrame: + ... 
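# Editor's illustrative sketch (not part of this module): a standalone
# re-implementation of what the _combine_lines helper above does, shown here
# for clarity. It strips each line, drops empty ones, and wraps the result in
# a JSON array string so ujson can parse a whole chunk in one pass. The
# function name below is the editor's own.
def _combine_lines_demo(lines: list[str]) -> str:
    return "[" + ",".join(line for line in (ln.strip() for ln in lines) if line) + "]"


# e.g. _combine_lines_demo(['{"a": 1}\n', '\n', '{"a": 2}\n']) == '[{"a": 1},{"a": 2}]'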
+ + @overload + def read(self: JsonReader[Literal["series"]]) -> Series: + ... + + @overload + def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: + ... + + def read(self) -> DataFrame | Series: + """ + Read the whole JSON input into a pandas object. + """ + obj: DataFrame | Series + with self: + if self.engine == "pyarrow": + pyarrow_json = import_optional_dependency("pyarrow.json") + pa_table = pyarrow_json.read_json(self.data) + + mapping: type[ArrowDtype] | None | Callable + if self.dtype_backend == "pyarrow": + mapping = ArrowDtype + elif self.dtype_backend == "numpy_nullable": + from pandas.io._util import _arrow_dtype_mapping + + mapping = _arrow_dtype_mapping().get + else: + mapping = None + + return pa_table.to_pandas(types_mapper=mapping) + elif self.engine == "ujson": + if self.lines: + if self.chunksize: + obj = concat(self) + elif self.nrows: + lines = list(islice(self.data, self.nrows)) + lines_json = self._combine_lines(lines) + obj = self._get_object_parser(lines_json) + else: + data = ensure_str(self.data) + data_lines = data.split("\n") + obj = self._get_object_parser(self._combine_lines(data_lines)) + else: + obj = self._get_object_parser(self.data) + if self.dtype_backend is not lib.no_default: + return obj.convert_dtypes( + infer_objects=False, dtype_backend=self.dtype_backend + ) + else: + return obj + + def _get_object_parser(self, json) -> DataFrame | Series: + """ + Parses a json document into a pandas object. + """ + typ = self.typ + dtype = self.dtype + kwargs = { + "orient": self.orient, + "dtype": self.dtype, + "convert_axes": self.convert_axes, + "convert_dates": self.convert_dates, + "keep_default_dates": self.keep_default_dates, + "precise_float": self.precise_float, + "date_unit": self.date_unit, + "dtype_backend": self.dtype_backend, + } + obj = None + if typ == "frame": + obj = FrameParser(json, **kwargs).parse() + + if typ == "series" or obj is None: + if not isinstance(dtype, bool): + kwargs["dtype"] = dtype + obj = SeriesParser(json, **kwargs).parse() + + return obj + + def close(self) -> None: + """ + If we opened a stream earlier, in _get_data_from_filepath, we should + close it. + + If an open stream or file was passed, we leave it open. + """ + if self.handles is not None: + self.handles.close() + + def __iter__(self) -> Self: + return self + + @overload + def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: + ... + + @overload + def __next__(self: JsonReader[Literal["series"]]) -> Series: + ... + + @overload + def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: + ... + + def __next__(self) -> DataFrame | Series: + if self.nrows and self.nrows_seen >= self.nrows: + self.close() + raise StopIteration + + lines = list(islice(self.data, self.chunksize)) + if not lines: + self.close() + raise StopIteration + + try: + lines_json = self._combine_lines(lines) + obj = self._get_object_parser(lines_json) + + # Make sure that the returned objects have the right index. 
+ obj.index = range(self.nrows_seen, self.nrows_seen + len(obj)) + self.nrows_seen += len(obj) + except Exception as ex: + self.close() + raise ex + + if self.dtype_backend is not lib.no_default: + return obj.convert_dtypes( + infer_objects=False, dtype_backend=self.dtype_backend + ) + else: + return obj + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +class Parser: + _split_keys: tuple[str, ...] + _default_orient: str + + _STAMP_UNITS = ("s", "ms", "us", "ns") + _MIN_STAMPS = { + "s": 31536000, + "ms": 31536000000, + "us": 31536000000000, + "ns": 31536000000000000, + } + json: str + + def __init__( + self, + json: str, + orient, + dtype: DtypeArg | None = None, + convert_axes: bool = True, + convert_dates: bool | list[str] = True, + keep_default_dates: bool = False, + precise_float: bool = False, + date_unit=None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + ) -> None: + self.json = json + + if orient is None: + orient = self._default_orient + + self.orient = orient + + self.dtype = dtype + + if date_unit is not None: + date_unit = date_unit.lower() + if date_unit not in self._STAMP_UNITS: + raise ValueError(f"date_unit must be one of {self._STAMP_UNITS}") + self.min_stamp = self._MIN_STAMPS[date_unit] + else: + self.min_stamp = self._MIN_STAMPS["s"] + + self.precise_float = precise_float + self.convert_axes = convert_axes + self.convert_dates = convert_dates + self.date_unit = date_unit + self.keep_default_dates = keep_default_dates + self.obj: DataFrame | Series | None = None + self.dtype_backend = dtype_backend + + @final + def check_keys_split(self, decoded: dict) -> None: + """ + Checks that dict has only the appropriate keys for orient='split'. + """ + bad_keys = set(decoded.keys()).difference(set(self._split_keys)) + if bad_keys: + bad_keys_joined = ", ".join(bad_keys) + raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}") + + @final + def parse(self): + self._parse() + + if self.obj is None: + return None + if self.convert_axes: + self._convert_axes() + self._try_convert_types() + return self.obj + + def _parse(self) -> None: + raise AbstractMethodError(self) + + @final + def _convert_axes(self) -> None: + """ + Try to convert axes. + """ + obj = self.obj + assert obj is not None # for mypy + for axis_name in obj._AXIS_ORDERS: + ax = obj._get_axis(axis_name) + ser = Series(ax, dtype=ax.dtype, copy=False) + new_ser, result = self._try_convert_data( + name=axis_name, + data=ser, + use_dtypes=False, + convert_dates=True, + is_axis=True, + ) + if result: + new_axis = Index(new_ser, dtype=new_ser.dtype, copy=False) + setattr(self.obj, axis_name, new_axis) + + def _try_convert_types(self) -> None: + raise AbstractMethodError(self) + + @final + def _try_convert_data( + self, + name: Hashable, + data: Series, + use_dtypes: bool = True, + convert_dates: bool | list[str] = True, + is_axis: bool = False, + ) -> tuple[Series, bool]: + """ + Try to parse a Series into a column by inferring dtype. 
+ """ + # don't try to coerce, unless a force conversion + if use_dtypes: + if not self.dtype: + if all(notna(data)): + return data, False + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + filled = data.fillna(np.nan) + + return filled, True + + elif self.dtype is True: + pass + else: + # dtype to force + dtype = ( + self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype + ) + if dtype is not None: + try: + return data.astype(dtype), True + except (TypeError, ValueError): + return data, False + + if convert_dates: + new_data, result = self._try_convert_to_date(data) + if result: + return new_data, True + + converted = False + if self.dtype_backend is not lib.no_default and not is_axis: + # Fall through for conversion later on + return data, True + elif is_string_dtype(data.dtype): + # try float + try: + data = data.astype("float64") + converted = True + except (TypeError, ValueError): + pass + + if data.dtype.kind == "f" and data.dtype != "float64": + # coerce floats to 64 + try: + data = data.astype("float64") + converted = True + except (TypeError, ValueError): + pass + + # don't coerce 0-len data + if len(data) and data.dtype in ("float", "object"): + # coerce ints if we can + try: + new_data = data.astype("int64") + if (new_data == data).all(): + data = new_data + converted = True + except (TypeError, ValueError, OverflowError): + pass + + if data.dtype == "int" and data.dtype != "int64": + # coerce ints to 64 + try: + data = data.astype("int64") + converted = True + except (TypeError, ValueError): + pass + + # if we have an index, we want to preserve dtypes + if name == "index" and len(data): + if self.orient == "split": + return data, False + + return data, converted + + @final + def _try_convert_to_date(self, data: Series) -> tuple[Series, bool]: + """ + Try to parse a ndarray like into a date column. + + Try to coerce object in epoch/iso formats and integer/float in epoch + formats. Return a boolean if parsing was successful. 
+ """ + # no conversion on empty + if not len(data): + return data, False + + new_data = data + + if new_data.dtype == "string": + new_data = new_data.astype(object) + + if new_data.dtype == "object": + try: + new_data = data.astype("int64") + except OverflowError: + return data, False + except (TypeError, ValueError): + pass + + # ignore numbers that are out of range + if issubclass(new_data.dtype.type, np.number): + in_range = ( + isna(new_data._values) + | (new_data > self.min_stamp) + | (new_data._values == iNaT) + ) + if not in_range.all(): + return data, False + + date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS + for date_unit in date_units: + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time " + "zones will raise an error", + category=FutureWarning, + ) + new_data = to_datetime(new_data, errors="raise", unit=date_unit) + except (ValueError, OverflowError, TypeError): + continue + return new_data, True + return data, False + + +class SeriesParser(Parser): + _default_orient = "index" + _split_keys = ("name", "index", "data") + obj: Series | None + + def _parse(self) -> None: + data = ujson_loads(self.json, precise_float=self.precise_float) + + if self.orient == "split": + decoded = {str(k): v for k, v in data.items()} + self.check_keys_split(decoded) + self.obj = Series(**decoded) + else: + self.obj = Series(data) + + def _try_convert_types(self) -> None: + if self.obj is None: + return + obj, result = self._try_convert_data( + "data", self.obj, convert_dates=self.convert_dates + ) + if result: + self.obj = obj + + +class FrameParser(Parser): + _default_orient = "columns" + _split_keys = ("columns", "index", "data") + obj: DataFrame | None + + def _parse(self) -> None: + json = self.json + orient = self.orient + + if orient == "columns": + self.obj = DataFrame( + ujson_loads(json, precise_float=self.precise_float), dtype=None + ) + elif orient == "split": + decoded = { + str(k): v + for k, v in ujson_loads(json, precise_float=self.precise_float).items() + } + self.check_keys_split(decoded) + orig_names = [ + (tuple(col) if isinstance(col, list) else col) + for col in decoded["columns"] + ] + decoded["columns"] = dedup_names( + orig_names, + is_potential_multi_index(orig_names, None), + ) + self.obj = DataFrame(dtype=None, **decoded) + elif orient == "index": + self.obj = DataFrame.from_dict( + ujson_loads(json, precise_float=self.precise_float), + dtype=None, + orient="index", + ) + elif orient == "table": + self.obj = parse_table_schema(json, precise_float=self.precise_float) + else: + self.obj = DataFrame( + ujson_loads(json, precise_float=self.precise_float), dtype=None + ) + + def _process_converter( + self, + f: Callable[[Hashable, Series], tuple[Series, bool]], + filt: Callable[[Hashable], bool] | None = None, + ) -> None: + """ + Take a conversion function and possibly recreate the frame. 
+ """ + if filt is None: + filt = lambda col: True + + obj = self.obj + assert obj is not None # for mypy + + needs_new_obj = False + new_obj = {} + for i, (col, c) in enumerate(obj.items()): + if filt(col): + new_data, result = f(col, c) + if result: + c = new_data + needs_new_obj = True + new_obj[i] = c + + if needs_new_obj: + # possibly handle dup columns + new_frame = DataFrame(new_obj, index=obj.index) + new_frame.columns = obj.columns + self.obj = new_frame + + def _try_convert_types(self) -> None: + if self.obj is None: + return + if self.convert_dates: + self._try_convert_dates() + + self._process_converter( + lambda col, c: self._try_convert_data(col, c, convert_dates=False) + ) + + def _try_convert_dates(self) -> None: + if self.obj is None: + return + + # our columns to parse + convert_dates_list_bool = self.convert_dates + if isinstance(convert_dates_list_bool, bool): + convert_dates_list_bool = [] + convert_dates = set(convert_dates_list_bool) + + def is_ok(col) -> bool: + """ + Return if this col is ok to try for a date parse. + """ + if col in convert_dates: + return True + if not self.keep_default_dates: + return False + if not isinstance(col, str): + return False + + col_lower = col.lower() + if ( + col_lower.endswith(("_at", "_time")) + or col_lower == "modified" + or col_lower == "date" + or col_lower == "datetime" + or col_lower.startswith("timestamp") + ): + return True + return False + + self._process_converter(lambda col, c: self._try_convert_to_date(c), filt=is_ok) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_normalize.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..b1e2210f9d8940a0931b07e1631350089140ff95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_normalize.py @@ -0,0 +1,544 @@ +# --------------------------------------------------------------------- +# JSON normalization routines +from __future__ import annotations + +from collections import ( + abc, + defaultdict, +) +import copy +from typing import ( + TYPE_CHECKING, + Any, + DefaultDict, +) + +import numpy as np + +from pandas._libs.writers import convert_json_to_lines + +import pandas as pd +from pandas import DataFrame + +if TYPE_CHECKING: + from collections.abc import Iterable + + from pandas._typing import ( + IgnoreRaise, + Scalar, + ) + + +def convert_to_line_delimits(s: str) -> str: + """ + Helper function that converts JSON lists to line delimited JSON. + """ + # Determine we have a JSON list to turn to lines otherwise just return the + # json object, only lists can + if not s[0] == "[" and s[-1] == "]": + return s + s = s[1:-1] + + return convert_json_to_lines(s) + + +def nested_to_record( + ds, + prefix: str = "", + sep: str = ".", + level: int = 0, + max_level: int | None = None, +): + """ + A simplified json_normalize + + Converts a nested dict into a flat dict ("record"), unlike json_normalize, + it does not attempt to extract a subset of the data. + + Parameters + ---------- + ds : dict or list of dicts + prefix: the prefix, optional, default: "" + sep : str, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + level: int, optional, default: 0 + The number of levels in the json string. + + max_level: int, optional, default: None + The max depth to normalize. 
+ + Returns + ------- + d - dict or list of dicts, matching `ds` + + Examples + -------- + >>> nested_to_record( + ... dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2)) + ... ) + {\ +'flat1': 1, \ +'dict1.c': 1, \ +'dict1.d': 2, \ +'nested.e.c': 1, \ +'nested.e.d': 2, \ +'nested.d': 2\ +} + """ + singleton = False + if isinstance(ds, dict): + ds = [ds] + singleton = True + new_ds = [] + for d in ds: + new_d = copy.deepcopy(d) + for k, v in d.items(): + # each key gets renamed with prefix + if not isinstance(k, str): + k = str(k) + if level == 0: + newkey = k + else: + newkey = prefix + sep + k + + # flatten if type is dict and + # current dict level < maximum level provided and + # only dicts gets recurse-flattened + # only at level>1 do we rename the rest of the keys + if not isinstance(v, dict) or ( + max_level is not None and level >= max_level + ): + if level != 0: # so we skip copying for top level, common case + v = new_d.pop(k) + new_d[newkey] = v + continue + + v = new_d.pop(k) + new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level)) + new_ds.append(new_d) + + if singleton: + return new_ds[0] + return new_ds + + +def _normalise_json( + data: Any, + key_string: str, + normalized_dict: dict[str, Any], + separator: str, +) -> dict[str, Any]: + """ + Main recursive function + Designed for the most basic use case of pd.json_normalize(data) + intended as a performance improvement, see #15621 + + Parameters + ---------- + data : Any + Type dependent on types contained within nested Json + key_string : str + New key (with separator(s) in) for data + normalized_dict : dict + The new normalized/flattened Json dict + separator : str, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + """ + if isinstance(data, dict): + for key, value in data.items(): + new_key = f"{key_string}{separator}{key}" + + if not key_string: + new_key = new_key.removeprefix(separator) + + _normalise_json( + data=value, + key_string=new_key, + normalized_dict=normalized_dict, + separator=separator, + ) + else: + normalized_dict[key_string] = data + return normalized_dict + + +def _normalise_json_ordered(data: dict[str, Any], separator: str) -> dict[str, Any]: + """ + Order the top level keys and then recursively go to depth + + Parameters + ---------- + data : dict or list of dicts + separator : str, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + + Returns + ------- + dict or list of dicts, matching `normalised_json_object` + """ + top_dict_ = {k: v for k, v in data.items() if not isinstance(v, dict)} + nested_dict_ = _normalise_json( + data={k: v for k, v in data.items() if isinstance(v, dict)}, + key_string="", + normalized_dict={}, + separator=separator, + ) + return {**top_dict_, **nested_dict_} + + +def _simple_json_normalize( + ds: dict | list[dict], + sep: str = ".", +) -> dict | list[dict] | Any: + """ + A optimized basic json_normalize + + Converts a nested dict into a flat dict ("record"), unlike + json_normalize and nested_to_record it doesn't do anything clever. + But for the most basic use cases it enhances performance. + E.g. pd.json_normalize(data) + + Parameters + ---------- + ds : dict or list of dicts + sep : str, default '.' 
+ Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + + Returns + ------- + frame : DataFrame + d - dict or list of dicts, matching `normalised_json_object` + + Examples + -------- + >>> _simple_json_normalize( + ... { + ... "flat1": 1, + ... "dict1": {"c": 1, "d": 2}, + ... "nested": {"e": {"c": 1, "d": 2}, "d": 2}, + ... } + ... ) + {\ +'flat1': 1, \ +'dict1.c': 1, \ +'dict1.d': 2, \ +'nested.e.c': 1, \ +'nested.e.d': 2, \ +'nested.d': 2\ +} + + """ + normalised_json_object = {} + # expect a dictionary, as most jsons are. However, lists are perfectly valid + if isinstance(ds, dict): + normalised_json_object = _normalise_json_ordered(data=ds, separator=sep) + elif isinstance(ds, list): + normalised_json_list = [_simple_json_normalize(row, sep=sep) for row in ds] + return normalised_json_list + return normalised_json_object + + +def json_normalize( + data: dict | list[dict], + record_path: str | list | None = None, + meta: str | list[str | list[str]] | None = None, + meta_prefix: str | None = None, + record_prefix: str | None = None, + errors: IgnoreRaise = "raise", + sep: str = ".", + max_level: int | None = None, +) -> DataFrame: + """ + Normalize semi-structured JSON data into a flat table. + + Parameters + ---------- + data : dict or list of dicts + Unserialized JSON objects. + record_path : str or list of str, default None + Path in each object to list of records. If not passed, data will be + assumed to be an array of records. + meta : list of paths (str or list of str), default None + Fields to use as metadata for each record in resulting table. + meta_prefix : str, default None + If True, prefix records with dotted (?) path, e.g. foo.bar.field if + meta is ['foo', 'bar']. + record_prefix : str, default None + If True, prefix records with dotted (?) path, e.g. foo.bar.field if + path to records is ['foo', 'bar']. + errors : {'raise', 'ignore'}, default 'raise' + Configures error handling. + + * 'ignore' : will ignore KeyError if keys listed in meta are not + always present. + * 'raise' : will raise KeyError if keys listed in meta are not + always present. + sep : str, default '.' + Nested records will generate names separated by sep. + e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar. + max_level : int, default None + Max number of levels(depth of dict) to normalize. + if None, normalizes all levels. + + Returns + ------- + frame : DataFrame + Normalize semi-structured JSON data into a flat table. + + Examples + -------- + >>> data = [ + ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}}, + ... {"name": {"given": "Mark", "family": "Regner"}}, + ... {"id": 2, "name": "Faye Raker"}, + ... ] + >>> pd.json_normalize(data) + id name.first name.last name.given name.family name + 0 1.0 Coleen Volk NaN NaN NaN + 1 NaN NaN NaN Mark Regner NaN + 2 2.0 NaN NaN NaN NaN Faye Raker + + >>> data = [ + ... { + ... "id": 1, + ... "name": "Cole Volk", + ... "fitness": {"height": 130, "weight": 60}, + ... }, + ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, + ... { + ... "id": 2, + ... "name": "Faye Raker", + ... "fitness": {"height": 130, "weight": 60}, + ... }, + ... ] + >>> pd.json_normalize(data, max_level=0) + id name fitness + 0 1.0 Cole Volk {'height': 130, 'weight': 60} + 1 NaN Mark Reg {'height': 130, 'weight': 60} + 2 2.0 Faye Raker {'height': 130, 'weight': 60} + + Normalizes nested data up to level 1. + + >>> data = [ + ... { + ... "id": 1, + ... "name": "Cole Volk", + ... 
"fitness": {"height": 130, "weight": 60}, + ... }, + ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, + ... { + ... "id": 2, + ... "name": "Faye Raker", + ... "fitness": {"height": 130, "weight": 60}, + ... }, + ... ] + >>> pd.json_normalize(data, max_level=1) + id name fitness.height fitness.weight + 0 1.0 Cole Volk 130 60 + 1 NaN Mark Reg 130 60 + 2 2.0 Faye Raker 130 60 + + >>> data = [ + ... { + ... "state": "Florida", + ... "shortname": "FL", + ... "info": {"governor": "Rick Scott"}, + ... "counties": [ + ... {"name": "Dade", "population": 12345}, + ... {"name": "Broward", "population": 40000}, + ... {"name": "Palm Beach", "population": 60000}, + ... ], + ... }, + ... { + ... "state": "Ohio", + ... "shortname": "OH", + ... "info": {"governor": "John Kasich"}, + ... "counties": [ + ... {"name": "Summit", "population": 1234}, + ... {"name": "Cuyahoga", "population": 1337}, + ... ], + ... }, + ... ] + >>> result = pd.json_normalize( + ... data, "counties", ["state", "shortname", ["info", "governor"]] + ... ) + >>> result + name population state shortname info.governor + 0 Dade 12345 Florida FL Rick Scott + 1 Broward 40000 Florida FL Rick Scott + 2 Palm Beach 60000 Florida FL Rick Scott + 3 Summit 1234 Ohio OH John Kasich + 4 Cuyahoga 1337 Ohio OH John Kasich + + >>> data = {"A": [1, 2]} + >>> pd.json_normalize(data, "A", record_prefix="Prefix.") + Prefix.0 + 0 1 + 1 2 + + Returns normalized data with columns prefixed with the given string. + """ + + def _pull_field( + js: dict[str, Any], spec: list | str, extract_record: bool = False + ) -> Scalar | Iterable: + """Internal function to pull field""" + result = js + try: + if isinstance(spec, list): + for field in spec: + if result is None: + raise KeyError(field) + result = result[field] + else: + result = result[spec] + except KeyError as e: + if extract_record: + raise KeyError( + f"Key {e} not found. If specifying a record_path, all elements of " + f"data should have the path." + ) from e + if errors == "ignore": + return np.nan + else: + raise KeyError( + f"Key {e} not found. To replace missing values of {e} with " + f"np.nan, pass in errors='ignore'" + ) from e + + return result + + def _pull_records(js: dict[str, Any], spec: list | str) -> list: + """ + Internal function to pull field for records, and similar to + _pull_field, but require to return list. And will raise error + if has non iterable value. + """ + result = _pull_field(js, spec, extract_record=True) + + # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not + # null, otherwise return an empty list + if not isinstance(result, list): + if pd.isnull(result): + result = [] + else: + raise TypeError( + f"{js} has non list value {result} for path {spec}. " + "Must be list or null." 
+ ) + return result + + if isinstance(data, list) and not data: + return DataFrame() + elif isinstance(data, dict): + # A bit of a hackjob + data = [data] + elif isinstance(data, abc.Iterable) and not isinstance(data, str): + # GH35923 Fix pd.json_normalize to not skip the first element of a + # generator input + data = list(data) + else: + raise NotImplementedError + + # check to see if a simple recursive function is possible to + # improve performance (see #15621) but only for cases such + # as pd.Dataframe(data) or pd.Dataframe(data, sep) + if ( + record_path is None + and meta is None + and meta_prefix is None + and record_prefix is None + and max_level is None + ): + return DataFrame(_simple_json_normalize(data, sep=sep)) + + if record_path is None: + if any([isinstance(x, dict) for x in y.values()] for y in data): + # naive normalization, this is idempotent for flat records + # and potentially will inflate the data considerably for + # deeply nested structures: + # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} + # + # TODO: handle record value which are lists, at least error + # reasonably + data = nested_to_record(data, sep=sep, max_level=max_level) + return DataFrame(data) + elif not isinstance(record_path, list): + record_path = [record_path] + + if meta is None: + meta = [] + elif not isinstance(meta, list): + meta = [meta] + + _meta = [m if isinstance(m, list) else [m] for m in meta] + + # Disastrously inefficient for now + records: list = [] + lengths = [] + + meta_vals: DefaultDict = defaultdict(list) + meta_keys = [sep.join(val) for val in _meta] + + def _recursive_extract(data, path, seen_meta, level: int = 0) -> None: + if isinstance(data, dict): + data = [data] + if len(path) > 1: + for obj in data: + for val, key in zip(_meta, meta_keys): + if level + 1 == len(val): + seen_meta[key] = _pull_field(obj, val[-1]) + + _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) + else: + for obj in data: + recs = _pull_records(obj, path[0]) + recs = [ + nested_to_record(r, sep=sep, max_level=max_level) + if isinstance(r, dict) + else r + for r in recs + ] + + # For repeating the metadata later + lengths.append(len(recs)) + for val, key in zip(_meta, meta_keys): + if level + 1 > len(val): + meta_val = seen_meta[key] + else: + meta_val = _pull_field(obj, val[level:]) + meta_vals[key].append(meta_val) + records.extend(recs) + + _recursive_extract(data, record_path, {}, level=0) + + result = DataFrame(records) + + if record_prefix is not None: + result = result.rename(columns=lambda x: f"{record_prefix}{x}") + + # Data types, a problem + for k, v in meta_vals.items(): + if meta_prefix is not None: + k = meta_prefix + k + + if k in result: + raise ValueError( + f"Conflicting metadata name {k}, need distinguishing prefix " + ) + # GH 37782 + + values = np.array(v, dtype=object) + + if values.ndim > 1: + # GH 37782 + values = np.empty((len(v),), dtype=object) + for i, v in enumerate(v): + values[i] = v + + result[k] = values.repeat(lengths) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_table_schema.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_table_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..4d9fba72cf1733e8893e194c29d5a00bc3ad1d5c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/json/_table_schema.py @@ -0,0 +1,389 @@ +""" +Table Schema builders + +https://specs.frictionlessdata.io/table-schema/ +""" +from __future__ import annotations + +from typing 
import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +from pandas._libs import lib +from pandas._libs.json import ujson_loads +from pandas._libs.tslibs import timezones +from pandas._libs.tslibs.dtypes import freq_to_period_freqstr +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import _registry as registry +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer_dtype, + is_numeric_dtype, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) + +from pandas import DataFrame +import pandas.core.common as com + +from pandas.tseries.frequencies import to_offset + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + JSONSerializable, + ) + + from pandas import Series + from pandas.core.indexes.multi import MultiIndex + + +TABLE_SCHEMA_VERSION = "1.4.0" + + +def as_json_table_type(x: DtypeObj) -> str: + """ + Convert a NumPy / pandas type to its corresponding json_table. + + Parameters + ---------- + x : np.dtype or ExtensionDtype + + Returns + ------- + str + the Table Schema data types + + Notes + ----- + This table shows the relationship between NumPy / pandas dtypes, + and Table Schema dtypes. + + ============== ================= + Pandas type Table Schema type + ============== ================= + int64 integer + float64 number + bool boolean + datetime64[ns] datetime + timedelta64[ns] duration + object str + categorical any + =============== ================= + """ + if is_integer_dtype(x): + return "integer" + elif is_bool_dtype(x): + return "boolean" + elif is_numeric_dtype(x): + return "number" + elif lib.is_np_dtype(x, "M") or isinstance(x, (DatetimeTZDtype, PeriodDtype)): + return "datetime" + elif lib.is_np_dtype(x, "m"): + return "duration" + elif isinstance(x, ExtensionDtype): + return "any" + elif is_string_dtype(x): + return "string" + else: + return "any" + + +def set_default_names(data): + """Sets index names to 'index' for regular, or 'level_x' for Multi""" + if com.all_not_none(*data.index.names): + nms = data.index.names + if len(nms) == 1 and data.index.name == "index": + warnings.warn( + "Index name of 'index' is not round-trippable.", + stacklevel=find_stack_level(), + ) + elif len(nms) > 1 and any(x.startswith("level_") for x in nms): + warnings.warn( + "Index names beginning with 'level_' are not round-trippable.", + stacklevel=find_stack_level(), + ) + return data + + data = data.copy() + if data.index.nlevels > 1: + data.index.names = com.fill_missing_names(data.index.names) + else: + data.index.name = data.index.name or "index" + return data + + +def convert_pandas_type_to_json_field(arr) -> dict[str, JSONSerializable]: + dtype = arr.dtype + name: JSONSerializable + if arr.name is None: + name = "values" + else: + name = arr.name + field: dict[str, JSONSerializable] = { + "name": name, + "type": as_json_table_type(dtype), + } + + if isinstance(dtype, CategoricalDtype): + cats = dtype.categories + ordered = dtype.ordered + + field["constraints"] = {"enum": list(cats)} + field["ordered"] = ordered + elif isinstance(dtype, PeriodDtype): + field["freq"] = dtype.freq.freqstr + elif isinstance(dtype, DatetimeTZDtype): + if timezones.is_utc(dtype.tz): + # timezone.utc has no "zone" attr + field["tz"] = "UTC" + else: + # error: "tzinfo" has no attribute "zone" + field["tz"] = dtype.tz.zone # type: ignore[attr-defined] + elif isinstance(dtype, ExtensionDtype): + field["extDtype"] = dtype.name + return field + + +def 
convert_json_field_to_pandas_type(field) -> str | CategoricalDtype: + """ + Converts a JSON field descriptor into its corresponding NumPy / pandas type + + Parameters + ---------- + field + A JSON field descriptor + + Returns + ------- + dtype + + Raises + ------ + ValueError + If the type of the provided field is unknown or currently unsupported + + Examples + -------- + >>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"}) + 'int64' + + >>> convert_json_field_to_pandas_type( + ... { + ... "name": "a_categorical", + ... "type": "any", + ... "constraints": {"enum": ["a", "b", "c"]}, + ... "ordered": True, + ... } + ... ) + CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=object) + + >>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"}) + 'datetime64[ns]' + + >>> convert_json_field_to_pandas_type( + ... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"} + ... ) + 'datetime64[ns, US/Central]' + """ + typ = field["type"] + if typ == "string": + return "object" + elif typ == "integer": + return field.get("extDtype", "int64") + elif typ == "number": + return field.get("extDtype", "float64") + elif typ == "boolean": + return field.get("extDtype", "bool") + elif typ == "duration": + return "timedelta64" + elif typ == "datetime": + if field.get("tz"): + return f"datetime64[ns, {field['tz']}]" + elif field.get("freq"): + # GH#9586 rename frequency M to ME for offsets + offset = to_offset(field["freq"]) + freq_n, freq_name = offset.n, offset.name + freq = freq_to_period_freqstr(freq_n, freq_name) + # GH#47747 using datetime over period to minimize the change surface + return f"period[{freq}]" + else: + return "datetime64[ns]" + elif typ == "any": + if "constraints" in field and "ordered" in field: + return CategoricalDtype( + categories=field["constraints"]["enum"], ordered=field["ordered"] + ) + elif "extDtype" in field: + return registry.find(field["extDtype"]) + else: + return "object" + + raise ValueError(f"Unsupported or invalid field type: {typ}") + + +def build_table_schema( + data: DataFrame | Series, + index: bool = True, + primary_key: bool | None = None, + version: bool = True, +) -> dict[str, JSONSerializable]: + """ + Create a Table schema from ``data``. + + Parameters + ---------- + data : Series, DataFrame + index : bool, default True + Whether to include ``data.index`` in the schema. + primary_key : bool or None, default True + Column names to designate as the primary key. + The default `None` will set `'primaryKey'` to the index + level or levels if the index is unique. + version : bool, default True + Whether to include a field `pandas_version` with the version + of pandas that last revised the table schema. This version + can be different from the installed pandas version. + + Returns + ------- + dict + + Notes + ----- + See `Table Schema + `__ for + conversion types. + Timedeltas as converted to ISO8601 duration format with + 9 decimal places after the seconds field for nanosecond precision. + + Categoricals are converted to the `any` dtype, and use the `enum` field + constraint to list the allowed values. The `ordered` attribute is included + in an `ordered` field. + + Examples + -------- + >>> from pandas.io.json._table_schema import build_table_schema + >>> df = pd.DataFrame( + ... {'A': [1, 2, 3], + ... 'B': ['a', 'b', 'c'], + ... 'C': pd.date_range('2016-01-01', freq='d', periods=3), + ... 
}, index=pd.Index(range(3), name='idx')) + >>> build_table_schema(df) + {'fields': \ +[{'name': 'idx', 'type': 'integer'}, \ +{'name': 'A', 'type': 'integer'}, \ +{'name': 'B', 'type': 'string'}, \ +{'name': 'C', 'type': 'datetime'}], \ +'primaryKey': ['idx'], \ +'pandas_version': '1.4.0'} + """ + if index is True: + data = set_default_names(data) + + schema: dict[str, Any] = {} + fields = [] + + if index: + if data.index.nlevels > 1: + data.index = cast("MultiIndex", data.index) + for level, name in zip(data.index.levels, data.index.names): + new_field = convert_pandas_type_to_json_field(level) + new_field["name"] = name + fields.append(new_field) + else: + fields.append(convert_pandas_type_to_json_field(data.index)) + + if data.ndim > 1: + for column, s in data.items(): + fields.append(convert_pandas_type_to_json_field(s)) + else: + fields.append(convert_pandas_type_to_json_field(data)) + + schema["fields"] = fields + if index and data.index.is_unique and primary_key is None: + if data.index.nlevels == 1: + schema["primaryKey"] = [data.index.name] + else: + schema["primaryKey"] = data.index.names + elif primary_key is not None: + schema["primaryKey"] = primary_key + + if version: + schema["pandas_version"] = TABLE_SCHEMA_VERSION + return schema + + +def parse_table_schema(json, precise_float: bool) -> DataFrame: + """ + Builds a DataFrame from a given schema + + Parameters + ---------- + json : + A JSON table schema + precise_float : bool + Flag controlling precision when decoding string to double values, as + dictated by ``read_json`` + + Returns + ------- + df : DataFrame + + Raises + ------ + NotImplementedError + If the JSON table schema contains either timezone or timedelta data + + Notes + ----- + Because :func:`DataFrame.to_json` uses the string 'index' to denote a + name-less :class:`Index`, this function sets the name of the returned + :class:`DataFrame` to ``None`` when said string is encountered with a + normal :class:`Index`. For a :class:`MultiIndex`, the same limitation + applies to any strings beginning with 'level_'. Therefore, an + :class:`Index` name of 'index' and :class:`MultiIndex` names starting + with 'level_' are not supported. + + See Also + -------- + build_table_schema : Inverse function. 
+ pandas.read_json + """ + table = ujson_loads(json, precise_float=precise_float) + col_order = [field["name"] for field in table["schema"]["fields"]] + df = DataFrame(table["data"], columns=col_order)[col_order] + + dtypes = { + field["name"]: convert_json_field_to_pandas_type(field) + for field in table["schema"]["fields"] + } + + # No ISO constructor for Timedelta as of yet, so need to raise + if "timedelta64" in dtypes.values(): + raise NotImplementedError( + 'table="orient" can not yet read ISO-formatted Timedelta data' + ) + + df = df.astype(dtypes) + + if "primaryKey" in table["schema"]: + df = df.set_index(table["schema"]["primaryKey"]) + if len(df.index.names) == 1: + if df.index.name == "index": + df.index.name = None + else: + df.index.names = [ + None if x.startswith("level_") else x for x in df.index.names + ] + + return df diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41d3b04a8dc6e657e7fbe2252ff1d3bebfbb956a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d510f8ac90047c92abf9be3c0eeb521c0e5d1b81 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e361726dc6f80d41cb4975641b44624427b489d6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__init__.py @@ -0,0 +1,12 @@ +# ruff: noqa: TCH004 +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # import modules that have public classes/functions: + from pandas.tseries import ( + frequencies, + offsets, + ) + + # and mark only those modules as public + __all__ = ["frequencies", "offsets"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b276af3d59b5547eb282166ce107111111cfb02 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/holiday.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/holiday.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07804c8e37e470ca2426a320728d14b5eb22091c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/holiday.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/api.py b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/api.py new file mode 100644 index 0000000000000000000000000000000000000000..ec2d7d230483947d78f737af42684effa3e60514 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/api.py @@ -0,0 +1,10 @@ +""" +Timeseries API +""" + +from pandas._libs.tslibs.parsing import guess_datetime_format + +from pandas.tseries import offsets +from pandas.tseries.frequencies import infer_freq + +__all__ = ["infer_freq", "offsets", "guess_datetime_format"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/frequencies.py b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/frequencies.py new file mode 100644 index 0000000000000000000000000000000000000000..4a1a668426b36ef10f56352653121b6420070882 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/frequencies.py @@ -0,0 +1,602 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs import lib +from pandas._libs.algos import unique_deltas +from pandas._libs.tslibs import ( + Timestamp, + get_unit_from_dtype, + periods_per_day, + tz_convert_from_utc, +) +from pandas._libs.tslibs.ccalendar import ( + DAYS, + MONTH_ALIASES, + MONTH_NUMBERS, + MONTHS, + int_to_weekday, +) +from pandas._libs.tslibs.dtypes import ( + OFFSET_TO_PERIOD_FREQSTR, + freq_to_period_freqstr, +) +from pandas._libs.tslibs.fields import ( + build_field_sarray, + month_position_check, +) +from pandas._libs.tslibs.offsets import ( + DateOffset, + Day, + to_offset, +) +from pandas._libs.tslibs.parsing import get_rule_month +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.common import is_numeric_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +from pandas.core.algorithms import unique + +if TYPE_CHECKING: + from pandas._typing import npt + + from pandas import ( + DatetimeIndex, + Series, + TimedeltaIndex, + ) + from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin +# -------------------------------------------------------------------- +# Offset related functions + +_need_suffix = ["QS", "BQE", "BQS", "YS", "BYE", "BYS"] + +for _prefix in _need_suffix: + for _m in MONTHS: + key = f"{_prefix}-{_m}" + OFFSET_TO_PERIOD_FREQSTR[key] = OFFSET_TO_PERIOD_FREQSTR[_prefix] + +for _prefix in ["Y", "Q"]: + for _m in MONTHS: + _alias = f"{_prefix}-{_m}" + OFFSET_TO_PERIOD_FREQSTR[_alias] = _alias + +for _d in DAYS: + OFFSET_TO_PERIOD_FREQSTR[f"W-{_d}"] = f"W-{_d}" + + +def get_period_alias(offset_str: str) -> str | None: + """ + Alias to closest period strings BQ->Q etc. + """ + return OFFSET_TO_PERIOD_FREQSTR.get(offset_str, None) + + +# --------------------------------------------------------------------- +# Period codes + + +def infer_freq( + index: DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin, +) -> str | None: + """ + Infer the most likely frequency given the input index. + + Parameters + ---------- + index : DatetimeIndex, TimedeltaIndex, Series or array-like + If passed a Series will use the values of the series (NOT THE INDEX). + + Returns + ------- + str or None + None if no discernible frequency. + + Raises + ------ + TypeError + If the index is not datetime-like. + ValueError + If there are fewer than three values. 
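+
+    A rough sketch of the Series case noted above, where the values (not the
+    index) drive the inference; the hourly data here is illustrative only:
+
+    >>> s = pd.Series(pd.date_range("2020-01-01", periods=4, freq="h"))
+    >>> pd.infer_freq(s)
+    'h'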
+ + Examples + -------- + >>> idx = pd.date_range(start='2020/12/01', end='2020/12/30', periods=30) + >>> pd.infer_freq(idx) + 'D' + """ + from pandas.core.api import DatetimeIndex + + if isinstance(index, ABCSeries): + values = index._values + if not ( + lib.is_np_dtype(values.dtype, "mM") + or isinstance(values.dtype, DatetimeTZDtype) + or values.dtype == object + ): + raise TypeError( + "cannot infer freq from a non-convertible dtype " + f"on a Series of {index.dtype}" + ) + index = values + + inferer: _FrequencyInferer + + if not hasattr(index, "dtype"): + pass + elif isinstance(index.dtype, PeriodDtype): + raise TypeError( + "PeriodIndex given. Check the `freq` attribute " + "instead of using infer_freq." + ) + elif lib.is_np_dtype(index.dtype, "m"): + # Allow TimedeltaIndex and TimedeltaArray + inferer = _TimedeltaFrequencyInferer(index) + return inferer.get_freq() + + elif is_numeric_dtype(index.dtype): + raise TypeError( + f"cannot infer freq from a non-convertible index of dtype {index.dtype}" + ) + + if not isinstance(index, DatetimeIndex): + index = DatetimeIndex(index) + + inferer = _FrequencyInferer(index) + return inferer.get_freq() + + +class _FrequencyInferer: + """ + Not sure if I can avoid the state machine here + """ + + def __init__(self, index) -> None: + self.index = index + self.i8values = index.asi8 + + # For get_unit_from_dtype we need the dtype to the underlying ndarray, + # which for tz-aware is not the same as index.dtype + if isinstance(index, ABCIndex): + # error: Item "ndarray[Any, Any]" of "Union[ExtensionArray, + # ndarray[Any, Any]]" has no attribute "_ndarray" + self._creso = get_unit_from_dtype( + index._data._ndarray.dtype # type: ignore[union-attr] + ) + else: + # otherwise we have DTA/TDA + self._creso = get_unit_from_dtype(index._ndarray.dtype) + + # This moves the values, which are implicitly in UTC, to the + # the timezone so they are in local time + if hasattr(index, "tz"): + if index.tz is not None: + self.i8values = tz_convert_from_utc( + self.i8values, index.tz, reso=self._creso + ) + + if len(index) < 3: + raise ValueError("Need at least 3 dates to infer frequency") + + self.is_monotonic = ( + self.index._is_monotonic_increasing or self.index._is_monotonic_decreasing + ) + + @cache_readonly + def deltas(self) -> npt.NDArray[np.int64]: + return unique_deltas(self.i8values) + + @cache_readonly + def deltas_asi8(self) -> npt.NDArray[np.int64]: + # NB: we cannot use self.i8values here because we may have converted + # the tz in __init__ + return unique_deltas(self.index.asi8) + + @cache_readonly + def is_unique(self) -> bool: + return len(self.deltas) == 1 + + @cache_readonly + def is_unique_asi8(self) -> bool: + return len(self.deltas_asi8) == 1 + + def get_freq(self) -> str | None: + """ + Find the appropriate frequency string to describe the inferred + frequency of self.i8values + + Returns + ------- + str or None + """ + if not self.is_monotonic or not self.index._is_unique: + return None + + delta = self.deltas[0] + ppd = periods_per_day(self._creso) + if delta and _is_multiple(delta, ppd): + return self._infer_daily_rule() + + # Business hourly, maybe. 17: one day / 65: one weekend + if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): + return "bh" + + # Possibly intraday frequency. Here we use the + # original .asi8 values as the modified values + # will not work around DST transitions. 
See #8772 + if not self.is_unique_asi8: + return None + + delta = self.deltas_asi8[0] + pph = ppd // 24 + ppm = pph // 60 + pps = ppm // 60 + if _is_multiple(delta, pph): + # Hours + return _maybe_add_count("h", delta / pph) + elif _is_multiple(delta, ppm): + # Minutes + return _maybe_add_count("min", delta / ppm) + elif _is_multiple(delta, pps): + # Seconds + return _maybe_add_count("s", delta / pps) + elif _is_multiple(delta, (pps // 1000)): + # Milliseconds + return _maybe_add_count("ms", delta / (pps // 1000)) + elif _is_multiple(delta, (pps // 1_000_000)): + # Microseconds + return _maybe_add_count("us", delta / (pps // 1_000_000)) + else: + # Nanoseconds + return _maybe_add_count("ns", delta) + + @cache_readonly + def day_deltas(self) -> list[int]: + ppd = periods_per_day(self._creso) + return [x / ppd for x in self.deltas] + + @cache_readonly + def hour_deltas(self) -> list[int]: + pph = periods_per_day(self._creso) // 24 + return [x / pph for x in self.deltas] + + @cache_readonly + def fields(self) -> np.ndarray: # structured array of fields + return build_field_sarray(self.i8values, reso=self._creso) + + @cache_readonly + def rep_stamp(self) -> Timestamp: + return Timestamp(self.i8values[0], unit=self.index.unit) + + def month_position_check(self) -> str | None: + return month_position_check(self.fields, self.index.dayofweek) + + @cache_readonly + def mdiffs(self) -> npt.NDArray[np.int64]: + nmonths = self.fields["Y"] * 12 + self.fields["M"] + return unique_deltas(nmonths.astype("i8")) + + @cache_readonly + def ydiffs(self) -> npt.NDArray[np.int64]: + return unique_deltas(self.fields["Y"].astype("i8")) + + def _infer_daily_rule(self) -> str | None: + annual_rule = self._get_annual_rule() + if annual_rule: + nyears = self.ydiffs[0] + month = MONTH_ALIASES[self.rep_stamp.month] + alias = f"{annual_rule}-{month}" + return _maybe_add_count(alias, nyears) + + quarterly_rule = self._get_quarterly_rule() + if quarterly_rule: + nquarters = self.mdiffs[0] / 3 + mod_dict = {0: 12, 2: 11, 1: 10} + month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] + alias = f"{quarterly_rule}-{month}" + return _maybe_add_count(alias, nquarters) + + monthly_rule = self._get_monthly_rule() + if monthly_rule: + return _maybe_add_count(monthly_rule, self.mdiffs[0]) + + if self.is_unique: + return self._get_daily_rule() + + if self._is_business_daily(): + return "B" + + wom_rule = self._get_wom_rule() + if wom_rule: + return wom_rule + + return None + + def _get_daily_rule(self) -> str | None: + ppd = periods_per_day(self._creso) + days = self.deltas[0] / ppd + if days % 7 == 0: + # Weekly + wd = int_to_weekday[self.rep_stamp.weekday()] + alias = f"W-{wd}" + return _maybe_add_count(alias, days / 7) + else: + return _maybe_add_count("D", days) + + def _get_annual_rule(self) -> str | None: + if len(self.ydiffs) > 1: + return None + + if len(unique(self.fields["M"])) > 1: + return None + + pos_check = self.month_position_check() + + if pos_check is None: + return None + else: + return {"cs": "YS", "bs": "BYS", "ce": "YE", "be": "BYE"}.get(pos_check) + + def _get_quarterly_rule(self) -> str | None: + if len(self.mdiffs) > 1: + return None + + if not self.mdiffs[0] % 3 == 0: + return None + + pos_check = self.month_position_check() + + if pos_check is None: + return None + else: + return {"cs": "QS", "bs": "BQS", "ce": "QE", "be": "BQE"}.get(pos_check) + + def _get_monthly_rule(self) -> str | None: + if len(self.mdiffs) > 1: + return None + pos_check = self.month_position_check() + + if pos_check is None: + 
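+            # pos_check comes back from month_position_check as "cs"/"bs"/"ce"/"be"
+            # (calendar or business month start/end), or None when the stamps do
+            # not share a consistent position; the mapping below turns that into
+            # the MS/BMS/ME/BME aliases.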
return None + else: + return {"cs": "MS", "bs": "BMS", "ce": "ME", "be": "BME"}.get(pos_check) + + def _is_business_daily(self) -> bool: + # quick check: cannot be business daily + if self.day_deltas != [1, 3]: + return False + + # probably business daily, but need to confirm + first_weekday = self.index[0].weekday() + shifts = np.diff(self.i8values) + ppd = periods_per_day(self._creso) + shifts = np.floor_divide(shifts, ppd) + weekdays = np.mod(first_weekday + np.cumsum(shifts), 7) + + return bool( + np.all( + ((weekdays == 0) & (shifts == 3)) + | ((weekdays > 0) & (weekdays <= 4) & (shifts == 1)) + ) + ) + + def _get_wom_rule(self) -> str | None: + weekdays = unique(self.index.weekday) + if len(weekdays) > 1: + return None + + week_of_months = unique((self.index.day - 1) // 7) + # Only attempt to infer up to WOM-4. See #9425 + week_of_months = week_of_months[week_of_months < 4] + if len(week_of_months) == 0 or len(week_of_months) > 1: + return None + + # get which week + week = week_of_months[0] + 1 + wd = int_to_weekday[weekdays[0]] + + return f"WOM-{week}{wd}" + + +class _TimedeltaFrequencyInferer(_FrequencyInferer): + def _infer_daily_rule(self): + if self.is_unique: + return self._get_daily_rule() + + +def _is_multiple(us, mult: int) -> bool: + return us % mult == 0 + + +def _maybe_add_count(base: str, count: float) -> str: + if count != 1: + assert count == int(count) + count = int(count) + return f"{count}{base}" + else: + return base + + +# ---------------------------------------------------------------------- +# Frequency comparison + + +def is_subperiod(source, target) -> bool: + """ + Returns True if downsampling is possible between source and target + frequencies + + Parameters + ---------- + source : str or DateOffset + Frequency converting from + target : str or DateOffset + Frequency converting to + + Returns + ------- + bool + """ + if target is None or source is None: + return False + source = _maybe_coerce_freq(source) + target = _maybe_coerce_freq(target) + + if _is_annual(target): + if _is_quarterly(source): + return _quarter_months_conform( + get_rule_month(source), get_rule_month(target) + ) + return source in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"} + elif _is_quarterly(target): + return source in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"} + elif _is_monthly(target): + return source in {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"} + elif _is_weekly(target): + return source in {target, "D", "C", "B", "h", "min", "s", "ms", "us", "ns"} + elif target == "B": + return source in {"B", "h", "min", "s", "ms", "us", "ns"} + elif target == "C": + return source in {"C", "h", "min", "s", "ms", "us", "ns"} + elif target == "D": + return source in {"D", "h", "min", "s", "ms", "us", "ns"} + elif target == "h": + return source in {"h", "min", "s", "ms", "us", "ns"} + elif target == "min": + return source in {"min", "s", "ms", "us", "ns"} + elif target == "s": + return source in {"s", "ms", "us", "ns"} + elif target == "ms": + return source in {"ms", "us", "ns"} + elif target == "us": + return source in {"us", "ns"} + elif target == "ns": + return source in {"ns"} + else: + return False + + +def is_superperiod(source, target) -> bool: + """ + Returns True if upsampling is possible between source and target + frequencies + + Parameters + ---------- + source : str or DateOffset + Frequency converting from + target : str or DateOffset + Frequency converting to + + Returns + ------- + bool + """ + if target is None or source is None: + return False + 
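+    # Illustrative behaviour, hand-checked against the branches below:
+    #   is_superperiod("Y", "Q")  -> True   annual can be upsampled to quarterly
+    #   is_superperiod("M", "D")  -> True
+    #   is_superperiod("D", "M")  -> False  (that direction is is_subperiod)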
source = _maybe_coerce_freq(source) + target = _maybe_coerce_freq(target) + + if _is_annual(source): + if _is_annual(target): + return get_rule_month(source) == get_rule_month(target) + + if _is_quarterly(target): + smonth = get_rule_month(source) + tmonth = get_rule_month(target) + return _quarter_months_conform(smonth, tmonth) + return target in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"} + elif _is_quarterly(source): + return target in {"D", "C", "B", "M", "h", "min", "s", "ms", "us", "ns"} + elif _is_monthly(source): + return target in {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"} + elif _is_weekly(source): + return target in {source, "D", "C", "B", "h", "min", "s", "ms", "us", "ns"} + elif source == "B": + return target in {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"} + elif source == "C": + return target in {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"} + elif source == "D": + return target in {"D", "C", "B", "h", "min", "s", "ms", "us", "ns"} + elif source == "h": + return target in {"h", "min", "s", "ms", "us", "ns"} + elif source == "min": + return target in {"min", "s", "ms", "us", "ns"} + elif source == "s": + return target in {"s", "ms", "us", "ns"} + elif source == "ms": + return target in {"ms", "us", "ns"} + elif source == "us": + return target in {"us", "ns"} + elif source == "ns": + return target in {"ns"} + else: + return False + + +def _maybe_coerce_freq(code) -> str: + """we might need to coerce a code to a rule_code + and uppercase it + + Parameters + ---------- + source : str or DateOffset + Frequency converting from + + Returns + ------- + str + """ + assert code is not None + if isinstance(code, DateOffset): + code = freq_to_period_freqstr(1, code.name) + if code in {"h", "min", "s", "ms", "us", "ns"}: + return code + else: + return code.upper() + + +def _quarter_months_conform(source: str, target: str) -> bool: + snum = MONTH_NUMBERS[source] + tnum = MONTH_NUMBERS[target] + return snum % 3 == tnum % 3 + + +def _is_annual(rule: str) -> bool: + rule = rule.upper() + return rule == "Y" or rule.startswith("Y-") + + +def _is_quarterly(rule: str) -> bool: + rule = rule.upper() + return rule == "Q" or rule.startswith(("Q-", "BQ")) + + +def _is_monthly(rule: str) -> bool: + rule = rule.upper() + return rule in ("M", "BM") + + +def _is_weekly(rule: str) -> bool: + rule = rule.upper() + return rule == "W" or rule.startswith("W-") + + +__all__ = [ + "Day", + "get_period_alias", + "infer_freq", + "is_subperiod", + "is_superperiod", + "to_offset", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/holiday.py b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/holiday.py new file mode 100644 index 0000000000000000000000000000000000000000..3c429a960b45181c831055cb3e877dbc4cad3b30 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/holiday.py @@ -0,0 +1,634 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, +) +import warnings + +from dateutil.relativedelta import ( + FR, + MO, + SA, + SU, + TH, + TU, + WE, +) +import numpy as np + +from pandas.errors import PerformanceWarning + +from pandas import ( + DateOffset, + DatetimeIndex, + Series, + Timestamp, + concat, + date_range, +) + +from pandas.tseries.offsets import ( + Day, + Easter, +) + + +def next_monday(dt: datetime) -> datetime: + """ + If holiday falls on Saturday, use following Monday instead; + if holiday falls on Sunday, use Monday instead + """ + if dt.weekday() == 5: + return dt + timedelta(2) + elif 
dt.weekday() == 6: + return dt + timedelta(1) + return dt + + +def next_monday_or_tuesday(dt: datetime) -> datetime: + """ + For second holiday of two adjacent ones! + If holiday falls on Saturday, use following Monday instead; + if holiday falls on Sunday or Monday, use following Tuesday instead + (because Monday is already taken by adjacent holiday on the day before) + """ + dow = dt.weekday() + if dow in (5, 6): + return dt + timedelta(2) + if dow == 0: + return dt + timedelta(1) + return dt + + +def previous_friday(dt: datetime) -> datetime: + """ + If holiday falls on Saturday or Sunday, use previous Friday instead. + """ + if dt.weekday() == 5: + return dt - timedelta(1) + elif dt.weekday() == 6: + return dt - timedelta(2) + return dt + + +def sunday_to_monday(dt: datetime) -> datetime: + """ + If holiday falls on Sunday, use day thereafter (Monday) instead. + """ + if dt.weekday() == 6: + return dt + timedelta(1) + return dt + + +def weekend_to_monday(dt: datetime) -> datetime: + """ + If holiday falls on Sunday or Saturday, + use day thereafter (Monday) instead. + Needed for holidays such as Christmas observation in Europe + """ + if dt.weekday() == 6: + return dt + timedelta(1) + elif dt.weekday() == 5: + return dt + timedelta(2) + return dt + + +def nearest_workday(dt: datetime) -> datetime: + """ + If holiday falls on Saturday, use day before (Friday) instead; + if holiday falls on Sunday, use day thereafter (Monday) instead. + """ + if dt.weekday() == 5: + return dt - timedelta(1) + elif dt.weekday() == 6: + return dt + timedelta(1) + return dt + + +def next_workday(dt: datetime) -> datetime: + """ + returns next weekday used for observances + """ + dt += timedelta(days=1) + while dt.weekday() > 4: + # Mon-Fri are 0-4 + dt += timedelta(days=1) + return dt + + +def previous_workday(dt: datetime) -> datetime: + """ + returns previous weekday used for observances + """ + dt -= timedelta(days=1) + while dt.weekday() > 4: + # Mon-Fri are 0-4 + dt -= timedelta(days=1) + return dt + + +def before_nearest_workday(dt: datetime) -> datetime: + """ + returns previous workday after nearest workday + """ + return previous_workday(nearest_workday(dt)) + + +def after_nearest_workday(dt: datetime) -> datetime: + """ + returns next workday after nearest workday + needed for Boxing day or multiple holidays in a series + """ + return next_workday(nearest_workday(dt)) + + +class Holiday: + """ + Class that defines a holiday with start/end dates and rules + for observance. + """ + + start_date: Timestamp | None + end_date: Timestamp | None + days_of_week: tuple[int, ...] | None + + def __init__( + self, + name: str, + year=None, + month=None, + day=None, + offset=None, + observance=None, + start_date=None, + end_date=None, + days_of_week=None, + ) -> None: + """ + Parameters + ---------- + name : str + Name of the holiday , defaults to class name + offset : array of pandas.tseries.offsets or + class from pandas.tseries.offsets + computes offset from date + observance: function + computes when holiday is given a pandas Timestamp + days_of_week: + provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday + Monday=0,..,Sunday=6 + + Examples + -------- + >>> from dateutil.relativedelta import MO + + >>> USMemorialDay = pd.tseries.holiday.Holiday( + ... "Memorial Day", month=5, day=31, offset=pd.DateOffset(weekday=MO(-1)) + ... ) + >>> USMemorialDay + Holiday: Memorial Day (month=5, day=31, offset=) + + >>> USLaborDay = pd.tseries.holiday.Holiday( + ... 
"Labor Day", month=9, day=1, offset=pd.DateOffset(weekday=MO(1)) + ... ) + >>> USLaborDay + Holiday: Labor Day (month=9, day=1, offset=) + + >>> July3rd = pd.tseries.holiday.Holiday("July 3rd", month=7, day=3) + >>> July3rd + Holiday: July 3rd (month=7, day=3, ) + + >>> NewYears = pd.tseries.holiday.Holiday( + ... "New Years Day", month=1, day=1, + ... observance=pd.tseries.holiday.nearest_workday + ... ) + >>> NewYears # doctest: +SKIP + Holiday: New Years Day ( + month=1, day=1, observance= + ) + + >>> July3rd = pd.tseries.holiday.Holiday( + ... "July 3rd", month=7, day=3, + ... days_of_week=(0, 1, 2, 3) + ... ) + >>> July3rd + Holiday: July 3rd (month=7, day=3, ) + """ + if offset is not None and observance is not None: + raise NotImplementedError("Cannot use both offset and observance.") + + self.name = name + self.year = year + self.month = month + self.day = day + self.offset = offset + self.start_date = ( + Timestamp(start_date) if start_date is not None else start_date + ) + self.end_date = Timestamp(end_date) if end_date is not None else end_date + self.observance = observance + assert days_of_week is None or type(days_of_week) == tuple + self.days_of_week = days_of_week + + def __repr__(self) -> str: + info = "" + if self.year is not None: + info += f"year={self.year}, " + info += f"month={self.month}, day={self.day}, " + + if self.offset is not None: + info += f"offset={self.offset}" + + if self.observance is not None: + info += f"observance={self.observance}" + + repr = f"Holiday: {self.name} ({info})" + return repr + + def dates( + self, start_date, end_date, return_name: bool = False + ) -> Series | DatetimeIndex: + """ + Calculate holidays observed between start date and end date + + Parameters + ---------- + start_date : starting date, datetime-like, optional + end_date : ending date, datetime-like, optional + return_name : bool, optional, default=False + If True, return a series that has dates and holiday names. + False will only return dates. + + Returns + ------- + Series or DatetimeIndex + Series if return_name is True + """ + start_date = Timestamp(start_date) + end_date = Timestamp(end_date) + + filter_start_date = start_date + filter_end_date = end_date + + if self.year is not None: + dt = Timestamp(datetime(self.year, self.month, self.day)) + dti = DatetimeIndex([dt]) + if return_name: + return Series(self.name, index=dti) + else: + return dti + + dates = self._reference_dates(start_date, end_date) + holiday_dates = self._apply_rule(dates) + if self.days_of_week is not None: + holiday_dates = holiday_dates[ + np.isin( + # error: "DatetimeIndex" has no attribute "dayofweek" + holiday_dates.dayofweek, # type: ignore[attr-defined] + self.days_of_week, + ).ravel() + ] + + if self.start_date is not None: + filter_start_date = max( + self.start_date.tz_localize(filter_start_date.tz), filter_start_date + ) + if self.end_date is not None: + filter_end_date = min( + self.end_date.tz_localize(filter_end_date.tz), filter_end_date + ) + holiday_dates = holiday_dates[ + (holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date) + ] + if return_name: + return Series(self.name, index=holiday_dates) + return holiday_dates + + def _reference_dates( + self, start_date: Timestamp, end_date: Timestamp + ) -> DatetimeIndex: + """ + Get reference dates for the holiday. + + Return reference dates for the holiday also returning the year + prior to the start_date and year following the end_date. 
This ensures + that any offsets to be applied will yield the holidays within + the passed in dates. + """ + if self.start_date is not None: + start_date = self.start_date.tz_localize(start_date.tz) + + if self.end_date is not None: + end_date = self.end_date.tz_localize(start_date.tz) + + year_offset = DateOffset(years=1) + reference_start_date = Timestamp( + datetime(start_date.year - 1, self.month, self.day) + ) + + reference_end_date = Timestamp( + datetime(end_date.year + 1, self.month, self.day) + ) + # Don't process unnecessary holidays + dates = date_range( + start=reference_start_date, + end=reference_end_date, + freq=year_offset, + tz=start_date.tz, + ) + + return dates + + def _apply_rule(self, dates: DatetimeIndex) -> DatetimeIndex: + """ + Apply the given offset/observance to a DatetimeIndex of dates. + + Parameters + ---------- + dates : DatetimeIndex + Dates to apply the given offset/observance rule + + Returns + ------- + Dates with rules applied + """ + if dates.empty: + return dates.copy() + + if self.observance is not None: + return dates.map(lambda d: self.observance(d)) + + if self.offset is not None: + if not isinstance(self.offset, list): + offsets = [self.offset] + else: + offsets = self.offset + for offset in offsets: + # if we are adding a non-vectorized value + # ignore the PerformanceWarnings: + with warnings.catch_warnings(): + warnings.simplefilter("ignore", PerformanceWarning) + dates += offset + return dates + + +holiday_calendars = {} + + +def register(cls) -> None: + try: + name = cls.name + except AttributeError: + name = cls.__name__ + holiday_calendars[name] = cls + + +def get_calendar(name: str): + """ + Return an instance of a calendar based on its name. + + Parameters + ---------- + name : str + Calendar name to return an instance of + """ + return holiday_calendars[name]() + + +class HolidayCalendarMetaClass(type): + def __new__(cls, clsname: str, bases, attrs): + calendar_class = super().__new__(cls, clsname, bases, attrs) + register(calendar_class) + return calendar_class + + +class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass): + """ + Abstract interface to create holidays following certain rules. + """ + + rules: list[Holiday] = [] + start_date = Timestamp(datetime(1970, 1, 1)) + end_date = Timestamp(datetime(2200, 12, 31)) + _cache = None + + def __init__(self, name: str = "", rules=None) -> None: + """ + Initializes holiday object with a given set a rules. Normally + classes just have the rules defined within them. + + Parameters + ---------- + name : str + Name of the holiday calendar, defaults to class name + rules : array of Holiday objects + A set of rules used to create the holidays. + """ + super().__init__() + if not name: + name = type(self).__name__ + self.name = name + + if rules is not None: + self.rules = rules + + def rule_from_name(self, name: str): + for rule in self.rules: + if rule.name == name: + return rule + + return None + + def holidays(self, start=None, end=None, return_name: bool = False): + """ + Returns a curve with holidays between start_date and end_date + + Parameters + ---------- + start : starting date, datetime-like, optional + end : ending date, datetime-like, optional + return_name : bool, optional + If True, return a series that has dates and holiday names. + False will only return a DatetimeIndex of dates. 
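+
+        A rough usage sketch (the calendar and date window are illustrative):
+
+        >>> cal = USFederalHolidayCalendar()
+        >>> cal.holidays(start="2024-07-01", end="2024-07-31")  # doctest: +SKIP
+        DatetimeIndex(['2024-07-04'], dtype='datetime64[ns]', freq=None)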
+ + Returns + ------- + DatetimeIndex of holidays + """ + if self.rules is None: + raise Exception( + f"Holiday Calendar {self.name} does not have any rules specified" + ) + + if start is None: + start = AbstractHolidayCalendar.start_date + + if end is None: + end = AbstractHolidayCalendar.end_date + + start = Timestamp(start) + end = Timestamp(end) + + # If we don't have a cache or the dates are outside the prior cache, we + # get them again + if self._cache is None or start < self._cache[0] or end > self._cache[1]: + pre_holidays = [ + rule.dates(start, end, return_name=True) for rule in self.rules + ] + if pre_holidays: + # error: Argument 1 to "concat" has incompatible type + # "List[Union[Series, DatetimeIndex]]"; expected + # "Union[Iterable[DataFrame], Mapping[, DataFrame]]" + holidays = concat(pre_holidays) # type: ignore[arg-type] + else: + # error: Incompatible types in assignment (expression has type + # "Series", variable has type "DataFrame") + holidays = Series( + index=DatetimeIndex([]), dtype=object + ) # type: ignore[assignment] + + self._cache = (start, end, holidays.sort_index()) + + holidays = self._cache[2] + holidays = holidays[start:end] + + if return_name: + return holidays + else: + return holidays.index + + @staticmethod + def merge_class(base, other): + """ + Merge holiday calendars together. The base calendar + will take precedence to other. The merge will be done + based on each holiday's name. + + Parameters + ---------- + base : AbstractHolidayCalendar + instance/subclass or array of Holiday objects + other : AbstractHolidayCalendar + instance/subclass or array of Holiday objects + """ + try: + other = other.rules + except AttributeError: + pass + + if not isinstance(other, list): + other = [other] + other_holidays = {holiday.name: holiday for holiday in other} + + try: + base = base.rules + except AttributeError: + pass + + if not isinstance(base, list): + base = [base] + base_holidays = {holiday.name: holiday for holiday in base} + + other_holidays.update(base_holidays) + return list(other_holidays.values()) + + def merge(self, other, inplace: bool = False): + """ + Merge holiday calendars together. The caller's class + rules take precedence. The merge will be done + based on each holiday's name. 
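+
+        A minimal sketch (class and rule names below are made up for
+        illustration):
+
+        >>> class CalA(AbstractHolidayCalendar):
+        ...     rules = [Holiday("A Day", month=5, day=1)]
+        >>> class CalB(AbstractHolidayCalendar):
+        ...     rules = [Holiday("B Day", month=6, day=1)]
+        >>> merged = CalA().merge(CalB())
+        >>> sorted(rule.name for rule in merged)
+        ['A Day', 'B Day']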
+ + Parameters + ---------- + other : holiday calendar + inplace : bool (default=False) + If True set rule_table to holidays, else return array of Holidays + """ + holidays = self.merge_class(self, other) + if inplace: + self.rules = holidays + else: + return holidays + + +USMemorialDay = Holiday( + "Memorial Day", month=5, day=31, offset=DateOffset(weekday=MO(-1)) +) +USLaborDay = Holiday("Labor Day", month=9, day=1, offset=DateOffset(weekday=MO(1))) +USColumbusDay = Holiday( + "Columbus Day", month=10, day=1, offset=DateOffset(weekday=MO(2)) +) +USThanksgivingDay = Holiday( + "Thanksgiving Day", month=11, day=1, offset=DateOffset(weekday=TH(4)) +) +USMartinLutherKingJr = Holiday( + "Birthday of Martin Luther King, Jr.", + start_date=datetime(1986, 1, 1), + month=1, + day=1, + offset=DateOffset(weekday=MO(3)), +) +USPresidentsDay = Holiday( + "Washington's Birthday", month=2, day=1, offset=DateOffset(weekday=MO(3)) +) +GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)]) + +EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)]) + + +class USFederalHolidayCalendar(AbstractHolidayCalendar): + """ + US Federal Government Holiday Calendar based on rules specified by: + https://www.opm.gov/policy-data-oversight/pay-leave/federal-holidays/ + """ + + rules = [ + Holiday("New Year's Day", month=1, day=1, observance=nearest_workday), + USMartinLutherKingJr, + USPresidentsDay, + USMemorialDay, + Holiday( + "Juneteenth National Independence Day", + month=6, + day=19, + start_date="2021-06-18", + observance=nearest_workday, + ), + Holiday("Independence Day", month=7, day=4, observance=nearest_workday), + USLaborDay, + USColumbusDay, + Holiday("Veterans Day", month=11, day=11, observance=nearest_workday), + USThanksgivingDay, + Holiday("Christmas Day", month=12, day=25, observance=nearest_workday), + ] + + +def HolidayCalendarFactory(name: str, base, other, base_class=AbstractHolidayCalendar): + rules = AbstractHolidayCalendar.merge_class(base, other) + calendar_class = type(name, (base_class,), {"rules": rules, "name": name}) + return calendar_class + + +__all__ = [ + "after_nearest_workday", + "before_nearest_workday", + "FR", + "get_calendar", + "HolidayCalendarFactory", + "MO", + "nearest_workday", + "next_monday", + "next_monday_or_tuesday", + "next_workday", + "previous_friday", + "previous_workday", + "register", + "SA", + "SU", + "sunday_to_monday", + "TH", + "TU", + "WE", + "weekend_to_monday", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/offsets.py b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/offsets.py new file mode 100644 index 0000000000000000000000000000000000000000..169c9cc18a7fde6289a112ba5932cbb634eb3714 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/offsets.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from pandas._libs.tslibs.offsets import ( + FY5253, + BaseOffset, + BDay, + BMonthBegin, + BMonthEnd, + BQuarterBegin, + BQuarterEnd, + BusinessDay, + BusinessHour, + BusinessMonthBegin, + BusinessMonthEnd, + BYearBegin, + BYearEnd, + CBMonthBegin, + CBMonthEnd, + CDay, + CustomBusinessDay, + CustomBusinessHour, + CustomBusinessMonthBegin, + CustomBusinessMonthEnd, + DateOffset, + Day, + Easter, + FY5253Quarter, + Hour, + LastWeekOfMonth, + Micro, + Milli, + Minute, + MonthBegin, + MonthEnd, + Nano, + QuarterBegin, + QuarterEnd, + Second, + SemiMonthBegin, + SemiMonthEnd, + Tick, + Week, + WeekOfMonth, + YearBegin, + YearEnd, +) + +__all__ = [ + 
"Day", + "BaseOffset", + "BusinessDay", + "BusinessMonthBegin", + "BusinessMonthEnd", + "BDay", + "CustomBusinessDay", + "CustomBusinessMonthBegin", + "CustomBusinessMonthEnd", + "CDay", + "CBMonthEnd", + "CBMonthBegin", + "MonthBegin", + "BMonthBegin", + "MonthEnd", + "BMonthEnd", + "SemiMonthEnd", + "SemiMonthBegin", + "BusinessHour", + "CustomBusinessHour", + "YearBegin", + "BYearBegin", + "YearEnd", + "BYearEnd", + "QuarterBegin", + "BQuarterBegin", + "QuarterEnd", + "BQuarterEnd", + "LastWeekOfMonth", + "FY5253Quarter", + "FY5253", + "Week", + "WeekOfMonth", + "Easter", + "Tick", + "Hour", + "Minute", + "Second", + "Milli", + "Micro", + "Nano", + "DateOffset", +]