diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_154_mp_rank_00_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_154_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..9d06bb964f1e24c8e1ce9a912c13ba549d8c8eda --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_154_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b875ee81b5c8375be7e5410ba10694a6d6a02a66e9f188eb085970b012668a5a +size 41830148 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_169_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_169_mp_rank_01_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..a675b93075ba507c05b3be4a1132427be6318e2b --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_169_mp_rank_01_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8c1c8c86830ae13ac8451ec68bcea1534070ceac5b00547b696235339aa3af6 +size 41830148 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_191_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_191_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..17eacf98e566b054294454a420968af56dec7519 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_191_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcb402ef8e202173495e1f2984b5128dcb865c560a032f40686c0af7b3adf760 +size 41830340 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_220_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_220_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..af19a124dfcd2ad88c23a6258f288067bedc799d --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_220_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ba7f4713f5a4fd90d981efcbceb2d421b2306259122738a1d3c6eb8aa19b359 +size 41830468 diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/byteswap.pyi b/venv/lib/python3.10/site-packages/pandas/_libs/byteswap.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bb0dbfc6a50b1bb7cd509dc5b3dfeed55ad70b09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_libs/byteswap.pyi @@ -0,0 +1,5 @@ +def read_float_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ... +def read_double_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ... +def read_uint16_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... +def read_uint32_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... +def read_uint64_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... 
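Note on the checkpoint entries above: they are Git LFS pointer files rather than the optimizer-state payloads themselves, each consisting of three "key value" lines (spec version, sha256 object id, byte size). As a rough illustration of that layout only, the following sketch parses such a pointer into a dict; parse_lfs_pointer is a hypothetical helper and is not part of this diff.

    def parse_lfs_pointer(text: str) -> dict:
        """Parse a Git LFS pointer file like the *_optim_states.pt entries above."""
        fields = {}
        for line in text.strip().splitlines():
            # Each line is "<key> <value>", e.g. "size 41830148"
            key, _, value = line.partition(" ")
            fields[key] = value
        # Expected keys: 'version' (spec URL), 'oid' ('sha256:<hex>'), 'size' (bytes as str)
        return fields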
diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/groupby.pyi b/venv/lib/python3.10/site-packages/pandas/_libs/groupby.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a494b61fa7e3d70c77a921eb8bfcc269c76db9fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_libs/groupby.pyi @@ -0,0 +1,216 @@ +from typing import Literal + +import numpy as np + +from pandas._typing import npt + +def group_median_float64( + out: np.ndarray, # ndarray[float64_t, ndim=2] + counts: npt.NDArray[np.int64], + values: np.ndarray, # ndarray[float64_t, ndim=2] + labels: npt.NDArray[np.int64], + min_count: int = ..., # Py_ssize_t + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cumprod( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # const float64_t[:, :] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cumsum( + out: np.ndarray, # int64float_t[:, ::1] + values: np.ndarray, # ndarray[int64float_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_shift_indexer( + out: np.ndarray, # int64_t[::1] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + periods: int, +) -> None: ... +def group_fillna_indexer( + out: np.ndarray, # ndarray[intp_t] + labels: np.ndarray, # ndarray[int64_t] + sorted_labels: npt.NDArray[np.intp], + mask: npt.NDArray[np.uint8], + limit: int, # int64_t + dropna: bool, +) -> None: ... +def group_any_all( + out: np.ndarray, # uint8_t[::1] + values: np.ndarray, # const uint8_t[::1] + labels: np.ndarray, # const int64_t[:] + mask: np.ndarray, # const uint8_t[::1] + val_test: Literal["any", "all"], + skipna: bool, + result_mask: np.ndarray | None, +) -> None: ... +def group_sum( + out: np.ndarray, # complexfloatingintuint_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[complexfloatingintuint_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., + min_count: int = ..., + is_datetimelike: bool = ..., +) -> None: ... +def group_prod( + out: np.ndarray, # int64float_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[int64float_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., + min_count: int = ..., +) -> None: ... +def group_var( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., # Py_ssize_t + ddof: int = ..., # int64_t + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + is_datetimelike: bool = ..., + name: str = ..., +) -> None: ... +def group_skew( + out: np.ndarray, # float64_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[float64_T, ndim=2] + labels: np.ndarray, # const intp_t[::1] + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... 
+def group_mean( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., # Py_ssize_t + is_datetimelike: bool = ..., # bint + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_ohlc( + out: np.ndarray, # floatingintuint_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floatingintuint_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_quantile( + out: npt.NDArray[np.float64], + values: np.ndarray, # ndarray[numeric, ndim=1] + labels: npt.NDArray[np.intp], + mask: npt.NDArray[np.uint8], + qs: npt.NDArray[np.float64], # const + starts: npt.NDArray[np.int64], + ends: npt.NDArray[np.int64], + interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], + result_mask: np.ndarray | None, + is_datetimelike: bool, +) -> None: ... +def group_last( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., + min_count: int = ..., # Py_ssize_t + is_datetimelike: bool = ..., + skipna: bool = ..., +) -> None: ... +def group_nth( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., + min_count: int = ..., # int64_t + rank: int = ..., # int64_t + is_datetimelike: bool = ..., + skipna: bool = ..., +) -> None: ... +def group_rank( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + ties_method: Literal["average", "min", "max", "first", "dense"] = ..., + ascending: bool = ..., + pct: bool = ..., + na_option: Literal["keep", "top", "bottom"] = ..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> None: ... +def group_max( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_min( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_idxmin_idxmax( + out: npt.NDArray[np.intp], + counts: npt.NDArray[np.int64], + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: npt.NDArray[np.intp], + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + name: str = ..., + skipna: bool = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... 
+def group_cummin( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... +def group_cummax( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fe5f310aa8123dc096570d78b1c27e3a524dcd53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/hashing.pyi b/venv/lib/python3.10/site-packages/pandas/_libs/hashing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8361026e4a87d462e04c53f7f1f8aee8a7f6ffe0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_libs/hashing.pyi @@ -0,0 +1,9 @@ +import numpy as np + +from pandas._typing import npt + +def hash_object_array( + arr: npt.NDArray[np.object_], + key: str, + encoding: str = ..., +) -> npt.NDArray[np.uint64]: ... diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/index.pyi b/venv/lib/python3.10/site-packages/pandas/_libs/index.pyi new file mode 100644 index 0000000000000000000000000000000000000000..75db47bf3160e828b178c0cd733452c9dc5e8919 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_libs/index.pyi @@ -0,0 +1,100 @@ +import numpy as np + +from pandas._typing import npt + +from pandas import MultiIndex +from pandas.core.arrays import ExtensionArray + +multiindex_nulls_shift: int + +class IndexEngine: + over_size_threshold: bool + def __init__(self, values: np.ndarray) -> None: ... + def __contains__(self, val: object) -> bool: ... + + # -> int | slice | np.ndarray[bool] + def get_loc(self, val: object) -> int | slice | np.ndarray: ... + def sizeof(self, deep: bool = ...) -> int: ... + def __sizeof__(self) -> int: ... + @property + def is_unique(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + @property + def is_monotonic_decreasing(self) -> bool: ... + @property + def is_mapping_populated(self) -> bool: ... + def clear_mapping(self): ... + def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, + targets: np.ndarray, + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + +class MaskedIndexEngine(IndexEngine): + def __init__(self, values: object) -> None: ... + def get_indexer_non_unique( + self, targets: object + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + +class Float64Engine(IndexEngine): ... +class Float32Engine(IndexEngine): ... +class Complex128Engine(IndexEngine): ... +class Complex64Engine(IndexEngine): ... +class Int64Engine(IndexEngine): ... +class Int32Engine(IndexEngine): ... +class Int16Engine(IndexEngine): ... +class Int8Engine(IndexEngine): ... +class UInt64Engine(IndexEngine): ... +class UInt32Engine(IndexEngine): ... +class UInt16Engine(IndexEngine): ... +class UInt8Engine(IndexEngine): ... +class ObjectEngine(IndexEngine): ... 
+class DatetimeEngine(Int64Engine): ... +class TimedeltaEngine(DatetimeEngine): ... +class PeriodEngine(Int64Engine): ... +class BoolEngine(UInt8Engine): ... +class MaskedFloat64Engine(MaskedIndexEngine): ... +class MaskedFloat32Engine(MaskedIndexEngine): ... +class MaskedComplex128Engine(MaskedIndexEngine): ... +class MaskedComplex64Engine(MaskedIndexEngine): ... +class MaskedInt64Engine(MaskedIndexEngine): ... +class MaskedInt32Engine(MaskedIndexEngine): ... +class MaskedInt16Engine(MaskedIndexEngine): ... +class MaskedInt8Engine(MaskedIndexEngine): ... +class MaskedUInt64Engine(MaskedIndexEngine): ... +class MaskedUInt32Engine(MaskedIndexEngine): ... +class MaskedUInt16Engine(MaskedIndexEngine): ... +class MaskedUInt8Engine(MaskedIndexEngine): ... +class MaskedBoolEngine(MaskedUInt8Engine): ... + +class BaseMultiIndexCodesEngine: + levels: list[np.ndarray] + offsets: np.ndarray # ndarray[uint64_t, ndim=1] + + def __init__( + self, + levels: list[np.ndarray], # all entries hashable + labels: list[np.ndarray], # all entries integer-dtyped + offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1] + ) -> None: ... + def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ... + def _extract_level_codes(self, target: MultiIndex) -> np.ndarray: ... + +class ExtensionEngine: + def __init__(self, values: ExtensionArray) -> None: ... + def __contains__(self, val: object) -> bool: ... + def get_loc(self, val: object) -> int | slice | np.ndarray: ... + def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, + targets: np.ndarray, + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + @property + def is_unique(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + @property + def is_monotonic_decreasing(self) -> bool: ... + def sizeof(self, deep: bool = ...) -> int: ... + def clear_mapping(self): ... diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/indexing.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pandas/_libs/indexing.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4e8f746b0a014a24114fdfe7af6b33a05a0b9a08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_libs/indexing.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/internals.pyi b/venv/lib/python3.10/site-packages/pandas/_libs/internals.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ffe6c7730bcdcfb8af1407bf55b59170c4093699 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_libs/internals.pyi @@ -0,0 +1,94 @@ +from typing import ( + Iterator, + Sequence, + final, + overload, +) +import weakref + +import numpy as np + +from pandas._typing import ( + ArrayLike, + Self, + npt, +) + +from pandas import Index +from pandas.core.internals.blocks import Block as B + +def slice_len(slc: slice, objlen: int = ...) -> int: ... +def get_concat_blkno_indexers( + blknos_list: list[npt.NDArray[np.intp]], +) -> list[tuple[npt.NDArray[np.intp], BlockPlacement]]: ... +def get_blkno_indexers( + blknos: np.ndarray, # int64_t[:] + group: bool = ..., +) -> list[tuple[int, slice | np.ndarray]]: ... +def get_blkno_placements( + blknos: np.ndarray, + group: bool = ..., +) -> Iterator[tuple[int, BlockPlacement]]: ... 
+def update_blklocs_and_blknos( + blklocs: npt.NDArray[np.intp], + blknos: npt.NDArray[np.intp], + loc: int, + nblocks: int, +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +@final +class BlockPlacement: + def __init__(self, val: int | slice | np.ndarray) -> None: ... + @property + def indexer(self) -> np.ndarray | slice: ... + @property + def as_array(self) -> np.ndarray: ... + @property + def as_slice(self) -> slice: ... + @property + def is_slice_like(self) -> bool: ... + @overload + def __getitem__( + self, loc: slice | Sequence[int] | npt.NDArray[np.intp] + ) -> BlockPlacement: ... + @overload + def __getitem__(self, loc: int) -> int: ... + def __iter__(self) -> Iterator[int]: ... + def __len__(self) -> int: ... + def delete(self, loc) -> BlockPlacement: ... + def add(self, other) -> BlockPlacement: ... + def append(self, others: list[BlockPlacement]) -> BlockPlacement: ... + def tile_for_unstack(self, factor: int) -> npt.NDArray[np.intp]: ... + +class Block: + _mgr_locs: BlockPlacement + ndim: int + values: ArrayLike + refs: BlockValuesRefs + def __init__( + self, + values: ArrayLike, + placement: BlockPlacement, + ndim: int, + refs: BlockValuesRefs | None = ..., + ) -> None: ... + def slice_block_rows(self, slicer: slice) -> Self: ... + +class BlockManager: + blocks: tuple[B, ...] + axes: list[Index] + _known_consolidated: bool + _is_consolidated: bool + _blknos: np.ndarray + _blklocs: np.ndarray + def __init__( + self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=... + ) -> None: ... + def get_slice(self, slobj: slice, axis: int = ...) -> Self: ... + def _rebuild_blknos_and_blklocs(self) -> None: ... + +class BlockValuesRefs: + referenced_blocks: list[weakref.ref] + def __init__(self, blk: Block | None = ...) -> None: ... + def add_reference(self, blk: Block) -> None: ... + def add_index_reference(self, index: Index) -> None: ... + def has_reference(self) -> bool: ... diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/json.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pandas/_libs/json.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9735fbb3eb8744a280ceb95ec482af38bf3de822 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_libs/json.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f6b1b985edfe12b8b49326cf86a78f88054a0ece Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/sparse.pyi b/venv/lib/python3.10/site-packages/pandas/_libs/sparse.pyi new file mode 100644 index 0000000000000000000000000000000000000000..536265b25425e9e7ecabd37f095115442a7cb209 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_libs/sparse.pyi @@ -0,0 +1,51 @@ +from typing import Sequence + +import numpy as np + +from pandas._typing import ( + Self, + npt, +) + +class SparseIndex: + length: int + npoints: int + def __init__(self) -> None: ... + @property + def ngaps(self) -> int: ... + @property + def nbytes(self) -> int: ... + @property + def indices(self) -> npt.NDArray[np.int32]: ... + def equals(self, other) -> bool: ... + def lookup(self, index: int) -> np.int32: ... 
+ def lookup_array(self, indexer: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]: ... + def to_int_index(self) -> IntIndex: ... + def to_block_index(self) -> BlockIndex: ... + def intersect(self, y_: SparseIndex) -> Self: ... + def make_union(self, y_: SparseIndex) -> Self: ... + +class IntIndex(SparseIndex): + indices: npt.NDArray[np.int32] + def __init__( + self, length: int, indices: Sequence[int], check_integrity: bool = ... + ) -> None: ... + +class BlockIndex(SparseIndex): + nblocks: int + blocs: np.ndarray + blengths: np.ndarray + def __init__( + self, length: int, blocs: np.ndarray, blengths: np.ndarray + ) -> None: ... + + # Override to have correct parameters + def intersect(self, other: SparseIndex) -> Self: ... + def make_union(self, y: SparseIndex) -> Self: ... + +def make_mask_object_ndarray( + arr: npt.NDArray[np.object_], fill_value +) -> npt.NDArray[np.bool_]: ... +def get_blocks( + indices: npt.NDArray[np.int32], +) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: ... diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d7b6470049b9f5a8c3ee46d8decd52df757fbd57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pandas/_libs/tslib.pyi b/venv/lib/python3.10/site-packages/pandas/_libs/tslib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5a340c1d88bc439663c2e482c9eb7270f8ebc5c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_libs/tslib.pyi @@ -0,0 +1,37 @@ +from datetime import tzinfo + +import numpy as np + +from pandas._typing import npt + +def format_array_from_datetime( + values: npt.NDArray[np.int64], + tz: tzinfo | None = ..., + format: str | None = ..., + na_rep: str | float = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.object_]: ... +def array_with_unit_to_datetime( + values: npt.NDArray[np.object_], + unit: str, + errors: str = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... +def first_non_null(values: np.ndarray) -> int: ... +def array_to_datetime( + values: npt.NDArray[np.object_], + errors: str = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: bool = ..., + creso: int = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... + +# returned ndarray may be object dtype or datetime64[ns] + +def array_to_datetime_with_tz( + values: npt.NDArray[np.object_], + tz: tzinfo, + dayfirst: bool, + yearfirst: bool, + creso: int, +) -> npt.NDArray[np.int64]: ... 
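The .pyi files added above only declare signatures for pandas' compiled extension modules (the corresponding .so binaries appear elsewhere in this diff). As a rough, hedged illustration of what the sparse.pyi declarations above correspond to at runtime, the sketch below exercises IntIndex/BlockIndex through the private pandas._libs.sparse API; this is an assumption-laden usage example against a pandas build matching these stubs, not part of the diff, and the commented values are what the declared signatures suggest.

    import numpy as np
    from pandas._libs.sparse import IntIndex  # private API; subject to change

    # IntIndex records the positions of non-fill values in a length-10 sparse array.
    idx = IntIndex(10, np.array([2, 3, 7], dtype=np.int32))
    print(idx.npoints, idx.ngaps)   # expected: 3 7
    print(idx.lookup(7))            # expected: 2 (position within the dense values)

    # Equivalent run-length ("block") representation declared by BlockIndex above.
    blk = idx.to_block_index()
    print(blk.blocs, blk.blengths)  # expected: [2 7] [2 1]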
diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b7ea6ab2fae4c20091da93b44c6353e07982fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/accessor.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26dd6045327162d954ab1705d8bde4ff7caa32e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/accessor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/algorithms.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/algorithms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b9ed0f0d576295abf725f7c4a06a742fac927e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/algorithms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c02a407797131e2f63192a814920fcade1d8824 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/apply.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7126d2a1f4eee5faf25a9f5192bb330062c4f072 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/apply.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/arraylike.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/arraylike.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a79b863020e9a924b7a0f3c5ff6f5ad3dfc19633 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/arraylike.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5389ca2f40327e51dc246f0c4430ab8b423db87b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02c9532df625ce8c04837f0bb09f9346eff3661d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/config_init.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/config_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5f31564d99343c284a2c0dd915b3e58a0a547d1 Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/config_init.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/construction.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/construction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe7b49be18d5eb7d782a3cfe3c8836c86e9b2513 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/construction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/flags.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/flags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3f6b16eb0d77ddbd2582fae494bde436d856d47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/flags.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0225d318dd4cd04adcbe91b9248cf785689f1af Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/generic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/generic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97935909b796193b4babed05d5cf60d9de536690 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/generic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cc0b0b0b04a078a63eac0dd92db44655b99d446 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/missing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb6e2f76970953a4272a646ea61c58c27d27d865 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/missing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/nanops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/nanops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60cde30c0fee7ac727d43ef69ad16b3c7bd74d38 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/nanops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/resample.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/resample.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e1904a42afdfa8373fc7993718a3a4568aa4c04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/resample.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/roperator.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/roperator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e18f1ce13779fb052b09ab9bd11b60190f217a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/roperator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/sample.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/sample.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3dfcc44f4453aadc4a028a09053dfb3c94005be Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/sample.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa96aa687ec81a58cb5ddbb8801ccf737305c5c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/shared_docs.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/shared_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19e642793ff29858827a6abe25ea77e3c445eeda Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/shared_docs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/__pycache__/sorting.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/sorting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5406bc3d3abfb4e30fdda205228bc3549164c60 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/__pycache__/sorting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/executor.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..0a26acb7df60a20a253ca3b864568c984bb86e3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/_numba/executor.py @@ -0,0 +1,239 @@ +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +if TYPE_CHECKING: + from pandas._typing import Scalar + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + + +@functools.cache +def generate_apply_looper(func, nopython=True, nogil=True, parallel=False): + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + nb_compat_func = numba.extending.register_jitable(func) + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def nb_looper(values, axis): + # Operate on the first row/col in order to get + # the output shape + if axis == 0: + first_elem = values[:, 0] + dim0 = values.shape[1] + else: + first_elem = values[0] + dim0 = values.shape[0] + res0 = nb_compat_func(first_elem) + # Use np.asarray to get shape for + # https://github.com/numba/numba/issues/4202#issuecomment-1185981507 + buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape + if axis == 0: + buf_shape = buf_shape[::-1] + buff = np.empty(buf_shape) + + if axis == 1: + buff[0] = res0 + for i in numba.prange(1, values.shape[0]): + buff[i] = nb_compat_func(values[i]) + else: + buff[:, 0] = res0 + for j in 
numba.prange(1, values.shape[1]): + buff[:, j] = nb_compat_func(values[:, j]) + return buff + + return nb_looper + + +@functools.cache +def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel): + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + if is_grouped_kernel: + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def column_looper( + values: np.ndarray, + labels: np.ndarray, + ngroups: int, + min_periods: int, + *args, + ): + result = np.empty((values.shape[0], ngroups), dtype=result_dtype) + na_positions = {} + for i in numba.prange(values.shape[0]): + output, na_pos = func( + values[i], result_dtype, labels, ngroups, min_periods, *args + ) + result[i] = output + if len(na_pos) > 0: + na_positions[i] = np.array(na_pos) + return result, na_positions + + else: + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def column_looper( + values: np.ndarray, + start: np.ndarray, + end: np.ndarray, + min_periods: int, + *args, + ): + result = np.empty((values.shape[0], len(start)), dtype=result_dtype) + na_positions = {} + for i in numba.prange(values.shape[0]): + output, na_pos = func( + values[i], result_dtype, start, end, min_periods, *args + ) + result[i] = output + if len(na_pos) > 0: + na_positions[i] = np.array(na_pos) + return result, na_positions + + return column_looper + + +default_dtype_mapping: dict[np.dtype, Any] = { + np.dtype("int8"): np.int64, + np.dtype("int16"): np.int64, + np.dtype("int32"): np.int64, + np.dtype("int64"): np.int64, + np.dtype("uint8"): np.uint64, + np.dtype("uint16"): np.uint64, + np.dtype("uint32"): np.uint64, + np.dtype("uint64"): np.uint64, + np.dtype("float32"): np.float64, + np.dtype("float64"): np.float64, + np.dtype("complex64"): np.complex128, + np.dtype("complex128"): np.complex128, +} + + +# TODO: Preserve complex dtypes + +float_dtype_mapping: dict[np.dtype, Any] = { + np.dtype("int8"): np.float64, + np.dtype("int16"): np.float64, + np.dtype("int32"): np.float64, + np.dtype("int64"): np.float64, + np.dtype("uint8"): np.float64, + np.dtype("uint16"): np.float64, + np.dtype("uint32"): np.float64, + np.dtype("uint64"): np.float64, + np.dtype("float32"): np.float64, + np.dtype("float64"): np.float64, + np.dtype("complex64"): np.float64, + np.dtype("complex128"): np.float64, +} + +identity_dtype_mapping: dict[np.dtype, Any] = { + np.dtype("int8"): np.int8, + np.dtype("int16"): np.int16, + np.dtype("int32"): np.int32, + np.dtype("int64"): np.int64, + np.dtype("uint8"): np.uint8, + np.dtype("uint16"): np.uint16, + np.dtype("uint32"): np.uint32, + np.dtype("uint64"): np.uint64, + np.dtype("float32"): np.float32, + np.dtype("float64"): np.float64, + np.dtype("complex64"): np.complex64, + np.dtype("complex128"): np.complex128, +} + + +def generate_shared_aggregator( + func: Callable[..., Scalar], + dtype_mapping: dict[np.dtype, np.dtype], + is_grouped_kernel: bool, + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a Numba function that loops over the columns 2D object and applies + a 1D numba kernel over each column. + + Parameters + ---------- + func : function + aggregation function to be applied to each column + dtype_mapping: dict or None + If not None, maps a dtype to a result dtype. + Otherwise, will fall back to default mapping. 
+ is_grouped_kernel: bool, default False + Whether func operates using the group labels (True) + or using starts/ends arrays + + If true, you also need to pass the number of groups to this function + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + + # A wrapper around the looper function, + # to dispatch based on dtype since numba is unable to do that in nopython mode + + # It also post-processes the values by inserting nans where number of observations + # is less than min_periods + # Cannot do this in numba nopython mode + # (you'll run into type-unification error when you cast int -> float) + def looper_wrapper( + values, + start=None, + end=None, + labels=None, + ngroups=None, + min_periods: int = 0, + **kwargs, + ): + result_dtype = dtype_mapping[values.dtype] + column_looper = make_looper( + func, result_dtype, is_grouped_kernel, nopython, nogil, parallel + ) + # Need to unpack kwargs since numba only supports *args + if is_grouped_kernel: + result, na_positions = column_looper( + values, labels, ngroups, min_periods, *kwargs.values() + ) + else: + result, na_positions = column_looper( + values, start, end, min_periods, *kwargs.values() + ) + if result.dtype.kind == "i": + # Look if na_positions is not empty + # If so, convert the whole block + # This is OK since int dtype cannot hold nan, + # so if min_periods not satisfied for 1 col, it is not satisfied for + # all columns at that index + for na_pos in na_positions.values(): + if len(na_pos) > 0: + result = result.astype("float64") + break + # TODO: Optimize this + for i, na_pos in na_positions.items(): + if len(na_pos) > 0: + result[i, na_pos] = np.nan + return result + + return looper_wrapper diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/extensions.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..ee09c9380fb0f2bbc2b84d2ad6fa44b21a0ab8ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/_numba/extensions.py @@ -0,0 +1,584 @@ +# Disable type checking for this module since numba's internals +# are not typed, and we use numba's internals via its extension API +# mypy: ignore-errors +""" +Utility classes/functions to let numba recognize +pandas Index/Series/DataFrame + +Mostly vendored from https://github.com/numba/numba/blob/main/numba/tests/pdlike_usecase.py +""" + +from __future__ import annotations + +from contextlib import contextmanager +import operator + +import numba +from numba import types +from numba.core import cgutils +from numba.core.datamodel import models +from numba.core.extending import ( + NativeValue, + box, + lower_builtin, + make_attribute_wrapper, + overload, + overload_attribute, + overload_method, + register_model, + type_callable, + typeof_impl, + unbox, +) +from numba.core.imputils import impl_ret_borrowed +import numpy as np + +from pandas._libs import lib + +from pandas.core.indexes.base import Index +from pandas.core.indexing import _iLocIndexer +from pandas.core.internals import SingleBlockManager +from pandas.core.series import Series + + +# Helper function to hack around fact that Index casts numpy string dtype to object +# +# Idea is to set an attribute on a Index called _numba_data +# that is the original data, or the object data casted to numpy string dtype, +# with a context manager that is unset afterwards 
+@contextmanager +def set_numba_data(index: Index): + numba_data = index._data + if numba_data.dtype == object: + if not lib.is_string_array(numba_data): + raise ValueError( + "The numba engine only supports using string or numeric column names" + ) + numba_data = numba_data.astype("U") + try: + index._numba_data = numba_data + yield index + finally: + del index._numba_data + + +# TODO: Range index support +# (this currently lowers OK, but does not round-trip) +class IndexType(types.Type): + """ + The type class for Index objects. + """ + + def __init__(self, dtype, layout, pyclass: any) -> None: + self.pyclass = pyclass + name = f"index({dtype}, {layout})" + self.dtype = dtype + self.layout = layout + super().__init__(name) + + @property + def key(self): + return self.pyclass, self.dtype, self.layout + + @property + def as_array(self): + return types.Array(self.dtype, 1, self.layout) + + def copy(self, dtype=None, ndim: int = 1, layout=None): + assert ndim == 1 + if dtype is None: + dtype = self.dtype + layout = layout or self.layout + return type(self)(dtype, layout, self.pyclass) + + +class SeriesType(types.Type): + """ + The type class for Series objects. + """ + + def __init__(self, dtype, index, namety) -> None: + assert isinstance(index, IndexType) + self.dtype = dtype + self.index = index + self.values = types.Array(self.dtype, 1, "C") + self.namety = namety + name = f"series({dtype}, {index}, {namety})" + super().__init__(name) + + @property + def key(self): + return self.dtype, self.index, self.namety + + @property + def as_array(self): + return self.values + + def copy(self, dtype=None, ndim: int = 1, layout: str = "C"): + assert ndim == 1 + assert layout == "C" + if dtype is None: + dtype = self.dtype + return type(self)(dtype, self.index, self.namety) + + +@typeof_impl.register(Index) +def typeof_index(val, c): + """ + This will assume that only strings are in object dtype + index. 
+ (you should check this before this gets lowered down to numba) + """ + # arrty = typeof_impl(val._data, c) + arrty = typeof_impl(val._numba_data, c) + assert arrty.ndim == 1 + return IndexType(arrty.dtype, arrty.layout, type(val)) + + +@typeof_impl.register(Series) +def typeof_series(val, c): + index = typeof_impl(val.index, c) + arrty = typeof_impl(val.values, c) + namety = typeof_impl(val.name, c) + assert arrty.ndim == 1 + assert arrty.layout == "C" + return SeriesType(arrty.dtype, index, namety) + + +@type_callable(Series) +def type_series_constructor(context): + def typer(data, index, name=None): + if isinstance(index, IndexType) and isinstance(data, types.Array): + assert data.ndim == 1 + if name is None: + name = types.intp + return SeriesType(data.dtype, index, name) + + return typer + + +@type_callable(Index) +def type_index_constructor(context): + def typer(data, hashmap=None): + if isinstance(data, types.Array): + assert data.layout == "C" + assert data.ndim == 1 + assert hashmap is None or isinstance(hashmap, types.DictType) + return IndexType(data.dtype, layout=data.layout, pyclass=Index) + + return typer + + +# Backend extensions for Index and Series and Frame +@register_model(IndexType) +class IndexModel(models.StructModel): + def __init__(self, dmm, fe_type) -> None: + # We don't want the numpy string scalar type in our hashmap + members = [ + ("data", fe_type.as_array), + # This is an attempt to emulate our hashtable code with a numba + # typed dict + # It maps from values in the index to their integer positions in the array + ("hashmap", types.DictType(fe_type.dtype, types.intp)), + # Pointer to the Index object this was created from, or that it + # boxes to + # https://numba.discourse.group/t/qst-how-to-cache-the-boxing-of-an-object/2128/2?u=lithomas1 + ("parent", types.pyobject), + ] + models.StructModel.__init__(self, dmm, fe_type, members) + + +@register_model(SeriesType) +class SeriesModel(models.StructModel): + def __init__(self, dmm, fe_type) -> None: + members = [ + ("index", fe_type.index), + ("values", fe_type.as_array), + ("name", fe_type.namety), + ] + models.StructModel.__init__(self, dmm, fe_type, members) + + +make_attribute_wrapper(IndexType, "data", "_data") +make_attribute_wrapper(IndexType, "hashmap", "hashmap") + +make_attribute_wrapper(SeriesType, "index", "index") +make_attribute_wrapper(SeriesType, "values", "values") +make_attribute_wrapper(SeriesType, "name", "name") + + +@lower_builtin(Series, types.Array, IndexType) +def pdseries_constructor(context, builder, sig, args): + data, index = args + series = cgutils.create_struct_proxy(sig.return_type)(context, builder) + series.index = index + series.values = data + series.name = context.get_constant(types.intp, 0) + return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue()) + + +@lower_builtin(Series, types.Array, IndexType, types.intp) +@lower_builtin(Series, types.Array, IndexType, types.float64) +@lower_builtin(Series, types.Array, IndexType, types.unicode_type) +def pdseries_constructor_with_name(context, builder, sig, args): + data, index, name = args + series = cgutils.create_struct_proxy(sig.return_type)(context, builder) + series.index = index + series.values = data + series.name = name + return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue()) + + +@lower_builtin(Index, types.Array, types.DictType, types.pyobject) +def index_constructor_2arg(context, builder, sig, args): + (data, hashmap, parent) = args + index = 
cgutils.create_struct_proxy(sig.return_type)(context, builder) + + index.data = data + index.hashmap = hashmap + index.parent = parent + return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue()) + + +@lower_builtin(Index, types.Array, types.DictType) +def index_constructor_2arg_parent(context, builder, sig, args): + # Basically same as index_constructor_1arg, but also lets you specify the + # parent object + (data, hashmap) = args + index = cgutils.create_struct_proxy(sig.return_type)(context, builder) + + index.data = data + index.hashmap = hashmap + return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue()) + + +@lower_builtin(Index, types.Array) +def index_constructor_1arg(context, builder, sig, args): + from numba.typed import Dict + + key_type = sig.return_type.dtype + value_type = types.intp + + def index_impl(data): + return Index(data, Dict.empty(key_type, value_type)) + + return context.compile_internal(builder, index_impl, sig, args) + + +# Helper to convert the unicodecharseq (numpy string scalar) into a unicode_type +# (regular string) +def maybe_cast_str(x): + # Dummy function that numba can overload + pass + + +@overload(maybe_cast_str) +def maybe_cast_str_impl(x): + """Converts numba UnicodeCharSeq (numpy string scalar) -> unicode type (string). + Is a no-op for other types.""" + if isinstance(x, types.UnicodeCharSeq): + return lambda x: str(x) + else: + return lambda x: x + + +@unbox(IndexType) +def unbox_index(typ, obj, c): + """ + Convert a Index object to a native structure. + + Note: Object dtype is not allowed here + """ + data_obj = c.pyapi.object_getattr_string(obj, "_numba_data") + index = cgutils.create_struct_proxy(typ)(c.context, c.builder) + # If we see an object array, assume its been validated as only containing strings + # We still need to do the conversion though + index.data = c.unbox(typ.as_array, data_obj).value + typed_dict_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numba.typed.Dict)) + # Create an empty typed dict in numba for the hashmap for indexing + # equiv of numba.typed.Dict.empty(typ.dtype, types.intp) + arr_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.dtype)) + intp_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(types.intp)) + hashmap_obj = c.pyapi.call_method( + typed_dict_obj, "empty", (arr_type_obj, intp_type_obj) + ) + index.hashmap = c.unbox(types.DictType(typ.dtype, types.intp), hashmap_obj).value + # Set the parent for speedy boxing. + index.parent = obj + + # Decrefs + c.pyapi.decref(data_obj) + c.pyapi.decref(arr_type_obj) + c.pyapi.decref(intp_type_obj) + c.pyapi.decref(typed_dict_obj) + + return NativeValue(index._getvalue()) + + +@unbox(SeriesType) +def unbox_series(typ, obj, c): + """ + Convert a Series object to a native structure. + """ + index_obj = c.pyapi.object_getattr_string(obj, "index") + values_obj = c.pyapi.object_getattr_string(obj, "values") + name_obj = c.pyapi.object_getattr_string(obj, "name") + + series = cgutils.create_struct_proxy(typ)(c.context, c.builder) + series.index = c.unbox(typ.index, index_obj).value + series.values = c.unbox(typ.values, values_obj).value + series.name = c.unbox(typ.namety, name_obj).value + + # Decrefs + c.pyapi.decref(index_obj) + c.pyapi.decref(values_obj) + c.pyapi.decref(name_obj) + + return NativeValue(series._getvalue()) + + +@box(IndexType) +def box_index(typ, val, c): + """ + Convert a native index structure to a Index object. + + If our native index is of a numpy string dtype, we'll cast it to + object. 
+ """ + # First build a Numpy array object, then wrap it in a Index + index = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) + + res = cgutils.alloca_once_value(c.builder, index.parent) + + # Does parent exist? + # (it means already boxed once, or Index same as original df.index or df.columns) + # xref https://github.com/numba/numba/blob/596e8a55334cc46854e3192766e643767bd7c934/numba/core/boxing.py#L593C17-L593C17 + with c.builder.if_else(cgutils.is_not_null(c.builder, index.parent)) as ( + has_parent, + otherwise, + ): + with has_parent: + c.pyapi.incref(index.parent) + with otherwise: + # TODO: preserve the original class for the index + # Also need preserve the name of the Index + # class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.pyclass)) + class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Index)) + array_obj = c.box(typ.as_array, index.data) + if isinstance(typ.dtype, types.UnicodeCharSeq): + # We converted to numpy string dtype, convert back + # to object since _simple_new won't do that for uss + object_str_obj = c.pyapi.unserialize(c.pyapi.serialize_object("object")) + array_obj = c.pyapi.call_method(array_obj, "astype", (object_str_obj,)) + c.pyapi.decref(object_str_obj) + # this is basically Index._simple_new(array_obj, name_obj) in python + index_obj = c.pyapi.call_method(class_obj, "_simple_new", (array_obj,)) + index.parent = index_obj + c.builder.store(index_obj, res) + + # Decrefs + c.pyapi.decref(class_obj) + c.pyapi.decref(array_obj) + return c.builder.load(res) + + +@box(SeriesType) +def box_series(typ, val, c): + """ + Convert a native series structure to a Series object. + """ + series = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) + series_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Series._from_mgr)) + mgr_const_obj = c.pyapi.unserialize( + c.pyapi.serialize_object(SingleBlockManager.from_array) + ) + index_obj = c.box(typ.index, series.index) + array_obj = c.box(typ.as_array, series.values) + name_obj = c.box(typ.namety, series.name) + # This is basically equivalent of + # pd.Series(data=array_obj, index=index_obj) + # To improve perf, we will construct the Series from a manager + # object to avoid checks. + # We'll also set the name attribute manually to avoid validation + mgr_obj = c.pyapi.call_function_objargs( + mgr_const_obj, + ( + array_obj, + index_obj, + ), + ) + mgr_axes_obj = c.pyapi.object_getattr_string(mgr_obj, "axes") + # Series._constructor_from_mgr(mgr, axes) + series_obj = c.pyapi.call_function_objargs( + series_const_obj, (mgr_obj, mgr_axes_obj) + ) + c.pyapi.object_setattr_string(series_obj, "_name", name_obj) + + # Decrefs + c.pyapi.decref(series_const_obj) + c.pyapi.decref(mgr_axes_obj) + c.pyapi.decref(mgr_obj) + c.pyapi.decref(mgr_const_obj) + c.pyapi.decref(index_obj) + c.pyapi.decref(array_obj) + c.pyapi.decref(name_obj) + + return series_obj + + +# Add common series reductions (e.g. mean, sum), +# and also add common binops (e.g. add, sub, mul, div) +def generate_series_reduction(ser_reduction, ser_method): + @overload_method(SeriesType, ser_reduction) + def series_reduction(series): + def series_reduction_impl(series): + return ser_method(series.values) + + return series_reduction_impl + + return series_reduction + + +def generate_series_binop(binop): + @overload(binop) + def series_binop(series1, value): + if isinstance(series1, SeriesType): + if isinstance(value, SeriesType): + + def series_binop_impl(series1, series2): + # TODO: Check index matching? 
+ return Series( + binop(series1.values, series2.values), + series1.index, + series1.name, + ) + + return series_binop_impl + else: + + def series_binop_impl(series1, value): + return Series( + binop(series1.values, value), series1.index, series1.name + ) + + return series_binop_impl + + return series_binop + + +series_reductions = [ + ("sum", np.sum), + ("mean", np.mean), + # Disabled due to discrepancies between numba std. dev + # and pandas std. dev (no way to specify dof) + # ("std", np.std), + # ("var", np.var), + ("min", np.min), + ("max", np.max), +] +for reduction, reduction_method in series_reductions: + generate_series_reduction(reduction, reduction_method) + +series_binops = [operator.add, operator.sub, operator.mul, operator.truediv] + +for ser_binop in series_binops: + generate_series_binop(ser_binop) + + +# get_loc on Index +@overload_method(IndexType, "get_loc") +def index_get_loc(index, item): + def index_get_loc_impl(index, item): + # Initialize the hash table if not initialized + if len(index.hashmap) == 0: + for i, val in enumerate(index._data): + index.hashmap[val] = i + return index.hashmap[item] + + return index_get_loc_impl + + +# Indexing for Series/Index +@overload(operator.getitem) +def series_indexing(series, item): + if isinstance(series, SeriesType): + + def series_getitem(series, item): + loc = series.index.get_loc(item) + return series.iloc[loc] + + return series_getitem + + +@overload(operator.getitem) +def index_indexing(index, idx): + if isinstance(index, IndexType): + + def index_getitem(index, idx): + return index._data[idx] + + return index_getitem + + +class IlocType(types.Type): + def __init__(self, obj_type) -> None: + self.obj_type = obj_type + name = f"iLocIndexer({obj_type})" + super().__init__(name=name) + + @property + def key(self): + return self.obj_type + + +@typeof_impl.register(_iLocIndexer) +def typeof_iloc(val, c): + objtype = typeof_impl(val.obj, c) + return IlocType(objtype) + + +@type_callable(_iLocIndexer) +def type_iloc_constructor(context): + def typer(obj): + if isinstance(obj, SeriesType): + return IlocType(obj) + + return typer + + +@lower_builtin(_iLocIndexer, SeriesType) +def iloc_constructor(context, builder, sig, args): + (obj,) = args + iloc_indexer = cgutils.create_struct_proxy(sig.return_type)(context, builder) + iloc_indexer.obj = obj + return impl_ret_borrowed( + context, builder, sig.return_type, iloc_indexer._getvalue() + ) + + +@register_model(IlocType) +class ILocModel(models.StructModel): + def __init__(self, dmm, fe_type) -> None: + members = [("obj", fe_type.obj_type)] + models.StructModel.__init__(self, dmm, fe_type, members) + + +make_attribute_wrapper(IlocType, "obj", "obj") + + +@overload_attribute(SeriesType, "iloc") +def series_iloc(series): + def get(series): + return _iLocIndexer(series) + + return get + + +@overload(operator.getitem) +def iloc_getitem(iloc_indexer, i): + if isinstance(iloc_indexer, IlocType): + + def getitem_impl(iloc_indexer, i): + return iloc_indexer.obj.values[i] + + return getitem_impl diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ea31659d8c51756ac6c2e9fd865014e388be66c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01c3b708e07906217b49bb2947152e6df39010fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53b7e9c4949215aa0577f6831c6b72d2ab42270a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9c802b2bd350532b01180a46f14d5e8863290ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bc6b67e02171db4fb702479c0e71e2c4473bc46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0bc3690003a0bd6f544d7472dbb27cd1b9f7bb3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/computation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60659bfe94be095f1573e2d131d697a6e60d0c72 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f748c2872e2eba2affbc3662a56531b156a95e47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b3b49060ce73b8d0d93ca7a9dfe2325db048d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d23d6b659284a604255a15253d62ba544d4d884a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c54ab14a633ff8e4d6624f87be9727694c057b71 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..002a44611ed82f680d7e5e90470068a9c5a6a77a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f1dad8466398411519c40e472e6e513eaac84a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db4392ef096eac3d03a549d36f976dd922456552 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b9ad3adbb598fb9e8da7653dc048d0ff2ce87d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe95803ef37eaf5437c2c3349a538a2a5bfabf68 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3d6a3fa18808bad53ff3d756fa26500db6c95cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56c3c829f2b21267bb7a775eb4da52285ec1f07a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42db24992f8b438cfabc723e7c2ab56fb8d64bb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/align.py b/venv/lib/python3.10/site-packages/pandas/core/computation/align.py new file mode 100644 index 0000000000000000000000000000000000000000..cd852ba9249cf26ea39b7e51bbfa754ffe9c10ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/align.py @@ -0,0 +1,213 @@ +""" +Core eval alignment algorithms. +""" +from __future__ import annotations + +from functools import ( + partial, + wraps, +) +from typing import ( + TYPE_CHECKING, + Callable, +) +import warnings + +import numpy as np + +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.computation.common import result_type_many + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import F + + from pandas.core.generic import NDFrame + from pandas.core.indexes.api import Index + + +def _align_core_single_unary_op( + term, +) -> tuple[partial | type[NDFrame], dict[str, Index] | None]: + typ: partial | type[NDFrame] + axes: dict[str, Index] | None = None + + if isinstance(term.value, np.ndarray): + typ = partial(np.asanyarray, dtype=term.value.dtype) + else: + typ = type(term.value) + if hasattr(term.value, "axes"): + axes = _zip_axes_from_type(typ, term.value.axes) + + return typ, axes + + +def _zip_axes_from_type( + typ: type[NDFrame], new_axes: Sequence[Index] +) -> dict[str, Index]: + return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)} + + +def _any_pandas_objects(terms) -> bool: + """ + Check a sequence of terms for instances of PandasObject. 
+ """ + return any(isinstance(term.value, PandasObject) for term in terms) + + +def _filter_special_cases(f) -> Callable[[F], F]: + @wraps(f) + def wrapper(terms): + # single unary operand + if len(terms) == 1: + return _align_core_single_unary_op(terms[0]) + + term_values = (term.value for term in terms) + + # we don't have any pandas objects + if not _any_pandas_objects(terms): + return result_type_many(*term_values), None + + return f(terms) + + return wrapper + + +@_filter_special_cases +def _align_core(terms): + term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")] + term_dims = [terms[i].value.ndim for i in term_index] + + from pandas import Series + + ndims = Series(dict(zip(term_index, term_dims))) + + # initial axes are the axes of the largest-axis'd term + biggest = terms[ndims.idxmax()].value + typ = biggest._constructor + axes = biggest.axes + naxes = len(axes) + gt_than_one_axis = naxes > 1 + + for value in (terms[i].value for i in term_index): + is_series = isinstance(value, ABCSeries) + is_series_and_gt_one_axis = is_series and gt_than_one_axis + + for axis, items in enumerate(value.axes): + if is_series_and_gt_one_axis: + ax, itm = naxes - 1, value.index + else: + ax, itm = axis, items + + if not axes[ax].is_(itm): + axes[ax] = axes[ax].union(itm) + + for i, ndim in ndims.items(): + for axis, items in zip(range(ndim), axes): + ti = terms[i].value + + if hasattr(ti, "reindex"): + transpose = isinstance(ti, ABCSeries) and naxes > 1 + reindexer = axes[naxes - 1] if transpose else items + + term_axis_size = len(ti.axes[axis]) + reindexer_size = len(reindexer) + + ordm = np.log10(max(1, abs(reindexer_size - term_axis_size))) + if ordm >= 1 and reindexer_size >= 10000: + w = ( + f"Alignment difference on axis {axis} is larger " + f"than an order of magnitude on term {repr(terms[i].name)}, " + f"by more than {ordm:.4g}; performance may suffer." + ) + warnings.warn( + w, category=PerformanceWarning, stacklevel=find_stack_level() + ) + + obj = ti.reindex(reindexer, axis=axis, copy=False) + terms[i].update(obj) + + terms[i].update(terms[i].value.values) + + return typ, _zip_axes_from_type(typ, axes) + + +def align_terms(terms): + """ + Align a set of terms. + """ + try: + # flatten the parse tree (a nested list, really) + terms = list(com.flatten(terms)) + except TypeError: + # can't iterate so it must just be a constant or single variable + if isinstance(terms.value, (ABCSeries, ABCDataFrame)): + typ = type(terms.value) + return typ, _zip_axes_from_type(typ, terms.value.axes) + return np.result_type(terms.type), None + + # if all resolved variables are numeric scalars + if all(term.is_scalar for term in terms): + return result_type_many(*(term.value for term in terms)).type, None + + # perform the main alignment + typ, axes = _align_core(terms) + return typ, axes + + +def reconstruct_object(typ, obj, axes, dtype): + """ + Reconstruct an object given its type, raw value, and possibly empty + (None) axes. + + Parameters + ---------- + typ : object + A type + obj : object + The value to use in the type constructor + axes : dict + The axes to use to construct the resulting pandas object + + Returns + ------- + ret : typ + An object of type ``typ`` with the value `obj` and possible axes + `axes`. 
+ """ + try: + typ = typ.type + except AttributeError: + pass + + res_t = np.result_type(obj.dtype, dtype) + + if not isinstance(typ, partial) and issubclass(typ, PandasObject): + return typ(obj, dtype=res_t, **axes) + + # special case for pathological things like ~True/~False + if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_: + ret_value = res_t.type(obj) + else: + ret_value = typ(obj).astype(res_t) + # The condition is to distinguish 0-dim array (returned in case of + # scalar) and 1 element array + # e.g. np.array(0) and np.array([0]) + if ( + len(obj.shape) == 1 + and len(obj) == 1 + and not isinstance(ret_value, np.ndarray) + ): + ret_value = np.array([ret_value]).astype(res_t) + + return ret_value diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/api.py b/venv/lib/python3.10/site-packages/pandas/core/computation/api.py new file mode 100644 index 0000000000000000000000000000000000000000..bd3be5b3f8c42267c8a61421b7f0877a01b33d34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/api.py @@ -0,0 +1,2 @@ +__all__ = ["eval"] +from pandas.core.computation.eval import eval diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/check.py b/venv/lib/python3.10/site-packages/pandas/core/computation/check.py new file mode 100644 index 0000000000000000000000000000000000000000..caccf34f811112abbe04d965d6f6be1e21527e8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/check.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from pandas.compat._optional import import_optional_dependency + +ne = import_optional_dependency("numexpr", errors="warn") +NUMEXPR_INSTALLED = ne is not None + +__all__ = ["NUMEXPR_INSTALLED"] diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/common.py b/venv/lib/python3.10/site-packages/pandas/core/computation/common.py new file mode 100644 index 0000000000000000000000000000000000000000..115191829f044a7d6d7f17c279025ccc26d44d04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/common.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from functools import reduce + +import numpy as np + +from pandas._config import get_option + + +def ensure_decoded(s) -> str: + """ + If we have bytes, decode them to unicode. + """ + if isinstance(s, (np.bytes_, bytes)): + s = s.decode(get_option("display.encoding")) + return s + + +def result_type_many(*arrays_and_dtypes): + """ + Wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32) + argument limit. 
+ """ + try: + return np.result_type(*arrays_and_dtypes) + except ValueError: + # we have > NPY_MAXARGS terms in our expression + return reduce(np.result_type, arrays_and_dtypes) + except TypeError: + from pandas.core.dtypes.cast import find_common_type + from pandas.core.dtypes.common import is_extension_array_dtype + + arr_and_dtypes = list(arrays_and_dtypes) + ea_dtypes, non_ea_dtypes = [], [] + for arr_or_dtype in arr_and_dtypes: + if is_extension_array_dtype(arr_or_dtype): + ea_dtypes.append(arr_or_dtype) + else: + non_ea_dtypes.append(arr_or_dtype) + + if non_ea_dtypes: + try: + np_dtype = np.result_type(*non_ea_dtypes) + except ValueError: + np_dtype = reduce(np.result_type, arrays_and_dtypes) + return find_common_type(ea_dtypes + [np_dtype]) + + return find_common_type(ea_dtypes) diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/engines.py b/venv/lib/python3.10/site-packages/pandas/core/computation/engines.py new file mode 100644 index 0000000000000000000000000000000000000000..a3a05a9d75c6ed6b80564a69ff5b6cf5a648c1b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/engines.py @@ -0,0 +1,143 @@ +""" +Engine classes for :func:`~pandas.eval` +""" +from __future__ import annotations + +import abc +from typing import TYPE_CHECKING + +from pandas.errors import NumExprClobberingError + +from pandas.core.computation.align import ( + align_terms, + reconstruct_object, +) +from pandas.core.computation.ops import ( + MATHOPS, + REDUCTIONS, +) + +from pandas.io.formats import printing + +if TYPE_CHECKING: + from pandas.core.computation.expr import Expr + +_ne_builtins = frozenset(MATHOPS + REDUCTIONS) + + +def _check_ne_builtin_clash(expr: Expr) -> None: + """ + Attempt to prevent foot-shooting in a helpful way. + + Parameters + ---------- + expr : Expr + Terms can contain + """ + names = expr.names + overlap = names & _ne_builtins + + if overlap: + s = ", ".join([repr(x) for x in overlap]) + raise NumExprClobberingError( + f'Variables in expression "{expr}" overlap with builtins: ({s})' + ) + + +class AbstractEngine(metaclass=abc.ABCMeta): + """Object serving as a base class for all engines.""" + + has_neg_frac = False + + def __init__(self, expr) -> None: + self.expr = expr + self.aligned_axes = None + self.result_type = None + + def convert(self) -> str: + """ + Convert an expression for evaluation. + + Defaults to return the expression as a string. + """ + return printing.pprint_thing(self.expr) + + def evaluate(self) -> object: + """ + Run the engine on the expression. + + This method performs alignment which is necessary no matter what engine + is being used, thus its implementation is in the base class. + + Returns + ------- + object + The result of the passed expression. + """ + if not self._is_aligned: + self.result_type, self.aligned_axes = align_terms(self.expr.terms) + + # make sure no names in resolvers and locals/globals clash + res = self._evaluate() + return reconstruct_object( + self.result_type, res, self.aligned_axes, self.expr.terms.return_type + ) + + @property + def _is_aligned(self) -> bool: + return self.aligned_axes is not None and self.result_type is not None + + @abc.abstractmethod + def _evaluate(self): + """ + Return an evaluated expression. + + Parameters + ---------- + env : Scope + The local and global environment in which to evaluate an + expression. + + Notes + ----- + Must be implemented by subclasses. 
+ """ + + +class NumExprEngine(AbstractEngine): + """NumExpr engine class""" + + has_neg_frac = True + + def _evaluate(self): + import numexpr as ne + + # convert the expression to a valid numexpr expression + s = self.convert() + + env = self.expr.env + scope = env.full_scope + _check_ne_builtin_clash(self.expr) + return ne.evaluate(s, local_dict=scope) + + +class PythonEngine(AbstractEngine): + """ + Evaluate an expression in Python space. + + Mostly for testing purposes. + """ + + has_neg_frac = False + + def evaluate(self): + return self.expr() + + def _evaluate(self) -> None: + pass + + +ENGINES: dict[str, type[AbstractEngine]] = { + "numexpr": NumExprEngine, + "python": PythonEngine, +} diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/eval.py b/venv/lib/python3.10/site-packages/pandas/core/computation/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..f1fe528de06f8eeb5bdfb0f22339499c15ebbd9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/eval.py @@ -0,0 +1,415 @@ +""" +Top level ``eval`` module. +""" +from __future__ import annotations + +import tokenize +from typing import TYPE_CHECKING +import warnings + +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.common import is_extension_array_dtype + +from pandas.core.computation.engines import ENGINES +from pandas.core.computation.expr import ( + PARSERS, + Expr, +) +from pandas.core.computation.parsing import tokenize_string +from pandas.core.computation.scope import ensure_scope +from pandas.core.generic import NDFrame + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas.core.computation.ops import BinOp + + +def _check_engine(engine: str | None) -> str: + """ + Make sure a valid engine is passed. + + Parameters + ---------- + engine : str + String to validate. + + Raises + ------ + KeyError + * If an invalid engine is passed. + ImportError + * If numexpr was requested but doesn't exist. + + Returns + ------- + str + Engine name. + """ + from pandas.core.computation.check import NUMEXPR_INSTALLED + from pandas.core.computation.expressions import USE_NUMEXPR + + if engine is None: + engine = "numexpr" if USE_NUMEXPR else "python" + + if engine not in ENGINES: + valid_engines = list(ENGINES.keys()) + raise KeyError( + f"Invalid engine '{engine}' passed, valid engines are {valid_engines}" + ) + + # TODO: validate this in a more general way (thinking of future engines + # that won't necessarily be import-able) + # Could potentially be done on engine instantiation + if engine == "numexpr" and not NUMEXPR_INSTALLED: + raise ImportError( + "'numexpr' is not installed or an unsupported version. Cannot use " + "engine='numexpr' for query/eval if 'numexpr' is not installed" + ) + + return engine + + +def _check_parser(parser: str): + """ + Make sure a valid parser is passed. 
+ + Parameters + ---------- + parser : str + + Raises + ------ + KeyError + * If an invalid parser is passed + """ + if parser not in PARSERS: + raise KeyError( + f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}" + ) + + +def _check_resolvers(resolvers): + if resolvers is not None: + for resolver in resolvers: + if not hasattr(resolver, "__getitem__"): + name = type(resolver).__name__ + raise TypeError( + f"Resolver of type '{name}' does not " + "implement the __getitem__ method" + ) + + +def _check_expression(expr): + """ + Make sure an expression is not an empty string + + Parameters + ---------- + expr : object + An object that can be converted to a string + + Raises + ------ + ValueError + * If expr is an empty string + """ + if not expr: + raise ValueError("expr cannot be an empty string") + + +def _convert_expression(expr) -> str: + """ + Convert an object to an expression. + + This function converts an object to an expression (a unicode string) and + checks to make sure it isn't empty after conversion. This is used to + convert operators to their string representation for recursive calls to + :func:`~pandas.eval`. + + Parameters + ---------- + expr : object + The object to be converted to a string. + + Returns + ------- + str + The string representation of an object. + + Raises + ------ + ValueError + * If the expression is empty. + """ + s = pprint_thing(expr) + _check_expression(s) + return s + + +def _check_for_locals(expr: str, stack_level: int, parser: str): + at_top_of_stack = stack_level == 0 + not_pandas_parser = parser != "pandas" + + if not_pandas_parser: + msg = "The '@' prefix is only supported by the pandas parser" + elif at_top_of_stack: + msg = ( + "The '@' prefix is not allowed in top-level eval calls.\n" + "please refer to your variables by name without the '@' prefix." + ) + + if at_top_of_stack or not_pandas_parser: + for toknum, tokval in tokenize_string(expr): + if toknum == tokenize.OP and tokval == "@": + raise SyntaxError(msg) + + +def eval( + expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users + parser: str = "pandas", + engine: str | None = None, + local_dict=None, + global_dict=None, + resolvers=(), + level: int = 0, + target=None, + inplace: bool = False, +): + """ + Evaluate a Python expression as a string using various backends. + + The following arithmetic operations are supported: ``+``, ``-``, ``*``, + ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following + boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). + Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, + :keyword:`or`, and :keyword:`not` with the same semantics as the + corresponding bitwise operators. :class:`~pandas.Series` and + :class:`~pandas.DataFrame` objects are supported and behave as they would + with plain ol' Python evaluation. + + Parameters + ---------- + expr : str + The expression to evaluate. This string cannot contain any Python + `statements + `__, + only Python `expressions + `__. + parser : {'pandas', 'python'}, default 'pandas' + The parser to use to construct the syntax tree from the expression. The + default of ``'pandas'`` parses code slightly different than standard + Python. Alternatively, you can parse an expression using the + ``'python'`` parser to retain strict Python semantics. See the + :ref:`enhancing performance ` documentation for + more details. + engine : {'python', 'numexpr'}, default 'numexpr' + + The engine used to evaluate the expression. 
Supported engines are + + - None : tries to use ``numexpr``, falls back to ``python`` + - ``'numexpr'`` : This default engine evaluates pandas objects using + numexpr for large speed ups in complex expressions with large frames. + - ``'python'`` : Performs operations as if you had ``eval``'d in top + level python. This engine is generally not that useful. + + More backends may be available in the future. + local_dict : dict or None, optional + A dictionary of local variables, taken from locals() by default. + global_dict : dict or None, optional + A dictionary of global variables, taken from globals() by default. + resolvers : list of dict-like or None, optional + A list of objects implementing the ``__getitem__`` special method that + you can use to inject an additional collection of namespaces to use for + variable lookup. For example, this is used in the + :meth:`~DataFrame.query` method to inject the + ``DataFrame.index`` and ``DataFrame.columns`` + variables that refer to their respective :class:`~pandas.DataFrame` + instance attributes. + level : int, optional + The number of prior stack frames to traverse and add to the current + scope. Most users will **not** need to change this parameter. + target : object, optional, default None + This is the target object for assignment. It is used when there is + variable assignment in the expression. If so, then `target` must + support item assignment with string keys, and if a copy is being + returned, it must also support `.copy()`. + inplace : bool, default False + If `target` is provided, and the expression mutates `target`, whether + to modify `target` inplace. Otherwise, return a copy of `target` with + the mutation. + + Returns + ------- + ndarray, numeric scalar, DataFrame, Series, or None + The completion value of evaluating the given code or None if ``inplace=True``. + + Raises + ------ + ValueError + There are many instances where such an error can be raised: + + - `target=None`, but the expression is multiline. + - The expression is multiline, but not all them have item assignment. + An example of such an arrangement is this: + + a = b + 1 + a + 2 + + Here, there are expressions on different lines, making it multiline, + but the last line has no variable assigned to the output of `a + 2`. + - `inplace=True`, but the expression is missing item assignment. + - Item assignment is provided, but the `target` does not support + string item assignment. + - Item assignment is provided and `inplace=False`, but the `target` + does not support the `.copy()` method + + See Also + -------- + DataFrame.query : Evaluates a boolean expression to query the columns + of a frame. + DataFrame.eval : Evaluate a string describing operations on + DataFrame columns. + + Notes + ----- + The ``dtype`` of any objects involved in an arithmetic ``%`` operation are + recursively cast to ``float64``. + + See the :ref:`enhancing performance ` documentation for + more details. 
+ + Examples + -------- + >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]}) + >>> df + animal age + 0 dog 10 + 1 pig 20 + + We can add a new column using ``pd.eval``: + + >>> pd.eval("double_age = df.age * 2", target=df) + animal age double_age + 0 dog 10 20 + 1 pig 20 40 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + + exprs: list[str | BinOp] + if isinstance(expr, str): + _check_expression(expr) + exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""] + else: + # ops.BinOp; for internal compat, not intended to be passed by users + exprs = [expr] + multi_line = len(exprs) > 1 + + if multi_line and target is None: + raise ValueError( + "multi-line expressions are only valid in the " + "context of data, use DataFrame.eval" + ) + engine = _check_engine(engine) + _check_parser(parser) + _check_resolvers(resolvers) + + ret = None + first_expr = True + target_modified = False + + for expr in exprs: + expr = _convert_expression(expr) + _check_for_locals(expr, level, parser) + + # get our (possibly passed-in) scope + env = ensure_scope( + level + 1, + global_dict=global_dict, + local_dict=local_dict, + resolvers=resolvers, + target=target, + ) + + parsed_expr = Expr(expr, engine=engine, parser=parser, env=env) + + if engine == "numexpr" and ( + is_extension_array_dtype(parsed_expr.terms.return_type) + or getattr(parsed_expr.terms, "operand_types", None) is not None + and any( + is_extension_array_dtype(elem) + for elem in parsed_expr.terms.operand_types + ) + ): + warnings.warn( + "Engine has switched to 'python' because numexpr does not support " + "extension array dtypes. Please set your engine to python manually.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + engine = "python" + + # construct the engine and evaluate the parsed expression + eng = ENGINES[engine] + eng_inst = eng(parsed_expr) + ret = eng_inst.evaluate() + + if parsed_expr.assigner is None: + if multi_line: + raise ValueError( + "Multi-line expressions are only valid " + "if all expressions contain an assignment" + ) + if inplace: + raise ValueError("Cannot operate inplace if there is no assignment") + + # assign if needed + assigner = parsed_expr.assigner + if env.target is not None and assigner is not None: + target_modified = True + + # if returning a copy, copy only on the first assignment + if not inplace and first_expr: + try: + target = env.target + if isinstance(target, NDFrame): + target = target.copy(deep=None) + else: + target = target.copy() + except AttributeError as err: + raise ValueError("Cannot return a copy of the target") from err + else: + target = env.target + + # TypeError is most commonly raised (e.g. int, list), but you + # get IndexError if you try to do this assignment on np.ndarray. + # we will ignore numpy warnings here; e.g. if trying + # to use a non-numeric indexer + try: + if inplace and isinstance(target, NDFrame): + target.loc[:, assigner] = ret + else: + target[assigner] = ret # pyright: ignore[reportGeneralTypeIssues] + except (TypeError, IndexError) as err: + raise ValueError("Cannot assign expression output to target") from err + + if not resolvers: + resolvers = ({assigner: ret},) + else: + # existing resolver needs updated to handle + # case of mutating existing column in copy + for resolver in resolvers: + if assigner in resolver: + resolver[assigner] = ret + break + else: + resolvers += ({assigner: ret},) + + ret = None + first_expr = False + + # We want to exclude `inplace=None` as being False. 
+ if inplace is False: + return target if target_modified else ret diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/expr.py b/venv/lib/python3.10/site-packages/pandas/core/computation/expr.py new file mode 100644 index 0000000000000000000000000000000000000000..b5861fbaebe9ca28703f378483fff6185aa589de --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/expr.py @@ -0,0 +1,840 @@ +""" +:func:`~pandas.eval` parsers. +""" +from __future__ import annotations + +import ast +from functools import ( + partial, + reduce, +) +from keyword import iskeyword +import tokenize +from typing import ( + Callable, + ClassVar, + TypeVar, +) + +import numpy as np + +from pandas.errors import UndefinedVariableError + +import pandas.core.common as com +from pandas.core.computation.ops import ( + ARITH_OPS_SYMS, + BOOL_OPS_SYMS, + CMP_OPS_SYMS, + LOCAL_TAG, + MATHOPS, + REDUCTIONS, + UNARY_OPS_SYMS, + BinOp, + Constant, + Div, + FuncNode, + Op, + Term, + UnaryOp, + is_term, +) +from pandas.core.computation.parsing import ( + clean_backtick_quoted_toks, + tokenize_string, +) +from pandas.core.computation.scope import Scope + +from pandas.io.formats import printing + + +def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]: + """ + Rewrite the assignment operator for PyTables expressions that use ``=`` + as a substitute for ``==``. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + """ + toknum, tokval = tok + return toknum, "==" if tokval == "=" else tokval + + +def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]: + """ + Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise + precedence is changed to boolean precedence. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + """ + toknum, tokval = tok + if toknum == tokenize.OP: + if tokval == "&": + return tokenize.NAME, "and" + elif tokval == "|": + return tokenize.NAME, "or" + return toknum, tokval + return toknum, tokval + + +def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]: + """ + Replace local variables with a syntactically valid name. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + + Notes + ----- + This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as + ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` + is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. + """ + toknum, tokval = tok + if toknum == tokenize.OP and tokval == "@": + return tokenize.OP, LOCAL_TAG + return toknum, tokval + + +def _compose2(f, g): + """ + Compose 2 callables. + """ + return lambda *args, **kwargs: f(g(*args, **kwargs)) + + +def _compose(*funcs): + """ + Compose 2 or more callables. + """ + assert len(funcs) > 1, "At least 2 callables must be passed to compose" + return reduce(_compose2, funcs) + + +def _preparse( + source: str, + f=_compose( + _replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks + ), +) -> str: + """ + Compose a collection of tokenization functions. 
+ + Parameters + ---------- + source : str + A Python source code string + f : callable + This takes a tuple of (toknum, tokval) as its argument and returns a + tuple with the same structure but possibly different elements. Defaults + to the composition of ``_rewrite_assign``, ``_replace_booleans``, and + ``_replace_locals``. + + Returns + ------- + str + Valid Python source code + + Notes + ----- + The `f` parameter can be any callable that takes *and* returns input of the + form ``(toknum, tokval)``, where ``toknum`` is one of the constants from + the ``tokenize`` module and ``tokval`` is a string. + """ + assert callable(f), "f must be callable" + return tokenize.untokenize(f(x) for x in tokenize_string(source)) + + +def _is_type(t): + """ + Factory for a type checking function of type ``t`` or tuple of types. + """ + return lambda x: isinstance(x.value, t) + + +_is_list = _is_type(list) +_is_str = _is_type(str) + + +# partition all AST nodes +_all_nodes = frozenset( + node + for node in (getattr(ast, name) for name in dir(ast)) + if isinstance(node, type) and issubclass(node, ast.AST) +) + + +def _filter_nodes(superclass, all_nodes=_all_nodes): + """ + Filter out AST nodes that are subclasses of ``superclass``. + """ + node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass)) + return frozenset(node_names) + + +_all_node_names = frozenset(x.__name__ for x in _all_nodes) +_mod_nodes = _filter_nodes(ast.mod) +_stmt_nodes = _filter_nodes(ast.stmt) +_expr_nodes = _filter_nodes(ast.expr) +_expr_context_nodes = _filter_nodes(ast.expr_context) +_boolop_nodes = _filter_nodes(ast.boolop) +_operator_nodes = _filter_nodes(ast.operator) +_unary_op_nodes = _filter_nodes(ast.unaryop) +_cmp_op_nodes = _filter_nodes(ast.cmpop) +_comprehension_nodes = _filter_nodes(ast.comprehension) +_handler_nodes = _filter_nodes(ast.excepthandler) +_arguments_nodes = _filter_nodes(ast.arguments) +_keyword_nodes = _filter_nodes(ast.keyword) +_alias_nodes = _filter_nodes(ast.alias) + + +# nodes that we don't support directly but are needed for parsing +_hacked_nodes = frozenset(["Assign", "Module", "Expr"]) + + +_unsupported_expr_nodes = frozenset( + [ + "Yield", + "GeneratorExp", + "IfExp", + "DictComp", + "SetComp", + "Repr", + "Lambda", + "Set", + "AST", + "Is", + "IsNot", + ] +) + +# these nodes are low priority or won't ever be supported (e.g., AST) +_unsupported_nodes = ( + _stmt_nodes + | _mod_nodes + | _handler_nodes + | _arguments_nodes + | _keyword_nodes + | _alias_nodes + | _expr_context_nodes + | _unsupported_expr_nodes +) - _hacked_nodes + +# we're adding a different assignment in some cases to be equality comparison +# and we don't want `stmt` and friends in their so get only the class whose +# names are capitalized +_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes +intersection = _unsupported_nodes & _base_supported_nodes +_msg = f"cannot both support and not support {intersection}" +assert not intersection, _msg + + +def _node_not_implemented(node_name: str) -> Callable[..., None]: + """ + Return a function that raises a NotImplementedError with a passed node name. 
+ """ + + def f(self, *args, **kwargs): + raise NotImplementedError(f"'{node_name}' nodes are not implemented") + + return f + + +# should be bound by BaseExprVisitor but that creates a circular dependency: +# _T is used in disallow, but disallow is used to define BaseExprVisitor +# https://github.com/microsoft/pyright/issues/2315 +_T = TypeVar("_T") + + +def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]: + """ + Decorator to disallow certain nodes from parsing. Raises a + NotImplementedError instead. + + Returns + ------- + callable + """ + + def disallowed(cls: type[_T]) -> type[_T]: + # error: "Type[_T]" has no attribute "unsupported_nodes" + cls.unsupported_nodes = () # type: ignore[attr-defined] + for node in nodes: + new_method = _node_not_implemented(node) + name = f"visit_{node}" + # error: "Type[_T]" has no attribute "unsupported_nodes" + cls.unsupported_nodes += (name,) # type: ignore[attr-defined] + setattr(cls, name, new_method) + return cls + + return disallowed + + +def _op_maker(op_class, op_symbol): + """ + Return a function to create an op class with its symbol already passed. + + Returns + ------- + callable + """ + + def f(self, node, *args, **kwargs): + """ + Return a partial function with an Op subclass with an operator already passed. + + Returns + ------- + callable + """ + return partial(op_class, op_symbol, *args, **kwargs) + + return f + + +_op_classes = {"binary": BinOp, "unary": UnaryOp} + + +def add_ops(op_classes): + """ + Decorator to add default implementation of ops. + """ + + def f(cls): + for op_attr_name, op_class in op_classes.items(): + ops = getattr(cls, f"{op_attr_name}_ops") + ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map") + for op in ops: + op_node = ops_map[op] + if op_node is not None: + made_op = _op_maker(op_class, op) + setattr(cls, f"visit_{op_node}", made_op) + return cls + + return f + + +@disallow(_unsupported_nodes) +@add_ops(_op_classes) +class BaseExprVisitor(ast.NodeVisitor): + """ + Custom ast walker. Parsers of other engines should subclass this class + if necessary. + + Parameters + ---------- + env : Scope + engine : str + parser : str + preparser : callable + """ + + const_type: ClassVar[type[Term]] = Constant + term_type: ClassVar[type[Term]] = Term + + binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS + binary_op_nodes = ( + "Gt", + "Lt", + "GtE", + "LtE", + "Eq", + "NotEq", + "In", + "NotIn", + "BitAnd", + "BitOr", + "And", + "Or", + "Add", + "Sub", + "Mult", + None, + "Pow", + "FloorDiv", + "Mod", + ) + binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes)) + + unary_ops = UNARY_OPS_SYMS + unary_op_nodes = "UAdd", "USub", "Invert", "Not" + unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes)) + + rewrite_map = { + ast.Eq: ast.In, + ast.NotEq: ast.NotIn, + ast.In: ast.In, + ast.NotIn: ast.NotIn, + } + + unsupported_nodes: tuple[str, ...] 
+ + def __init__(self, env, engine, parser, preparser=_preparse) -> None: + self.env = env + self.engine = engine + self.parser = parser + self.preparser = preparser + self.assigner = None + + def visit(self, node, **kwargs): + if isinstance(node, str): + clean = self.preparser(node) + try: + node = ast.fix_missing_locations(ast.parse(clean)) + except SyntaxError as e: + if any(iskeyword(x) for x in clean.split()): + e.msg = "Python keyword not valid identifier in numexpr query" + raise e + + method = f"visit_{type(node).__name__}" + visitor = getattr(self, method) + return visitor(node, **kwargs) + + def visit_Module(self, node, **kwargs): + if len(node.body) != 1: + raise SyntaxError("only a single expression is allowed") + expr = node.body[0] + return self.visit(expr, **kwargs) + + def visit_Expr(self, node, **kwargs): + return self.visit(node.value, **kwargs) + + def _rewrite_membership_op(self, node, left, right): + # the kind of the operator (is actually an instance) + op_instance = node.op + op_type = type(op_instance) + + # must be two terms and the comparison operator must be ==/!=/in/not in + if is_term(left) and is_term(right) and op_type in self.rewrite_map: + left_list, right_list = map(_is_list, (left, right)) + left_str, right_str = map(_is_str, (left, right)) + + # if there are any strings or lists in the expression + if left_list or right_list or left_str or right_str: + op_instance = self.rewrite_map[op_type]() + + # pop the string variable out of locals and replace it with a list + # of one string, kind of a hack + if right_str: + name = self.env.add_tmp([right.value]) + right = self.term_type(name, self.env) + + if left_str: + name = self.env.add_tmp([left.value]) + left = self.term_type(name, self.env) + + op = self.visit(op_instance) + return op, op_instance, left, right + + def _maybe_transform_eq_ne(self, node, left=None, right=None): + if left is None: + left = self.visit(node.left, side="left") + if right is None: + right = self.visit(node.right, side="right") + op, op_class, left, right = self._rewrite_membership_op(node, left, right) + return op, op_class, left, right + + def _maybe_downcast_constants(self, left, right): + f32 = np.dtype(np.float32) + if ( + left.is_scalar + and hasattr(left, "value") + and not right.is_scalar + and right.return_type == f32 + ): + # right is a float32 array, left is a scalar + name = self.env.add_tmp(np.float32(left.value)) + left = self.term_type(name, self.env) + if ( + right.is_scalar + and hasattr(right, "value") + and not left.is_scalar + and left.return_type == f32 + ): + # left is a float32 array, right is a scalar + name = self.env.add_tmp(np.float32(right.value)) + right = self.term_type(name, self.env) + + return left, right + + def _maybe_eval(self, binop, eval_in_python): + # eval `in` and `not in` (for now) in "partial" python space + # things that can be evaluated in "eval" space will be turned into + # temporary variables. 
for example, + # [1,2] in a + 2 * b + # in that case a + 2 * b will be evaluated using numexpr, and the "in" + # call will be evaluated using isin (in python space) + return binop.evaluate( + self.env, self.engine, self.parser, self.term_type, eval_in_python + ) + + def _maybe_evaluate_binop( + self, + op, + op_class, + lhs, + rhs, + eval_in_python=("in", "not in"), + maybe_eval_in_python=("==", "!=", "<", ">", "<=", ">="), + ): + res = op(lhs, rhs) + + if res.has_invalid_return_type: + raise TypeError( + f"unsupported operand type(s) for {res.op}: " + f"'{lhs.type}' and '{rhs.type}'" + ) + + if self.engine != "pytables" and ( + res.op in CMP_OPS_SYMS + and getattr(lhs, "is_datetime", False) + or getattr(rhs, "is_datetime", False) + ): + # all date ops must be done in python bc numexpr doesn't work + # well with NaT + return self._maybe_eval(res, self.binary_ops) + + if res.op in eval_in_python: + # "in"/"not in" ops are always evaluated in python + return self._maybe_eval(res, eval_in_python) + elif self.engine != "pytables": + if ( + getattr(lhs, "return_type", None) == object + or getattr(rhs, "return_type", None) == object + ): + # evaluate "==" and "!=" in python if either of our operands + # has an object return type + return self._maybe_eval(res, eval_in_python + maybe_eval_in_python) + return res + + def visit_BinOp(self, node, **kwargs): + op, op_class, left, right = self._maybe_transform_eq_ne(node) + left, right = self._maybe_downcast_constants(left, right) + return self._maybe_evaluate_binop(op, op_class, left, right) + + def visit_Div(self, node, **kwargs): + return lambda lhs, rhs: Div(lhs, rhs) + + def visit_UnaryOp(self, node, **kwargs): + op = self.visit(node.op) + operand = self.visit(node.operand) + return op(operand) + + def visit_Name(self, node, **kwargs) -> Term: + return self.term_type(node.id, self.env, **kwargs) + + # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min + def visit_NameConstant(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min + def visit_Num(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + def visit_Constant(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + # TODO(py314): deprecated since Python 3.8. 
Remove after Python 3.14 is min + def visit_Str(self, node, **kwargs) -> Term: + name = self.env.add_tmp(node.s) + return self.term_type(name, self.env) + + def visit_List(self, node, **kwargs) -> Term: + name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts]) + return self.term_type(name, self.env) + + visit_Tuple = visit_List + + def visit_Index(self, node, **kwargs): + """df.index[4]""" + return self.visit(node.value) + + def visit_Subscript(self, node, **kwargs) -> Term: + from pandas import eval as pd_eval + + value = self.visit(node.value) + slobj = self.visit(node.slice) + result = pd_eval( + slobj, local_dict=self.env, engine=self.engine, parser=self.parser + ) + try: + # a Term instance + v = value.value[result] + except AttributeError: + # an Op instance + lhs = pd_eval( + value, local_dict=self.env, engine=self.engine, parser=self.parser + ) + v = lhs[result] + name = self.env.add_tmp(v) + return self.term_type(name, env=self.env) + + def visit_Slice(self, node, **kwargs) -> slice: + """df.index[slice(4,6)]""" + lower = node.lower + if lower is not None: + lower = self.visit(lower).value + upper = node.upper + if upper is not None: + upper = self.visit(upper).value + step = node.step + if step is not None: + step = self.visit(step).value + + return slice(lower, upper, step) + + def visit_Assign(self, node, **kwargs): + """ + support a single assignment node, like + + c = a + b + + set the assigner at the top level, must be a Name node which + might or might not exist in the resolvers + + """ + if len(node.targets) != 1: + raise SyntaxError("can only assign a single expression") + if not isinstance(node.targets[0], ast.Name): + raise SyntaxError("left hand side of an assignment must be a single name") + if self.env.target is None: + raise ValueError("cannot assign without a target object") + + try: + assigner = self.visit(node.targets[0], **kwargs) + except UndefinedVariableError: + assigner = node.targets[0].id + + self.assigner = getattr(assigner, "name", assigner) + if self.assigner is None: + raise SyntaxError( + "left hand side of an assignment must be a single resolvable name" + ) + + return self.visit(node.value, **kwargs) + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = node.ctx + if isinstance(ctx, ast.Load): + # resolve the value + resolved = self.visit(value).value + try: + v = getattr(resolved, attr) + name = self.env.add_tmp(v) + return self.term_type(name, self.env) + except AttributeError: + # something like datetime.datetime where scope is overridden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + raise + + raise ValueError(f"Invalid Attribute context {type(ctx).__name__}") + + def visit_Call(self, node, side=None, **kwargs): + if isinstance(node.func, ast.Attribute) and node.func.attr != "__call__": + res = self.visit_Attribute(node.func) + elif not isinstance(node.func, ast.Name): + raise TypeError("Only named functions are supported") + else: + try: + res = self.visit(node.func) + except UndefinedVariableError: + # Check if this is a supported function name + try: + res = FuncNode(node.func.id) + except ValueError: + # Raise original error + raise + + if res is None: + # error: "expr" has no attribute "id" + raise ValueError( + f"Invalid function call {node.func.id}" # type: ignore[attr-defined] + ) + if hasattr(res, "value"): + res = res.value + + if isinstance(res, FuncNode): + new_args = [self.visit(arg) for arg in node.args] + + if node.keywords: + raise TypeError( + 
f'Function "{res.name}" does not support keyword arguments' + ) + + return res(*new_args) + + else: + new_args = [self.visit(arg)(self.env) for arg in node.args] + + for key in node.keywords: + if not isinstance(key, ast.keyword): + # error: "expr" has no attribute "id" + raise ValueError( + "keyword error in function call " + f"'{node.func.id}'" # type: ignore[attr-defined] + ) + + if key.arg: + kwargs[key.arg] = self.visit(key.value)(self.env) + + name = self.env.add_tmp(res(*new_args, **kwargs)) + return self.term_type(name=name, env=self.env) + + def translate_In(self, op): + return op + + def visit_Compare(self, node, **kwargs): + ops = node.ops + comps = node.comparators + + # base case: we have something like a CMP b + if len(comps) == 1: + op = self.translate_In(ops[0]) + binop = ast.BinOp(op=op, left=node.left, right=comps[0]) + return self.visit(binop) + + # recursive case: we have a chained comparison, a CMP b CMP c, etc. + left = node.left + values = [] + for op, comp in zip(ops, comps): + new_node = self.visit( + ast.Compare(comparators=[comp], left=left, ops=[self.translate_In(op)]) + ) + left = comp + values.append(new_node) + return self.visit(ast.BoolOp(op=ast.And(), values=values)) + + def _try_visit_binop(self, bop): + if isinstance(bop, (Op, Term)): + return bop + return self.visit(bop) + + def visit_BoolOp(self, node, **kwargs): + def visitor(x, y): + lhs = self._try_visit_binop(x) + rhs = self._try_visit_binop(y) + + op, op_class, lhs, rhs = self._maybe_transform_eq_ne(node, lhs, rhs) + return self._maybe_evaluate_binop(op, node.op, lhs, rhs) + + operands = node.values + return reduce(visitor, operands) + + +_python_not_supported = frozenset(["Dict", "BoolOp", "In", "NotIn"]) +_numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS) + + +@disallow( + (_unsupported_nodes | _python_not_supported) + - (_boolop_nodes | frozenset(["BoolOp", "Attribute", "In", "NotIn", "Tuple"])) +) +class PandasExprVisitor(BaseExprVisitor): + def __init__( + self, + env, + engine, + parser, + preparser=partial( + _preparse, + f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks), + ), + ) -> None: + super().__init__(env, engine, parser, preparser) + + +@disallow(_unsupported_nodes | _python_not_supported | frozenset(["Not"])) +class PythonExprVisitor(BaseExprVisitor): + def __init__( + self, env, engine, parser, preparser=lambda source, f=None: source + ) -> None: + super().__init__(env, engine, parser, preparser=preparser) + + +class Expr: + """ + Object encapsulating an expression. + + Parameters + ---------- + expr : str + engine : str, optional, default 'numexpr' + parser : str, optional, default 'pandas' + env : Scope, optional, default None + level : int, optional, default 2 + """ + + env: Scope + engine: str + parser: str + + def __init__( + self, + expr, + engine: str = "numexpr", + parser: str = "pandas", + env: Scope | None = None, + level: int = 0, + ) -> None: + self.expr = expr + self.env = env or Scope(level=level + 1) + self.engine = engine + self.parser = parser + self._visitor = PARSERS[parser](self.env, self.engine, self.parser) + self.terms = self.parse() + + @property + def assigner(self): + return getattr(self._visitor, "assigner", None) + + def __call__(self): + return self.terms(self.env) + + def __repr__(self) -> str: + return printing.pprint_thing(self.terms) + + def __len__(self) -> int: + return len(self.expr) + + def parse(self): + """ + Parse an expression. 
+ """ + return self._visitor.visit(self.expr) + + @property + def names(self): + """ + Get the names in an expression. + """ + if is_term(self.terms): + return frozenset([self.terms.name]) + return frozenset(term.name for term in com.flatten(self.terms)) + + +PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor} diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/expressions.py b/venv/lib/python3.10/site-packages/pandas/core/computation/expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..6219cac4aeb16ee019551f95a03af59da44c9d06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/expressions.py @@ -0,0 +1,286 @@ +""" +Expressions +----------- + +Offer fast expression evaluation through numexpr + +""" +from __future__ import annotations + +import operator +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas.util._exceptions import find_stack_level + +from pandas.core import roperator +from pandas.core.computation.check import NUMEXPR_INSTALLED + +if NUMEXPR_INSTALLED: + import numexpr as ne + +if TYPE_CHECKING: + from pandas._typing import FuncType + +_TEST_MODE: bool | None = None +_TEST_RESULT: list[bool] = [] +USE_NUMEXPR = NUMEXPR_INSTALLED +_evaluate: FuncType | None = None +_where: FuncType | None = None + +# the set of dtypes that we will allow pass to numexpr +_ALLOWED_DTYPES = { + "evaluate": {"int64", "int32", "float64", "float32", "bool"}, + "where": {"int64", "float64", "bool"}, +} + +# the minimum prod shape that we will use numexpr +_MIN_ELEMENTS = 1_000_000 + + +def set_use_numexpr(v: bool = True) -> None: + # set/unset to use numexpr + global USE_NUMEXPR + if NUMEXPR_INSTALLED: + USE_NUMEXPR = v + + # choose what we are going to do + global _evaluate, _where + + _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard + _where = _where_numexpr if USE_NUMEXPR else _where_standard + + +def set_numexpr_threads(n=None) -> None: + # if we are using numexpr, set the threads to n + # otherwise reset + if NUMEXPR_INSTALLED and USE_NUMEXPR: + if n is None: + n = ne.detect_number_of_cores() + ne.set_num_threads(n) + + +def _evaluate_standard(op, op_str, a, b): + """ + Standard evaluation. 
+ """ + if _TEST_MODE: + _store_test_result(False) + return op(a, b) + + +def _can_use_numexpr(op, op_str, a, b, dtype_check) -> bool: + """return a boolean if we WILL be using numexpr""" + if op_str is not None: + # required min elements (otherwise we are adding overhead) + if a.size > _MIN_ELEMENTS: + # check for dtype compatibility + dtypes: set[str] = set() + for o in [a, b]: + # ndarray and Series Case + if hasattr(o, "dtype"): + dtypes |= {o.dtype.name} + + # allowed are a superset + if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: + return True + + return False + + +def _evaluate_numexpr(op, op_str, a, b): + result = None + + if _can_use_numexpr(op, op_str, a, b, "evaluate"): + is_reversed = op.__name__.strip("_").startswith("r") + if is_reversed: + # we were originally called by a reversed op method + a, b = b, a + + a_value = a + b_value = b + + try: + result = ne.evaluate( + f"a_value {op_str} b_value", + local_dict={"a_value": a_value, "b_value": b_value}, + casting="safe", + ) + except TypeError: + # numexpr raises eg for array ** array with integers + # (https://github.com/pydata/numexpr/issues/379) + pass + except NotImplementedError: + if _bool_arith_fallback(op_str, a, b): + pass + else: + raise + + if is_reversed: + # reverse order to original for fallback + a, b = b, a + + if _TEST_MODE: + _store_test_result(result is not None) + + if result is None: + result = _evaluate_standard(op, op_str, a, b) + + return result + + +_op_str_mapping = { + operator.add: "+", + roperator.radd: "+", + operator.mul: "*", + roperator.rmul: "*", + operator.sub: "-", + roperator.rsub: "-", + operator.truediv: "/", + roperator.rtruediv: "/", + # floordiv not supported by numexpr 2.x + operator.floordiv: None, + roperator.rfloordiv: None, + # we require Python semantics for mod of negative for backwards compatibility + # see https://github.com/pydata/numexpr/issues/365 + # so sticking with unaccelerated for now GH#36552 + operator.mod: None, + roperator.rmod: None, + operator.pow: "**", + roperator.rpow: "**", + operator.eq: "==", + operator.ne: "!=", + operator.le: "<=", + operator.lt: "<", + operator.ge: ">=", + operator.gt: ">", + operator.and_: "&", + roperator.rand_: "&", + operator.or_: "|", + roperator.ror_: "|", + operator.xor: "^", + roperator.rxor: "^", + divmod: None, + roperator.rdivmod: None, +} + + +def _where_standard(cond, a, b): + # Caller is responsible for extracting ndarray if necessary + return np.where(cond, a, b) + + +def _where_numexpr(cond, a, b): + # Caller is responsible for extracting ndarray if necessary + result = None + + if _can_use_numexpr(None, "where", a, b, "where"): + result = ne.evaluate( + "where(cond_value, a_value, b_value)", + local_dict={"cond_value": cond, "a_value": a, "b_value": b}, + casting="safe", + ) + + if result is None: + result = _where_standard(cond, a, b) + + return result + + +# turn myself on +set_use_numexpr(get_option("compute.use_numexpr")) + + +def _has_bool_dtype(x): + try: + return x.dtype == bool + except AttributeError: + return isinstance(x, (bool, np.bool_)) + + +_BOOL_OP_UNSUPPORTED = {"+": "|", "*": "&", "-": "^"} + + +def _bool_arith_fallback(op_str, a, b) -> bool: + """ + Check if we should fallback to the python `_evaluate_standard` in case + of an unsupported operation by numexpr, which is the case for some + boolean ops. 
+ """ + if _has_bool_dtype(a) and _has_bool_dtype(b): + if op_str in _BOOL_OP_UNSUPPORTED: + warnings.warn( + f"evaluating in Python space because the {repr(op_str)} " + "operator is not supported by numexpr for the bool dtype, " + f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.", + stacklevel=find_stack_level(), + ) + return True + return False + + +def evaluate(op, a, b, use_numexpr: bool = True): + """ + Evaluate and return the expression of the op on a and b. + + Parameters + ---------- + op : the actual operand + a : left operand + b : right operand + use_numexpr : bool, default True + Whether to try to use numexpr. + """ + op_str = _op_str_mapping[op] + if op_str is not None: + if use_numexpr: + # error: "None" not callable + return _evaluate(op, op_str, a, b) # type: ignore[misc] + return _evaluate_standard(op, op_str, a, b) + + +def where(cond, a, b, use_numexpr: bool = True): + """ + Evaluate the where condition cond on a and b. + + Parameters + ---------- + cond : np.ndarray[bool] + a : return if cond is True + b : return if cond is False + use_numexpr : bool, default True + Whether to try to use numexpr. + """ + assert _where is not None + return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b) + + +def set_test_mode(v: bool = True) -> None: + """ + Keeps track of whether numexpr was used. + + Stores an additional ``True`` for every successful use of evaluate with + numexpr since the last ``get_test_result``. + """ + global _TEST_MODE, _TEST_RESULT + _TEST_MODE = v + _TEST_RESULT = [] + + +def _store_test_result(used_numexpr: bool) -> None: + if used_numexpr: + _TEST_RESULT.append(used_numexpr) + + +def get_test_result() -> list[bool]: + """ + Get test result and reset test_results. + """ + global _TEST_RESULT + res = _TEST_RESULT + _TEST_RESULT = [] + return res diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/ops.py b/venv/lib/python3.10/site-packages/pandas/core/computation/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..95ac20ba39edcd6b3a2920260dbf55f1b72e2f80 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/ops.py @@ -0,0 +1,621 @@ +""" +Operator classes for eval. 
+""" + +from __future__ import annotations + +from datetime import datetime +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs.tslibs import Timestamp + +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, +) + +import pandas.core.common as com +from pandas.core.computation.common import ( + ensure_decoded, + result_type_many, +) +from pandas.core.computation.scope import DEFAULT_GLOBALS + +from pandas.io.formats.printing import ( + pprint_thing, + pprint_thing_encoded, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Iterator, + ) + +REDUCTIONS = ("sum", "prod", "min", "max") + +_unary_math_ops = ( + "sin", + "cos", + "exp", + "log", + "expm1", + "log1p", + "sqrt", + "sinh", + "cosh", + "tanh", + "arcsin", + "arccos", + "arctan", + "arccosh", + "arcsinh", + "arctanh", + "abs", + "log10", + "floor", + "ceil", +) +_binary_math_ops = ("arctan2",) + +MATHOPS = _unary_math_ops + _binary_math_ops + + +LOCAL_TAG = "__pd_eval_local_" + + +class Term: + def __new__(cls, name, env, side=None, encoding=None): + klass = Constant if not isinstance(name, str) else cls + # error: Argument 2 for "super" not an instance of argument 1 + supr_new = super(Term, klass).__new__ # type: ignore[misc] + return supr_new(klass) + + is_local: bool + + def __init__(self, name, env, side=None, encoding=None) -> None: + # name is a str for Term, but may be something else for subclasses + self._name = name + self.env = env + self.side = side + tname = str(name) + self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS + self._value = self._resolve_name() + self.encoding = encoding + + @property + def local_name(self) -> str: + return self.name.replace(LOCAL_TAG, "") + + def __repr__(self) -> str: + return pprint_thing(self.name) + + def __call__(self, *args, **kwargs): + return self.value + + def evaluate(self, *args, **kwargs) -> Term: + return self + + def _resolve_name(self): + local_name = str(self.local_name) + is_local = self.is_local + if local_name in self.env.scope and isinstance( + self.env.scope[local_name], type + ): + is_local = False + + res = self.env.resolve(local_name, is_local=is_local) + self.update(res) + + if hasattr(res, "ndim") and res.ndim > 2: + raise NotImplementedError( + "N-dimensional objects, where N > 2, are not supported with eval" + ) + return res + + def update(self, value) -> None: + """ + search order for local (i.e., @variable) variables: + + scope, key_variable + [('locals', 'local_name'), + ('globals', 'local_name'), + ('locals', 'key'), + ('globals', 'key')] + """ + key = self.name + + # if it's a variable name (otherwise a constant) + if isinstance(key, str): + self.env.swapkey(self.local_name, key, new_value=value) + + self.value = value + + @property + def is_scalar(self) -> bool: + return is_scalar(self._value) + + @property + def type(self): + try: + # potentially very slow for large, mixed dtype frames + return self._value.values.dtype + except AttributeError: + try: + # ndarray + return self._value.dtype + except AttributeError: + # scalar + return type(self._value) + + return_type = type + + @property + def raw(self) -> str: + return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})" + + @property + def is_datetime(self) -> bool: + try: + t = self.type.type + except AttributeError: + t = self.type + + return issubclass(t, (datetime, np.datetime64)) + + @property + def value(self): + return self._value 
+ + @value.setter + def value(self, new_value) -> None: + self._value = new_value + + @property + def name(self): + return self._name + + @property + def ndim(self) -> int: + return self._value.ndim + + +class Constant(Term): + def _resolve_name(self): + return self._name + + @property + def name(self): + return self.value + + def __repr__(self) -> str: + # in python 2 str() of float + # can truncate shorter than repr() + return repr(self.name) + + +_bool_op_map = {"not": "~", "and": "&", "or": "|"} + + +class Op: + """ + Hold an operator of arbitrary arity. + """ + + op: str + + def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None: + self.op = _bool_op_map.get(op, op) + self.operands = operands + self.encoding = encoding + + def __iter__(self) -> Iterator: + return iter(self.operands) + + def __repr__(self) -> str: + """ + Print a generic n-ary operator and its operands using infix notation. + """ + # recurse over the operands + parened = (f"({pprint_thing(opr)})" for opr in self.operands) + return pprint_thing(f" {self.op} ".join(parened)) + + @property + def return_type(self): + # clobber types to bool if the op is a boolean operator + if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS): + return np.bool_ + return result_type_many(*(term.type for term in com.flatten(self))) + + @property + def has_invalid_return_type(self) -> bool: + types = self.operand_types + obj_dtype_set = frozenset([np.dtype("object")]) + return self.return_type == object and types - obj_dtype_set + + @property + def operand_types(self): + return frozenset(term.type for term in com.flatten(self)) + + @property + def is_scalar(self) -> bool: + return all(operand.is_scalar for operand in self.operands) + + @property + def is_datetime(self) -> bool: + try: + t = self.return_type.type + except AttributeError: + t = self.return_type + + return issubclass(t, (datetime, np.datetime64)) + + +def _in(x, y): + """ + Compute the vectorized membership of ``x in y`` if possible, otherwise + use Python. + """ + try: + return x.isin(y) + except AttributeError: + if is_list_like(x): + try: + return y.isin(x) + except AttributeError: + pass + return x in y + + +def _not_in(x, y): + """ + Compute the vectorized membership of ``x not in y`` if possible, + otherwise use Python. 
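# --- Illustrative aside (editor's sketch, not part of ops.py) ---
# _in/_not_in above back the "in"/"not in" operators of DataFrame.query()/pd.eval
# with the pandas parser; they vectorize via Series.isin when possible. Sample
# data below is made up for the demo.
import pandas as pd

frame = pd.DataFrame({"city": ["Oslo", "Lima", "Pune"], "pop": [0.7, 9.7, 3.1]})
print(frame.query("city in ['Oslo', 'Pune']"))      # vectorized isin path
print(frame.query("city not in ['Oslo', 'Pune']"))  # complement via _not_in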
+ """ + try: + return ~x.isin(y) + except AttributeError: + if is_list_like(x): + try: + return ~y.isin(x) + except AttributeError: + pass + return x not in y + + +CMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in") +_cmp_ops_funcs = ( + operator.gt, + operator.lt, + operator.ge, + operator.le, + operator.eq, + operator.ne, + _in, + _not_in, +) +_cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs)) + +BOOL_OPS_SYMS = ("&", "|", "and", "or") +_bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_) +_bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs)) + +ARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%") +_arith_ops_funcs = ( + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.pow, + operator.floordiv, + operator.mod, +) +_arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs)) + +SPECIAL_CASE_ARITH_OPS_SYMS = ("**", "//", "%") +_special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod) +_special_case_arith_ops_dict = dict( + zip(SPECIAL_CASE_ARITH_OPS_SYMS, _special_case_arith_ops_funcs) +) + +_binary_ops_dict = {} + +for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict): + _binary_ops_dict.update(d) + + +def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: + """ + Cast an expression inplace. + + Parameters + ---------- + terms : Op + The expression that should cast. + acceptable_dtypes : list of acceptable numpy.dtype + Will not cast if term's dtype in this list. + dtype : str or numpy.dtype + The dtype to cast to. + """ + dt = np.dtype(dtype) + for term in terms: + if term.type in acceptable_dtypes: + continue + + try: + new_value = term.value.astype(dt) + except AttributeError: + new_value = dt.type(term.value) + term.update(new_value) + + +def is_term(obj) -> bool: + return isinstance(obj, Term) + + +class BinOp(Op): + """ + Hold a binary operator and its operands. + + Parameters + ---------- + op : str + lhs : Term or Op + rhs : Term or Op + """ + + def __init__(self, op: str, lhs, rhs) -> None: + super().__init__(op, (lhs, rhs)) + self.lhs = lhs + self.rhs = rhs + + self._disallow_scalar_only_bool_ops() + + self.convert_values() + + try: + self.func = _binary_ops_dict[op] + except KeyError as err: + # has to be made a list for python3 + keys = list(_binary_ops_dict.keys()) + raise ValueError( + f"Invalid binary operator {repr(op)}, valid operators are {keys}" + ) from err + + def __call__(self, env): + """ + Recursively evaluate an expression in Python space. + + Parameters + ---------- + env : Scope + + Returns + ------- + object + The result of an evaluated expression. + """ + # recurse over the left/right nodes + left = self.lhs(env) + right = self.rhs(env) + + return self.func(left, right) + + def evaluate(self, env, engine: str, parser, term_type, eval_in_python): + """ + Evaluate a binary operation *before* being passed to the engine. 
+ + Parameters + ---------- + env : Scope + engine : str + parser : str + term_type : type + eval_in_python : list + + Returns + ------- + term_type + The "pre-evaluated" expression as an instance of ``term_type`` + """ + if engine == "python": + res = self(env) + else: + # recurse over the left/right nodes + + left = self.lhs.evaluate( + env, + engine=engine, + parser=parser, + term_type=term_type, + eval_in_python=eval_in_python, + ) + + right = self.rhs.evaluate( + env, + engine=engine, + parser=parser, + term_type=term_type, + eval_in_python=eval_in_python, + ) + + # base cases + if self.op in eval_in_python: + res = self.func(left.value, right.value) + else: + from pandas.core.computation.eval import eval + + res = eval(self, local_dict=env, engine=engine, parser=parser) + + name = env.add_tmp(res) + return term_type(name, env=env) + + def convert_values(self) -> None: + """ + Convert datetimes to a comparable value in an expression. + """ + + def stringify(value): + encoder: Callable + if self.encoding is not None: + encoder = partial(pprint_thing_encoded, encoding=self.encoding) + else: + encoder = pprint_thing + return encoder(value) + + lhs, rhs = self.lhs, self.rhs + + if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: + v = rhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = Timestamp(ensure_decoded(v)) + if v.tz is not None: + v = v.tz_convert("UTC") + self.rhs.update(v) + + if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: + v = lhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = Timestamp(ensure_decoded(v)) + if v.tz is not None: + v = v.tz_convert("UTC") + self.lhs.update(v) + + def _disallow_scalar_only_bool_ops(self): + rhs = self.rhs + lhs = self.lhs + + # GH#24883 unwrap dtype if necessary to ensure we have a type object + rhs_rt = rhs.return_type + rhs_rt = getattr(rhs_rt, "type", rhs_rt) + lhs_rt = lhs.return_type + lhs_rt = getattr(lhs_rt, "type", lhs_rt) + if ( + (lhs.is_scalar or rhs.is_scalar) + and self.op in _bool_ops_dict + and ( + not ( + issubclass(rhs_rt, (bool, np.bool_)) + and issubclass(lhs_rt, (bool, np.bool_)) + ) + ) + ): + raise NotImplementedError("cannot evaluate scalar only bool ops") + + +def isnumeric(dtype) -> bool: + return issubclass(np.dtype(dtype).type, np.number) + + +class Div(BinOp): + """ + Div operator to special case casting. + + Parameters + ---------- + lhs, rhs : Term or Op + The Terms or Ops in the ``/`` expression. + """ + + def __init__(self, lhs, rhs) -> None: + super().__init__("/", lhs, rhs) + + if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type): + raise TypeError( + f"unsupported operand type(s) for {self.op}: " + f"'{lhs.return_type}' and '{rhs.return_type}'" + ) + + # do not upcast float32s to float64 un-necessarily + acceptable_dtypes = [np.float32, np.float64] + _cast_inplace(com.flatten(self), acceptable_dtypes, np.float64) + + +UNARY_OPS_SYMS = ("+", "-", "~", "not") +_unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert) +_unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs)) + + +class UnaryOp(Op): + """ + Hold a unary operator and its operands. + + Parameters + ---------- + op : str + The token used to represent the operator. + operand : Term or Op + The Term or Op operand to the operator. + + Raises + ------ + ValueError + * If no function associated with the passed operator token is found. 
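# --- Illustrative aside (editor's sketch, not part of ops.py) ---
# UnaryOp above handles "+", "-", "~" and "not" in query/eval expressions; "not"
# is mapped to "~" for boolean operands. A small made-up example through the
# public query API:
import pandas as pd

frame = pd.DataFrame({"flag": [True, False, True], "x": [1, 2, 3]})
print(frame.query("not flag"))            # negation of a boolean column
print(frame.query("not flag and x > 1"))  # combined with a comparison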
+ """ + + def __init__(self, op: Literal["+", "-", "~", "not"], operand) -> None: + super().__init__(op, (operand,)) + self.operand = operand + + try: + self.func = _unary_ops_dict[op] + except KeyError as err: + raise ValueError( + f"Invalid unary operator {repr(op)}, " + f"valid operators are {UNARY_OPS_SYMS}" + ) from err + + def __call__(self, env) -> MathCall: + operand = self.operand(env) + # error: Cannot call function of unknown type + return self.func(operand) # type: ignore[operator] + + def __repr__(self) -> str: + return pprint_thing(f"{self.op}({self.operand})") + + @property + def return_type(self) -> np.dtype: + operand = self.operand + if operand.return_type == np.dtype("bool"): + return np.dtype("bool") + if isinstance(operand, Op) and ( + operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict + ): + return np.dtype("bool") + return np.dtype("int") + + +class MathCall(Op): + def __init__(self, func, args) -> None: + super().__init__(func.name, args) + self.func = func + + def __call__(self, env): + # error: "Op" not callable + operands = [op(env) for op in self.operands] # type: ignore[operator] + return self.func.func(*operands) + + def __repr__(self) -> str: + operands = map(str, self.operands) + return pprint_thing(f"{self.op}({','.join(operands)})") + + +class FuncNode: + def __init__(self, name: str) -> None: + if name not in MATHOPS: + raise ValueError(f'"{name}" is not a supported function') + self.name = name + self.func = getattr(np, name) + + def __call__(self, *args) -> MathCall: + return MathCall(self, args) diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/parsing.py b/venv/lib/python3.10/site-packages/pandas/core/computation/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..4cfa0f2baffd5ed45db19242c2afd00b6e5e23dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/parsing.py @@ -0,0 +1,198 @@ +""" +:func:`~pandas.eval` source string parsing functions +""" +from __future__ import annotations + +from io import StringIO +from keyword import iskeyword +import token +import tokenize +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + +# A token value Python's tokenizer probably will never use. +BACKTICK_QUOTED_STRING = 100 + + +def create_valid_python_identifier(name: str) -> str: + """ + Create valid Python identifiers from any string. + + Check if name contains any special characters. If it contains any + special characters, the special characters will be replaced by + a special string and a prefix is added. + + Raises + ------ + SyntaxError + If the returned name is not a Python valid identifier, raise an exception. + This can happen if there is a hashtag in the name, as the tokenizer will + than terminate and not find the backtick. + But also for characters that fall out of the range of (U+0001..U+007F). + """ + if name.isidentifier() and not iskeyword(name): + return name + + # Create a dict with the special characters and their replacement string. + # EXACT_TOKEN_TYPES contains these special characters + # token.tok_name contains a readable description of the replacement string. 
+ special_characters_replacements = { + char: f"_{token.tok_name[tokval]}_" + for char, tokval in (tokenize.EXACT_TOKEN_TYPES.items()) + } + special_characters_replacements.update( + { + " ": "_", + "?": "_QUESTIONMARK_", + "!": "_EXCLAMATIONMARK_", + "$": "_DOLLARSIGN_", + "€": "_EUROSIGN_", + "°": "_DEGREESIGN_", + # Including quotes works, but there are exceptions. + "'": "_SINGLEQUOTE_", + '"': "_DOUBLEQUOTE_", + # Currently not possible. Terminates parser and won't find backtick. + # "#": "_HASH_", + } + ) + + name = "".join([special_characters_replacements.get(char, char) for char in name]) + name = f"BACKTICK_QUOTED_STRING_{name}" + + if not name.isidentifier(): + raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.") + + return name + + +def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]: + """ + Clean up a column name if surrounded by backticks. + + Backtick quoted string are indicated by a certain tokval value. If a string + is a backtick quoted token it will processed by + :func:`_create_valid_python_identifier` so that the parser can find this + string when the query is executed. + In this case the tok will get the NAME tokval. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tok : Tuple[int, str] + Either the input or token or the replacement values + """ + toknum, tokval = tok + if toknum == BACKTICK_QUOTED_STRING: + return tokenize.NAME, create_valid_python_identifier(tokval) + return toknum, tokval + + +def clean_column_name(name: Hashable) -> Hashable: + """ + Function to emulate the cleaning of a backtick quoted name. + + The purpose for this function is to see what happens to the name of + identifier if it goes to the process of being parsed a Python code + inside a backtick quoted string and than being cleaned + (removed of any special characters). + + Parameters + ---------- + name : hashable + Name to be cleaned. + + Returns + ------- + name : hashable + Returns the name after tokenizing and cleaning. + + Notes + ----- + For some cases, a name cannot be converted to a valid Python identifier. + In that case :func:`tokenize_string` raises a SyntaxError. + In that case, we just return the name unmodified. + + If this name was used in the query string (this makes the query call impossible) + an error will be raised by :func:`tokenize_backtick_quoted_string` instead, + which is not caught and propagates to the user level. + """ + try: + tokenized = tokenize_string(f"`{name}`") + tokval = next(tokenized)[1] + return create_valid_python_identifier(tokval) + except SyntaxError: + return name + + +def tokenize_backtick_quoted_string( + token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int +) -> tuple[int, str]: + """ + Creates a token from a backtick quoted string. + + Moves the token_generator forwards till right after the next backtick. + + Parameters + ---------- + token_generator : Iterator[tokenize.TokenInfo] + The generator that yields the tokens of the source string (Tuple[int, str]). + The generator is at the first token after the backtick (`) + + source : str + The Python source code string. + + string_start : int + This is the start of backtick quoted string inside the source string. + + Returns + ------- + tok: Tuple[int, str] + The token that represents the backtick quoted string. + The integer is equal to BACKTICK_QUOTED_STRING (100). 
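# --- Illustrative aside (editor's sketch, not part of parsing.py) ---
# The backtick handling above is what lets DataFrame.query()/DataFrame.eval()
# reference column labels that are not valid Python identifiers; spaces and the
# special characters in the replacement table (e.g. "?") are rewritten into a
# BACKTICK_QUOTED_STRING_... identifier before parsing. Column names below are
# made up for the demo.
import pandas as pd

frame = pd.DataFrame({"total sales": [10, 25, 40], "region?": ["N", "S", "N"]})
print(frame.query("`total sales` > 20 and `region?` == 'N'"))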
+ """ + for _, tokval, start, _, _ in token_generator: + if tokval == "`": + string_end = start[1] + break + + return BACKTICK_QUOTED_STRING, source[string_start:string_end] + + +def tokenize_string(source: str) -> Iterator[tuple[int, str]]: + """ + Tokenize a Python source code string. + + Parameters + ---------- + source : str + The Python source code string. + + Returns + ------- + tok_generator : Iterator[Tuple[int, str]] + An iterator yielding all tokens with only toknum and tokval (Tuple[ing, str]). + """ + line_reader = StringIO(source).readline + token_generator = tokenize.generate_tokens(line_reader) + + # Loop over all tokens till a backtick (`) is found. + # Then, take all tokens till the next backtick to form a backtick quoted string + for toknum, tokval, start, _, _ in token_generator: + if tokval == "`": + try: + yield tokenize_backtick_quoted_string( + token_generator, source, string_start=start[1] + 1 + ) + except Exception as err: + raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err + else: + yield toknum, tokval diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/pytables.py b/venv/lib/python3.10/site-packages/pandas/core/computation/pytables.py new file mode 100644 index 0000000000000000000000000000000000000000..04a8ad7ef0be6b044baf65b80cbf4161d45f8cac --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/pytables.py @@ -0,0 +1,666 @@ +""" manage PyTables query interface via Expressions """ +from __future__ import annotations + +import ast +from decimal import ( + Decimal, + InvalidOperation, +) +from functools import partial +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, +) + +import numpy as np + +from pandas._libs.tslibs import ( + Timedelta, + Timestamp, +) +from pandas.errors import UndefinedVariableError + +from pandas.core.dtypes.common import is_list_like + +import pandas.core.common as com +from pandas.core.computation import ( + expr, + ops, + scope as _scope, +) +from pandas.core.computation.common import ensure_decoded +from pandas.core.computation.expr import BaseExprVisitor +from pandas.core.computation.ops import is_term +from pandas.core.construction import extract_array +from pandas.core.indexes.base import Index + +from pandas.io.formats.printing import ( + pprint_thing, + pprint_thing_encoded, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Self, + npt, + ) + + +class PyTablesScope(_scope.Scope): + __slots__ = ("queryables",) + + queryables: dict[str, Any] + + def __init__( + self, + level: int, + global_dict=None, + local_dict=None, + queryables: dict[str, Any] | None = None, + ) -> None: + super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict) + self.queryables = queryables or {} + + +class Term(ops.Term): + env: PyTablesScope + + def __new__(cls, name, env, side=None, encoding=None): + if isinstance(name, str): + klass = cls + else: + klass = Constant + return object.__new__(klass) + + def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: + super().__init__(name, env, side=side, encoding=encoding) + + def _resolve_name(self): + # must be a queryables + if self.side == "left": + # Note: The behavior of __new__ ensures that self.name is a str here + if self.name not in self.env.queryables: + raise NameError(f"name {repr(self.name)} is not defined") + return self.name + + # resolve the rhs (and allow it to be None) + try: + return self.env.resolve(self.name, is_local=False) + except UndefinedVariableError: + return 
self.name + + # read-only property overwriting read/write property + @property # type: ignore[misc] + def value(self): + return self._value + + +class Constant(Term): + def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: + assert isinstance(env, PyTablesScope), type(env) + super().__init__(name, env, side=side, encoding=encoding) + + def _resolve_name(self): + return self._name + + +class BinOp(ops.BinOp): + _max_selectors = 31 + + op: str + queryables: dict[str, Any] + condition: str | None + + def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding) -> None: + super().__init__(op, lhs, rhs) + self.queryables = queryables + self.encoding = encoding + self.condition = None + + def _disallow_scalar_only_bool_ops(self) -> None: + pass + + def prune(self, klass): + def pr(left, right): + """create and return a new specialized BinOp from myself""" + if left is None: + return right + elif right is None: + return left + + k = klass + if isinstance(left, ConditionBinOp): + if isinstance(right, ConditionBinOp): + k = JointConditionBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + elif isinstance(left, FilterBinOp): + if isinstance(right, FilterBinOp): + k = JointFilterBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + return k( + self.op, left, right, queryables=self.queryables, encoding=self.encoding + ).evaluate() + + left, right = self.lhs, self.rhs + + if is_term(left) and is_term(right): + res = pr(left.value, right.value) + elif not is_term(left) and is_term(right): + res = pr(left.prune(klass), right.value) + elif is_term(left) and not is_term(right): + res = pr(left.value, right.prune(klass)) + elif not (is_term(left) or is_term(right)): + res = pr(left.prune(klass), right.prune(klass)) + + return res + + def conform(self, rhs): + """inplace conform rhs""" + if not is_list_like(rhs): + rhs = [rhs] + if isinstance(rhs, np.ndarray): + rhs = rhs.ravel() + return rhs + + @property + def is_valid(self) -> bool: + """return True if this is a valid field""" + return self.lhs in self.queryables + + @property + def is_in_table(self) -> bool: + """ + return True if this is a valid column name for generation (e.g. 
an + actual column in the table) + """ + return self.queryables.get(self.lhs) is not None + + @property + def kind(self): + """the kind of my field""" + return getattr(self.queryables.get(self.lhs), "kind", None) + + @property + def meta(self): + """the meta of my field""" + return getattr(self.queryables.get(self.lhs), "meta", None) + + @property + def metadata(self): + """the metadata of my field""" + return getattr(self.queryables.get(self.lhs), "metadata", None) + + def generate(self, v) -> str: + """create and return the op string for this TermValue""" + val = v.tostring(self.encoding) + return f"({self.lhs} {self.op} {val})" + + def convert_value(self, v) -> TermValue: + """ + convert the expression that is in the term to something that is + accepted by pytables + """ + + def stringify(value): + if self.encoding is not None: + return pprint_thing_encoded(value, encoding=self.encoding) + return pprint_thing(value) + + kind = ensure_decoded(self.kind) + meta = ensure_decoded(self.meta) + if kind == "datetime" or (kind and kind.startswith("datetime64")): + if isinstance(v, (int, float)): + v = stringify(v) + v = ensure_decoded(v) + v = Timestamp(v).as_unit("ns") + if v.tz is not None: + v = v.tz_convert("UTC") + return TermValue(v, v._value, kind) + elif kind in ("timedelta64", "timedelta"): + if isinstance(v, str): + v = Timedelta(v) + else: + v = Timedelta(v, unit="s") + v = v.as_unit("ns")._value + return TermValue(int(v), v, kind) + elif meta == "category": + metadata = extract_array(self.metadata, extract_numpy=True) + result: npt.NDArray[np.intp] | np.intp | int + if v not in metadata: + result = -1 + else: + result = metadata.searchsorted(v, side="left") + return TermValue(result, result, "integer") + elif kind == "integer": + try: + v_dec = Decimal(v) + except InvalidOperation: + # GH 54186 + # convert v to float to raise float's ValueError + float(v) + else: + v = int(v_dec.to_integral_exact(rounding="ROUND_HALF_EVEN")) + return TermValue(v, v, kind) + elif kind == "float": + v = float(v) + return TermValue(v, v, kind) + elif kind == "bool": + if isinstance(v, str): + v = v.strip().lower() not in [ + "false", + "f", + "no", + "n", + "none", + "0", + "[]", + "{}", + "", + ] + else: + v = bool(v) + return TermValue(v, v, kind) + elif isinstance(v, str): + # string quoting + return TermValue(v, stringify(v), "string") + else: + raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column") + + def convert_values(self) -> None: + pass + + +class FilterBinOp(BinOp): + filter: tuple[Any, Any, Index] | None = None + + def __repr__(self) -> str: + if self.filter is None: + return "Filter: Not Initialized" + return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]") + + def invert(self) -> Self: + """invert the filter""" + if self.filter is not None: + self.filter = ( + self.filter[0], + self.generate_filter_op(invert=True), + self.filter[2], + ) + return self + + def format(self): + """return the actual filter format""" + return [self.filter] + + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self | None: # type: ignore[override] + if not self.is_valid: + raise ValueError(f"query term is not valid [{self}]") + + rhs = self.conform(self.rhs) + values = list(rhs) + + if self.is_in_table: + # if too many values to create the expression, use a filter instead + if self.op in ["==", "!="] and len(values) > self._max_selectors: + filter_op = self.generate_filter_op() + self.filter = (self.lhs, filter_op, Index(values)) + 
+ return self + return None + + # equality conditions + if self.op in ["==", "!="]: + filter_op = self.generate_filter_op() + self.filter = (self.lhs, filter_op, Index(values)) + + else: + raise TypeError( + f"passing a filterable condition to a non-table indexer [{self}]" + ) + + return self + + def generate_filter_op(self, invert: bool = False): + if (self.op == "!=" and not invert) or (self.op == "==" and invert): + return lambda axis, vals: ~axis.isin(vals) + else: + return lambda axis, vals: axis.isin(vals) + + +class JointFilterBinOp(FilterBinOp): + def format(self): + raise NotImplementedError("unable to collapse Joint Filters") + + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self: # type: ignore[override] + return self + + +class ConditionBinOp(BinOp): + def __repr__(self) -> str: + return pprint_thing(f"[Condition : [{self.condition}]]") + + def invert(self): + """invert the condition""" + # if self.condition is not None: + # self.condition = "~(%s)" % self.condition + # return self + raise NotImplementedError( + "cannot use an invert condition when passing to numexpr" + ) + + def format(self): + """return the actual ne format""" + return self.condition + + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self | None: # type: ignore[override] + if not self.is_valid: + raise ValueError(f"query term is not valid [{self}]") + + # convert values if we are in the table + if not self.is_in_table: + return None + + rhs = self.conform(self.rhs) + values = [self.convert_value(v) for v in rhs] + + # equality conditions + if self.op in ["==", "!="]: + # too many values to create the expression? + if len(values) <= self._max_selectors: + vs = [self.generate(v) for v in values] + self.condition = f"({' | '.join(vs)})" + + # use a filter after reading + else: + return None + else: + self.condition = self.generate(values[0]) + + return self + + +class JointConditionBinOp(ConditionBinOp): + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self: # type: ignore[override] + self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})" + return self + + +class UnaryOp(ops.UnaryOp): + def prune(self, klass): + if self.op != "~": + raise NotImplementedError("UnaryOp only support invert type ops") + + operand = self.operand + operand = operand.prune(klass) + + if operand is not None and ( + issubclass(klass, ConditionBinOp) + and operand.condition is not None + or not issubclass(klass, ConditionBinOp) + and issubclass(klass, FilterBinOp) + and operand.filter is not None + ): + return operand.invert() + return None + + +class PyTablesExprVisitor(BaseExprVisitor): + const_type: ClassVar[type[ops.Term]] = Constant + term_type: ClassVar[type[Term]] = Term + + def __init__(self, env, engine, parser, **kwargs) -> None: + super().__init__(env, engine, parser) + for bin_op in self.binary_ops: + bin_node = self.binary_op_nodes_map[bin_op] + setattr( + self, + f"visit_{bin_node}", + lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs), + ) + + def visit_UnaryOp(self, node, **kwargs) -> ops.Term | UnaryOp | None: + if isinstance(node.op, (ast.Not, ast.Invert)): + return UnaryOp("~", self.visit(node.operand)) + elif isinstance(node.op, ast.USub): + return self.const_type(-self.visit(node.operand).value, self.env) + elif isinstance(node.op, ast.UAdd): + raise NotImplementedError("Unary addition not supported") + # TODO: return None might never be reached + 
return None + + def visit_Index(self, node, **kwargs): + return self.visit(node.value).value + + def visit_Assign(self, node, **kwargs): + cmpr = ast.Compare( + ops=[ast.Eq()], left=node.targets[0], comparators=[node.value] + ) + return self.visit(cmpr) + + def visit_Subscript(self, node, **kwargs) -> ops.Term: + # only allow simple subscripts + + value = self.visit(node.value) + slobj = self.visit(node.slice) + try: + value = value.value + except AttributeError: + pass + + if isinstance(slobj, Term): + # In py39 np.ndarray lookups with Term containing int raise + slobj = slobj.value + + try: + return self.const_type(value[slobj], self.env) + except TypeError as err: + raise ValueError( + f"cannot subscript {repr(value)} with {repr(slobj)}" + ) from err + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = type(node.ctx) + if ctx == ast.Load: + # resolve the value + resolved = self.visit(value) + + # try to get the value to see if we are another expression + try: + resolved = resolved.value + except AttributeError: + pass + + try: + return self.term_type(getattr(resolved, attr), self.env) + except AttributeError: + # something like datetime.datetime where scope is overridden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + + raise ValueError(f"Invalid Attribute context {ctx.__name__}") + + def translate_In(self, op): + return ast.Eq() if isinstance(op, ast.In) else op + + def _rewrite_membership_op(self, node, left, right): + return self.visit(node.op), node.op, left, right + + +def _validate_where(w): + """ + Validate that the where statement is of the right type. + + The type may either be String, Expr, or list-like of Exprs. + + Parameters + ---------- + w : String term expression, Expr, or list-like of Exprs. + + Returns + ------- + where : The original where clause if the check was successful. + + Raises + ------ + TypeError : An invalid data type was passed in for w (e.g. dict). + """ + if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)): + raise TypeError( + "where must be passed as a string, PyTablesExpr, " + "or list-like of PyTablesExpr" + ) + + return w + + +class PyTablesExpr(expr.Expr): + """ + Hold a pytables-like expression, comprised of possibly multiple 'terms'. 
+ + Parameters + ---------- + where : string term expression, PyTablesExpr, or list-like of PyTablesExprs + queryables : a "kinds" map (dict of column name -> kind), or None if column + is non-indexable + encoding : an encoding that will encode the query terms + + Returns + ------- + a PyTablesExpr object + + Examples + -------- + 'index>=date' + "columns=['A', 'D']" + 'columns=A' + 'columns==A' + "~(columns=['A','B'])" + 'index>df.index[3] & string="bar"' + '(index>df.index[3] & index<=df.index[6]) | string="bar"' + "ts>=Timestamp('2012-02-01')" + "major_axis>=20130101" + """ + + _visitor: PyTablesExprVisitor | None + env: PyTablesScope + expr: str + + def __init__( + self, + where, + queryables: dict[str, Any] | None = None, + encoding=None, + scope_level: int = 0, + ) -> None: + where = _validate_where(where) + + self.encoding = encoding + self.condition = None + self.filter = None + self.terms = None + self._visitor = None + + # capture the environment if needed + local_dict: _scope.DeepChainMap[Any, Any] | None = None + + if isinstance(where, PyTablesExpr): + local_dict = where.env.scope + _where = where.expr + + elif is_list_like(where): + where = list(where) + for idx, w in enumerate(where): + if isinstance(w, PyTablesExpr): + local_dict = w.env.scope + else: + where[idx] = _validate_where(w) + _where = " & ".join([f"({w})" for w in com.flatten(where)]) + else: + # _validate_where ensures we otherwise have a string + _where = where + + self.expr = _where + self.env = PyTablesScope(scope_level + 1, local_dict=local_dict) + + if queryables is not None and isinstance(self.expr, str): + self.env.queryables.update(queryables) + self._visitor = PyTablesExprVisitor( + self.env, + queryables=queryables, + parser="pytables", + engine="pytables", + encoding=encoding, + ) + self.terms = self.parse() + + def __repr__(self) -> str: + if self.terms is not None: + return pprint_thing(self.terms) + return pprint_thing(self.expr) + + def evaluate(self): + """create and return the numexpr condition and filter""" + try: + self.condition = self.terms.prune(ConditionBinOp) + except AttributeError as err: + raise ValueError( + f"cannot process expression [{self.expr}], [{self}] " + "is not a valid condition" + ) from err + try: + self.filter = self.terms.prune(FilterBinOp) + except AttributeError as err: + raise ValueError( + f"cannot process expression [{self.expr}], [{self}] " + "is not a valid filter" + ) from err + + return self.condition, self.filter + + +class TermValue: + """hold a term value the we use to construct a condition/filter""" + + def __init__(self, value, converted, kind: str) -> None: + assert isinstance(kind, str), kind + self.value = value + self.converted = converted + self.kind = kind + + def tostring(self, encoding) -> str: + """quote the string if not encoded else encode and return""" + if self.kind == "string": + if encoding is not None: + return str(self.converted) + return f'"{self.converted}"' + elif self.kind == "float": + # python 2 str(float) is not always + # round-trippable so use repr() + return repr(self.converted) + return str(self.converted) + + +def maybe_expression(s) -> bool: + """loose checking if s is a pytables-acceptable expression""" + if not isinstance(s, str): + return False + operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",) + + # make sure we have an op at least + return any(op in s for op in operations) diff --git a/venv/lib/python3.10/site-packages/pandas/core/computation/scope.py 
b/venv/lib/python3.10/site-packages/pandas/core/computation/scope.py new file mode 100644 index 0000000000000000000000000000000000000000..7e553ca448218435eafe1fd7ca97dce6f739e2a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/computation/scope.py @@ -0,0 +1,355 @@ +""" +Module for scope operations +""" +from __future__ import annotations + +from collections import ChainMap +import datetime +import inspect +from io import StringIO +import itertools +import pprint +import struct +import sys +from typing import TypeVar + +import numpy as np + +from pandas._libs.tslibs import Timestamp +from pandas.errors import UndefinedVariableError + +_KT = TypeVar("_KT") +_VT = TypeVar("_VT") + + +# https://docs.python.org/3/library/collections.html#chainmap-examples-and-recipes +class DeepChainMap(ChainMap[_KT, _VT]): + """ + Variant of ChainMap that allows direct updates to inner scopes. + + Only works when all passed mapping are mutable. + """ + + def __setitem__(self, key: _KT, value: _VT) -> None: + for mapping in self.maps: + if key in mapping: + mapping[key] = value + return + self.maps[0][key] = value + + def __delitem__(self, key: _KT) -> None: + """ + Raises + ------ + KeyError + If `key` doesn't exist. + """ + for mapping in self.maps: + if key in mapping: + del mapping[key] + return + raise KeyError(key) + + +def ensure_scope( + level: int, global_dict=None, local_dict=None, resolvers=(), target=None +) -> Scope: + """Ensure that we are grabbing the correct scope.""" + return Scope( + level + 1, + global_dict=global_dict, + local_dict=local_dict, + resolvers=resolvers, + target=target, + ) + + +def _replacer(x) -> str: + """ + Replace a number with its hexadecimal representation. Used to tag + temporary variables with their calling scope's id. + """ + # get the hex repr of the binary char and remove 0x and pad by pad_size + # zeros + try: + hexin = ord(x) + except TypeError: + # bytes literals masquerade as ints when iterating in py3 + hexin = x + + return hex(hexin) + + +def _raw_hex_id(obj) -> str: + """Return the padded hexadecimal id of ``obj``.""" + # interpret as a pointer since that's what really what id returns + packed = struct.pack("@P", id(obj)) + return "".join([_replacer(x) for x in packed]) + + +DEFAULT_GLOBALS = { + "Timestamp": Timestamp, + "datetime": datetime.datetime, + "True": True, + "False": False, + "list": list, + "tuple": tuple, + "inf": np.inf, + "Inf": np.inf, +} + + +def _get_pretty_string(obj) -> str: + """ + Return a prettier version of obj. + + Parameters + ---------- + obj : object + Object to pretty print + + Returns + ------- + str + Pretty print object repr + """ + sio = StringIO() + pprint.pprint(obj, stream=sio) + return sio.getvalue() + + +class Scope: + """ + Object to hold scope, with a few bells to deal with some custom syntax + and contexts added by pandas. 
+ + Parameters + ---------- + level : int + global_dict : dict or None, optional, default None + local_dict : dict or Scope or None, optional, default None + resolvers : list-like or None, optional, default None + target : object + + Attributes + ---------- + level : int + scope : DeepChainMap + target : object + temps : dict + """ + + __slots__ = ["level", "scope", "target", "resolvers", "temps"] + level: int + scope: DeepChainMap + resolvers: DeepChainMap + temps: dict + + def __init__( + self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None + ) -> None: + self.level = level + 1 + + # shallow copy because we don't want to keep filling this up with what + # was there before if there are multiple calls to Scope/_ensure_scope + self.scope = DeepChainMap(DEFAULT_GLOBALS.copy()) + self.target = target + + if isinstance(local_dict, Scope): + self.scope.update(local_dict.scope) + if local_dict.target is not None: + self.target = local_dict.target + self._update(local_dict.level) + + frame = sys._getframe(self.level) + + try: + # shallow copy here because we don't want to replace what's in + # scope when we align terms (alignment accesses the underlying + # numpy array of pandas objects) + scope_global = self.scope.new_child( + (global_dict if global_dict is not None else frame.f_globals).copy() + ) + self.scope = DeepChainMap(scope_global) + if not isinstance(local_dict, Scope): + scope_local = self.scope.new_child( + (local_dict if local_dict is not None else frame.f_locals).copy() + ) + self.scope = DeepChainMap(scope_local) + finally: + del frame + + # assumes that resolvers are going from outermost scope to inner + if isinstance(local_dict, Scope): + resolvers += tuple(local_dict.resolvers.maps) + self.resolvers = DeepChainMap(*resolvers) + self.temps = {} + + def __repr__(self) -> str: + scope_keys = _get_pretty_string(list(self.scope.keys())) + res_keys = _get_pretty_string(list(self.resolvers.keys())) + return f"{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})" + + @property + def has_resolvers(self) -> bool: + """ + Return whether we have any extra scope. + + For example, DataFrames pass Their columns as resolvers during calls to + ``DataFrame.eval()`` and ``DataFrame.query()``. + + Returns + ------- + hr : bool + """ + return bool(len(self.resolvers)) + + def resolve(self, key: str, is_local: bool): + """ + Resolve a variable name in a possibly local context. + + Parameters + ---------- + key : str + A variable name + is_local : bool + Flag indicating whether the variable is local or not (prefixed with + the '@' symbol) + + Returns + ------- + value : object + The value of a particular variable + """ + try: + # only look for locals in outer scope + if is_local: + return self.scope[key] + + # not a local variable so check in resolvers if we have them + if self.has_resolvers: + return self.resolvers[key] + + # if we're here that means that we have no locals and we also have + # no resolvers + assert not is_local and not self.has_resolvers + return self.scope[key] + except KeyError: + try: + # last ditch effort we look in temporaries + # these are created when parsing indexing expressions + # e.g., df[df > 0] + return self.temps[key] + except KeyError as err: + raise UndefinedVariableError(key, is_local) from err + + def swapkey(self, old_key: str, new_key: str, new_value=None) -> None: + """ + Replace a variable name, with a potentially new value. 
+ + Parameters + ---------- + old_key : str + Current variable name to replace + new_key : str + New variable name to replace `old_key` with + new_value : object + Value to be replaced along with the possible renaming + """ + if self.has_resolvers: + maps = self.resolvers.maps + self.scope.maps + else: + maps = self.scope.maps + + maps.append(self.temps) + + for mapping in maps: + if old_key in mapping: + mapping[new_key] = new_value + return + + def _get_vars(self, stack, scopes: list[str]) -> None: + """ + Get specifically scoped variables from a list of stack frames. + + Parameters + ---------- + stack : list + A list of stack frames as returned by ``inspect.stack()`` + scopes : sequence of strings + A sequence containing valid stack frame attribute names that + evaluate to a dictionary. For example, ('locals', 'globals') + """ + variables = itertools.product(scopes, stack) + for scope, (frame, _, _, _, _, _) in variables: + try: + d = getattr(frame, f"f_{scope}") + self.scope = DeepChainMap(self.scope.new_child(d)) + finally: + # won't remove it, but DECREF it + # in Py3 this probably isn't necessary since frame won't be + # scope after the loop + del frame + + def _update(self, level: int) -> None: + """ + Update the current scope by going back `level` levels. + + Parameters + ---------- + level : int + """ + sl = level + 1 + + # add sl frames to the scope starting with the + # most distant and overwriting with more current + # makes sure that we can capture variable scope + stack = inspect.stack() + + try: + self._get_vars(stack[:sl], scopes=["locals"]) + finally: + del stack[:], stack + + def add_tmp(self, value) -> str: + """ + Add a temporary variable to the scope. + + Parameters + ---------- + value : object + An arbitrary object to be assigned to a temporary variable. + + Returns + ------- + str + The name of the temporary variable created. + """ + name = f"{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}" + + # add to inner most scope + assert name not in self.temps + self.temps[name] = value + assert name in self.temps + + # only increment if the variable gets put in the scope + return name + + @property + def ntemps(self) -> int: + """The number of temporary variables in this scope""" + return len(self.temps) + + @property + def full_scope(self) -> DeepChainMap: + """ + Return the full scope for use with passing to engines transparently + as a mapping. + + Returns + ------- + vars : DeepChainMap + All variables in this scope. 
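# --- Illustrative aside (editor's sketch, not part of scope.py) ---
# Scope above is what resolves names during query/eval: column resolvers are
# consulted for plain names, while "@name" forces a lookup in the captured
# frame locals. The variable and column names below are made up for the demo.
import pandas as pd

threshold = 2  # a local variable captured by the Scope machinery
frame = pd.DataFrame({"x": [1, 2, 3, 4]})
print(frame.query("x > @threshold"))  # "@threshold" resolves to the caller's local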
+ """ + maps = [self.temps] + self.resolvers.maps + self.scope.maps + return DeepChainMap(*maps) diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ee585cda39bf26ad6bd8a2e5d4c5a7dbab18a7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexers/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/indexers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba8a4f1d0ee7adb668c6b0ac49b2360d3c0dc356 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/indexers/__init__.py @@ -0,0 +1,31 @@ +from pandas.core.indexers.utils import ( + check_array_indexer, + check_key_length, + check_setitem_lengths, + disallow_ndim_indexing, + is_empty_indexer, + is_list_like_indexer, + is_scalar_indexer, + is_valid_positional_slice, + length_of_indexer, + maybe_convert_indices, + unpack_1tuple, + unpack_tuple_and_ellipses, + validate_indices, +) + +__all__ = [ + "is_valid_positional_slice", + "is_list_like_indexer", + "is_scalar_indexer", + "is_empty_indexer", + "check_setitem_lengths", + "validate_indices", + "maybe_convert_indices", + "length_of_indexer", + "disallow_ndim_indexing", + "unpack_1tuple", + "check_key_length", + "check_array_indexer", + "unpack_tuple_and_ellipses", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3af96744b7b0555638abe14115e7a66821145ed4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1ffb4a345aef86bef5fef3346affcbd98e3e439 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d3926ffc5cdeff8d6f80ef973e55604de1fd678 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexers/objects.py b/venv/lib/python3.10/site-packages/pandas/core/indexers/objects.py new file mode 100644 index 0000000000000000000000000000000000000000..f2db4886a559017422ed41bb8bd2246d6a3f0fb0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/indexers/objects.py @@ -0,0 +1,453 @@ +"""Indexer objects for computing start/end window bounds for rolling operations""" +from __future__ import annotations + +from datetime import timedelta + +import numpy as np + +from pandas._libs.tslibs import BaseOffset +from pandas._libs.window.indexers import calculate_variable_window_bounds +from 
pandas.util._decorators import Appender + +from pandas.core.dtypes.common import ensure_platform_int + +from pandas.core.indexes.datetimes import DatetimeIndex + +from pandas.tseries.offsets import Nano + +get_window_bounds_doc = """ +Computes the bounds of a window. + +Parameters +---------- +num_values : int, default 0 + number of values that will be aggregated over +window_size : int, default 0 + the number of rows in a window +min_periods : int, default None + min_periods passed from the top level rolling API +center : bool, default None + center passed from the top level rolling API +closed : str, default None + closed passed from the top level rolling API +step : int, default None + step passed from the top level rolling API + .. versionadded:: 1.5 +win_type : str, default None + win_type passed from the top level rolling API + +Returns +------- +A tuple of ndarray[int64]s, indicating the boundaries of each +window +""" + + +class BaseIndexer: + """ + Base class for window bounds calculations. + + Examples + -------- + >>> from pandas.api.indexers import BaseIndexer + >>> class CustomIndexer(BaseIndexer): + ... def get_window_bounds(self, num_values, min_periods, center, closed, step): + ... start = np.empty(num_values, dtype=np.int64) + ... end = np.empty(num_values, dtype=np.int64) + ... for i in range(num_values): + ... start[i] = i + ... end[i] = i + self.window_size + ... return start, end + >>> df = pd.DataFrame({"values": range(5)}) + >>> indexer = CustomIndexer(window_size=2) + >>> df.rolling(indexer).sum() + values + 0 1.0 + 1 3.0 + 2 5.0 + 3 7.0 + 4 4.0 + """ + + def __init__( + self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs + ) -> None: + self.index_array = index_array + self.window_size = window_size + # Set user defined kwargs as attributes that can be used in get_window_bounds + for key, value in kwargs.items(): + setattr(self, key, value) + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + raise NotImplementedError + + +class FixedWindowIndexer(BaseIndexer): + """Creates window boundaries that are of fixed length.""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if center or self.window_size == 0: + offset = (self.window_size - 1) // 2 + else: + offset = 0 + + end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64") + start = end - self.window_size + if closed in ["left", "both"]: + start -= 1 + if closed in ["left", "neither"]: + end -= 1 + + end = np.clip(end, 0, num_values) + start = np.clip(start, 0, num_values) + + return start, end + + +class VariableWindowIndexer(BaseIndexer): + """Creates window boundaries that are of variable length, namely for time series.""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + # error: Argument 4 to "calculate_variable_window_bounds" has incompatible + # type "Optional[bool]"; expected "bool" + # error: Argument 6 to "calculate_variable_window_bounds" has incompatible + # type 
"Optional[ndarray]"; expected "ndarray" + return calculate_variable_window_bounds( + num_values, + self.window_size, + min_periods, + center, # type: ignore[arg-type] + closed, + self.index_array, # type: ignore[arg-type] + ) + + +class VariableOffsetWindowIndexer(BaseIndexer): + """ + Calculate window boundaries based on a non-fixed offset such as a BusinessDay. + + Examples + -------- + >>> from pandas.api.indexers import VariableOffsetWindowIndexer + >>> df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10)) + >>> offset = pd.offsets.BDay(1) + >>> indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset) + >>> df + 0 + 2020-01-01 0 + 2020-01-02 1 + 2020-01-03 2 + 2020-01-04 3 + 2020-01-05 4 + 2020-01-06 5 + 2020-01-07 6 + 2020-01-08 7 + 2020-01-09 8 + 2020-01-10 9 + >>> df.rolling(indexer).sum() + 0 + 2020-01-01 0.0 + 2020-01-02 1.0 + 2020-01-03 2.0 + 2020-01-04 3.0 + 2020-01-05 7.0 + 2020-01-06 12.0 + 2020-01-07 6.0 + 2020-01-08 7.0 + 2020-01-09 8.0 + 2020-01-10 9.0 + """ + + def __init__( + self, + index_array: np.ndarray | None = None, + window_size: int = 0, + index: DatetimeIndex | None = None, + offset: BaseOffset | None = None, + **kwargs, + ) -> None: + super().__init__(index_array, window_size, **kwargs) + if not isinstance(index, DatetimeIndex): + raise ValueError("index must be a DatetimeIndex.") + self.index = index + if not isinstance(offset, BaseOffset): + raise ValueError("offset must be a DateOffset-like object.") + self.offset = offset + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if step is not None: + raise NotImplementedError("step not implemented for variable offset window") + if num_values <= 0: + return np.empty(0, dtype="int64"), np.empty(0, dtype="int64") + + # if windows is variable, default is 'right', otherwise default is 'both' + if closed is None: + closed = "right" if self.index is not None else "both" + + right_closed = closed in ["right", "both"] + left_closed = closed in ["left", "both"] + + if self.index[num_values - 1] < self.index[0]: + index_growth_sign = -1 + else: + index_growth_sign = 1 + offset_diff = index_growth_sign * self.offset + + start = np.empty(num_values, dtype="int64") + start.fill(-1) + end = np.empty(num_values, dtype="int64") + end.fill(-1) + + start[0] = 0 + + # right endpoint is closed + if right_closed: + end[0] = 1 + # right endpoint is open + else: + end[0] = 0 + + zero = timedelta(0) + # start is start of slice interval (including) + # end is end of slice interval (not including) + for i in range(1, num_values): + end_bound = self.index[i] + start_bound = end_bound - offset_diff + + # left endpoint is closed + if left_closed: + start_bound -= Nano(1) + + # advance the start bound until we are + # within the constraint + start[i] = i + for j in range(start[i - 1], i): + start_diff = (self.index[j] - start_bound) * index_growth_sign + if start_diff > zero: + start[i] = j + break + + # end bound is previous end + # or current index + end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign + if end_diff == zero and not right_closed: + end[i] = end[i - 1] + 1 + elif end_diff <= zero: + end[i] = i + 1 + else: + end[i] = end[i - 1] + + # right endpoint is open + if not right_closed: + end[i] -= 1 + + return start, end + + +class ExpandingIndexer(BaseIndexer): + """Calculate expanding window 
bounds, mimicking df.expanding()""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + return ( + np.zeros(num_values, dtype=np.int64), + np.arange(1, num_values + 1, dtype=np.int64), + ) + + +class FixedForwardWindowIndexer(BaseIndexer): + """ + Creates window boundaries for fixed-length windows that include the current row. + + Examples + -------- + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2) + >>> df.rolling(window=indexer, min_periods=1).sum() + B + 0 1.0 + 1 3.0 + 2 2.0 + 3 4.0 + 4 4.0 + """ + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if center: + raise ValueError("Forward-looking windows can't have center=True") + if closed is not None: + raise ValueError( + "Forward-looking windows don't support setting the closed argument" + ) + if step is None: + step = 1 + + start = np.arange(0, num_values, step, dtype="int64") + end = start + self.window_size + if self.window_size: + end = np.clip(end, 0, num_values) + + return start, end + + +class GroupbyIndexer(BaseIndexer): + """Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()""" + + def __init__( + self, + index_array: np.ndarray | None = None, + window_size: int | BaseIndexer = 0, + groupby_indices: dict | None = None, + window_indexer: type[BaseIndexer] = BaseIndexer, + indexer_kwargs: dict | None = None, + **kwargs, + ) -> None: + """ + Parameters + ---------- + index_array : np.ndarray or None + np.ndarray of the index of the original object that we are performing + a chained groupby operation over. 
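The forward-looking variant above mirrors the trailing case, with the window extending ahead of the current row instead of behind it; a sketch with illustrative sizes, matching the FixedForwardWindowIndexer docstring example:

import numpy as np

num_values, window_size = 5, 2
start = np.arange(0, num_values, 1, dtype="int64")  # [0 1 2 3 4]
end = np.clip(start + window_size, 0, num_values)   # [2 3 4 5 5]
# Window i covers rows [i, i + window_size), truncated at the end of the
# frame, which is why the last row in the docstring example only sums itself.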
This index has been pre-sorted relative to + the groups + window_size : int or BaseIndexer + window size during the windowing operation + groupby_indices : dict or None + dict of {group label: [positional index of rows belonging to the group]} + window_indexer : BaseIndexer + BaseIndexer class determining the start and end bounds of each group + indexer_kwargs : dict or None + Custom kwargs to be passed to window_indexer + **kwargs : + keyword arguments that will be available when get_window_bounds is called + """ + self.groupby_indices = groupby_indices or {} + self.window_indexer = window_indexer + self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {} + super().__init__( + index_array=index_array, + window_size=self.indexer_kwargs.pop("window_size", window_size), + **kwargs, + ) + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + # 1) For each group, get the indices that belong to the group + # 2) Use the indices to calculate the start & end bounds of the window + # 3) Append the window bounds in group order + start_arrays = [] + end_arrays = [] + window_indices_start = 0 + for key, indices in self.groupby_indices.items(): + index_array: np.ndarray | None + + if self.index_array is not None: + index_array = self.index_array.take(ensure_platform_int(indices)) + else: + index_array = self.index_array + indexer = self.window_indexer( + index_array=index_array, + window_size=self.window_size, + **self.indexer_kwargs, + ) + start, end = indexer.get_window_bounds( + len(indices), min_periods, center, closed, step + ) + start = start.astype(np.int64) + end = end.astype(np.int64) + assert len(start) == len( + end + ), "these should be equal in length from get_window_bounds" + # Cannot use groupby_indices as they might not be monotonic with the object + # we're rolling over + window_indices = np.arange( + window_indices_start, window_indices_start + len(indices) + ) + window_indices_start += len(indices) + # Extend as we'll be slicing window like [start, end) + window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype( + np.int64, copy=False + ) + start_arrays.append(window_indices.take(ensure_platform_int(start))) + end_arrays.append(window_indices.take(ensure_platform_int(end))) + if len(start_arrays) == 0: + return np.array([], dtype=np.int64), np.array([], dtype=np.int64) + start = np.concatenate(start_arrays) + end = np.concatenate(end_arrays) + return start, end + + +class ExponentialMovingWindowIndexer(BaseIndexer): + """Calculate ewm window bounds (the entire window)""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64) diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexers/utils.py b/venv/lib/python3.10/site-packages/pandas/core/indexers/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..55bb58f3108c3d7004058494284ea6fb4b2fca7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/indexers/utils.py @@ -0,0 +1,553 @@ +""" +Low-dependency indexing utilities. 
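A usage-level sketch of the per-group windowing that GroupbyIndexer implements; the frame, the column name "x" and the group labels are made up, and the expected sums are written out by hand:

import pandas as pd

df = pd.DataFrame({"g": ["a", "a", "b", "b", "b"], "x": [1, 2, 3, 4, 5]})
# Bounds are computed independently per group (here a trailing window of 2)
# and then shifted back to positions in the grouped result, which is what
# the three-step loop above does with window_indices_start.
out = df.groupby("g")["x"].rolling(2, min_periods=1).sum()
# group "a": 1.0, 3.0   group "b": 3.0, 7.0, 9.0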
+""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import ( + is_array_like, + is_bool_dtype, + is_integer, + is_integer_dtype, + is_list_like, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import AnyArrayLike + + from pandas.core.frame import DataFrame + from pandas.core.indexes.base import Index + +# ----------------------------------------------------------- +# Indexer Identification + + +def is_valid_positional_slice(slc: slice) -> bool: + """ + Check if a slice object can be interpreted as a positional indexer. + + Parameters + ---------- + slc : slice + + Returns + ------- + bool + + Notes + ----- + A valid positional slice may also be interpreted as a label-based slice + depending on the index being sliced. + """ + return ( + lib.is_int_or_none(slc.start) + and lib.is_int_or_none(slc.stop) + and lib.is_int_or_none(slc.step) + ) + + +def is_list_like_indexer(key) -> bool: + """ + Check if we have a list-like indexer that is *not* a NamedTuple. + + Parameters + ---------- + key : object + + Returns + ------- + bool + """ + # allow a list_like, but exclude NamedTuples which can be indexers + return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple) + + +def is_scalar_indexer(indexer, ndim: int) -> bool: + """ + Return True if we are all scalar indexers. + + Parameters + ---------- + indexer : object + ndim : int + Number of dimensions in the object being indexed. + + Returns + ------- + bool + """ + if ndim == 1 and is_integer(indexer): + # GH37748: allow indexer to be an integer for Series + return True + if isinstance(indexer, tuple) and len(indexer) == ndim: + return all(is_integer(x) for x in indexer) + return False + + +def is_empty_indexer(indexer) -> bool: + """ + Check if we have an empty indexer. + + Parameters + ---------- + indexer : object + + Returns + ------- + bool + """ + if is_list_like(indexer) and not len(indexer): + return True + if not isinstance(indexer, tuple): + indexer = (indexer,) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) + + +# ----------------------------------------------------------- +# Indexer Validation + + +def check_setitem_lengths(indexer, value, values) -> bool: + """ + Validate that value and indexer are the same length. + + An special-case is allowed for when the indexer is a boolean array + and the number of true values equals the length of ``value``. In + this case, no exception is raised. + + Parameters + ---------- + indexer : sequence + Key for the setitem. + value : array-like + Value for the setitem. + values : array-like + Values being set into. + + Returns + ------- + bool + Whether this is an empty listlike setting which is a no-op. + + Raises + ------ + ValueError + When the indexer is an ndarray or list and the lengths don't match. + """ + no_op = False + + if isinstance(indexer, (np.ndarray, list)): + # We can ignore other listlikes because they are either + # a) not necessarily 1-D indexers, e.g. tuple + # b) boolean indexers e.g. 
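A quick illustration of the predicate helpers defined above; they are private internals, so the import path is the module this diff adds rather than a public API, and the sample indexers are arbitrary:

import numpy as np
from pandas.core.indexers.utils import (
    is_empty_indexer,
    is_scalar_indexer,
    is_valid_positional_slice,
)

is_valid_positional_slice(slice(0, 5, 2))          # True: start/stop/step are ints or None
is_valid_positional_slice(slice("a", "c"))         # False: label-based bounds
is_scalar_indexer(3, 1)                            # True: a bare int on a 1-D object
is_scalar_indexer((1, 2), 2)                       # True: one integer per dimension
is_scalar_indexer((1, slice(None)), 2)             # False: contains a slice
is_empty_indexer(np.array([], dtype=np.intp))      # True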
BoolArray + if is_list_like(value): + if len(indexer) != len(value) and values.ndim == 1: + # boolean with truth values == len of the value is ok too + if isinstance(indexer, list): + indexer = np.array(indexer) + if not ( + isinstance(indexer, np.ndarray) + and indexer.dtype == np.bool_ + and indexer.sum() == len(value) + ): + raise ValueError( + "cannot set using a list-like indexer " + "with a different length than the value" + ) + if not len(indexer): + no_op = True + + elif isinstance(indexer, slice): + if is_list_like(value): + if len(value) != length_of_indexer(indexer, values) and values.ndim == 1: + # In case of two dimensional value is used row-wise and broadcasted + raise ValueError( + "cannot set using a slice indexer with a " + "different length than the value" + ) + if not len(value): + no_op = True + + return no_op + + +def validate_indices(indices: np.ndarray, n: int) -> None: + """ + Perform bounds-checking for an indexer. + + -1 is allowed for indicating missing values. + + Parameters + ---------- + indices : ndarray + n : int + Length of the array being indexed. + + Raises + ------ + ValueError + + Examples + -------- + >>> validate_indices(np.array([1, 2]), 3) # OK + + >>> validate_indices(np.array([1, -2]), 3) + Traceback (most recent call last): + ... + ValueError: negative dimensions are not allowed + + >>> validate_indices(np.array([1, 2, 3]), 3) + Traceback (most recent call last): + ... + IndexError: indices are out-of-bounds + + >>> validate_indices(np.array([-1, -1]), 0) # OK + + >>> validate_indices(np.array([0, 1]), 0) + Traceback (most recent call last): + ... + IndexError: indices are out-of-bounds + """ + if len(indices): + min_idx = indices.min() + if min_idx < -1: + msg = f"'indices' contains values less than allowed ({min_idx} < -1)" + raise ValueError(msg) + + max_idx = indices.max() + if max_idx >= n: + raise IndexError("indices are out-of-bounds") + + +# ----------------------------------------------------------- +# Indexer Conversion + + +def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray: + """ + Attempt to convert indices into valid, positive indices. + + If we have negative indices, translate to positive here. + If we have indices that are out-of-bounds, raise an IndexError. + + Parameters + ---------- + indices : array-like + Array of indices that we are to convert. + n : int + Number of elements in the array that we are indexing. + verify : bool, default True + Check that all entries are between 0 and n - 1, inclusive. + + Returns + ------- + array-like + An array-like of positive indices that correspond to the ones + that were passed in initially to this function. + + Raises + ------ + IndexError + One of the converted indices either exceeded the number of, + elements (specified by `n`), or was still negative. + """ + if isinstance(indices, list): + indices = np.array(indices) + if len(indices) == 0: + # If `indices` is empty, np.array will return a float, + # and will cause indexing errors. 
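The validation and conversion helpers above can be exercised the same way; the arrays and lengths below are arbitrary:

import numpy as np
from pandas.core.indexers.utils import maybe_convert_indices, validate_indices

maybe_convert_indices(np.array([0, -1, -2]), 5)    # array([0, 4, 3]): negatives translated
validate_indices(np.array([-1, 2]), 3)             # OK: -1 marks a missing value
validate_indices(np.array([3]), 3)                 # raises IndexError: indices are out-of-bounds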
+ return np.empty(0, dtype=np.intp) + + mask = indices < 0 + if mask.any(): + indices = indices.copy() + indices[mask] += n + + if verify: + mask = (indices >= n) | (indices < 0) + if mask.any(): + raise IndexError("indices are out-of-bounds") + return indices + + +# ----------------------------------------------------------- +# Unsorted + + +def length_of_indexer(indexer, target=None) -> int: + """ + Return the expected length of target[indexer] + + Returns + ------- + int + """ + if target is not None and isinstance(indexer, slice): + target_len = len(target) + start = indexer.start + stop = indexer.stop + step = indexer.step + if start is None: + start = 0 + elif start < 0: + start += target_len + if stop is None or stop > target_len: + stop = target_len + elif stop < 0: + stop += target_len + if step is None: + step = 1 + elif step < 0: + start, stop = stop + 1, start + 1 + step = -step + return (stop - start + step - 1) // step + elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)): + if isinstance(indexer, list): + indexer = np.array(indexer) + + if indexer.dtype == bool: + # GH#25774 + return indexer.sum() + return len(indexer) + elif isinstance(indexer, range): + return (indexer.stop - indexer.start) // indexer.step + elif not is_list_like_indexer(indexer): + return 1 + raise AssertionError("cannot find the length of the indexer") + + +def disallow_ndim_indexing(result) -> None: + """ + Helper function to disallow multi-dimensional indexing on 1D Series/Index. + + GH#27125 indexer like idx[:, None] expands dim, but we cannot do that + and keep an index, so we used to return ndarray, which was deprecated + in GH#30588. + """ + if np.ndim(result) > 1: + raise ValueError( + "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer " + "supported. Convert to a numpy array before indexing instead." + ) + + +def unpack_1tuple(tup): + """ + If we have a length-1 tuple/list that contains a slice, unpack to just + the slice. + + Notes + ----- + The list case is deprecated. + """ + if len(tup) == 1 and isinstance(tup[0], slice): + # if we don't have a MultiIndex, we may still be able to handle + # a 1-tuple. see test_1tuple_without_multiindex + + if isinstance(tup, list): + # GH#31299 + raise ValueError( + "Indexing with a single-item list containing a " + "slice is not allowed. Pass a tuple instead.", + ) + + return tup[0] + return tup + + +def check_key_length(columns: Index, key, value: DataFrame) -> None: + """ + Checks if a key used as indexer has the same length as the columns it is + associated with. + + Parameters + ---------- + columns : Index The columns of the DataFrame to index. + key : A list-like of keys to index with. + value : DataFrame The value to set for the keys. + + Raises + ------ + ValueError: If the length of key is not equal to the number of columns in value + or if the number of columns referenced by key is not equal to number + of columns. 
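A sketch of what length_of_indexer reports for a few indexer types; the target length of 10 is arbitrary:

import numpy as np
from pandas.core.indexers.utils import length_of_indexer

target = np.arange(10)
length_of_indexer(slice(2, 8, 2), target)                  # 3, same as len(target[2:8:2])
length_of_indexer(slice(7, 1, -2), target)                 # 3, negative steps are normalised
length_of_indexer(np.array([True, False, True]), target)   # 2, boolean masks count True values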
+ """ + if columns.is_unique: + if len(value.columns) != len(key): + raise ValueError("Columns must be same length as key") + else: + # Missing keys in columns are represented as -1 + if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns): + raise ValueError("Columns must be same length as key") + + +def unpack_tuple_and_ellipses(item: tuple): + """ + Possibly unpack arr[..., n] to arr[n] + """ + if len(item) > 1: + # Note: we are assuming this indexing is being done on a 1D arraylike + if item[0] is Ellipsis: + item = item[1:] + elif item[-1] is Ellipsis: + item = item[:-1] + + if len(item) > 1: + raise IndexError("too many indices for array.") + + item = item[0] + return item + + +# ----------------------------------------------------------- +# Public indexer validation + + +def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: + """ + Check if `indexer` is a valid array indexer for `array`. + + For a boolean mask, `array` and `indexer` are checked to have the same + length. The dtype is validated, and if it is an integer or boolean + ExtensionArray, it is checked if there are missing values present, and + it is converted to the appropriate numpy array. Other dtypes will raise + an error. + + Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed + through as is. + + Parameters + ---------- + array : array-like + The array that is being indexed (only used for the length). + indexer : array-like or list-like + The array-like that's used to index. List-like input that is not yet + a numpy array or an ExtensionArray is converted to one. Other input + types are passed through as is. + + Returns + ------- + numpy.ndarray + The validated indexer as a numpy array that can be used to index. + + Raises + ------ + IndexError + When the lengths don't match. + ValueError + When `indexer` cannot be converted to a numpy ndarray to index + (e.g. presence of missing values). + + See Also + -------- + api.types.is_bool_dtype : Check if `key` is of boolean dtype. + + Examples + -------- + When checking a boolean mask, a boolean ndarray is returned when the + arguments are all valid. + + >>> mask = pd.array([True, False]) + >>> arr = pd.array([1, 2]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + An IndexError is raised when the lengths don't match. + + >>> mask = pd.array([True, False, True]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + Traceback (most recent call last): + ... + IndexError: Boolean index has wrong length: 3 instead of 2. + + NA values in a boolean array are treated as False. + + >>> mask = pd.array([True, pd.NA]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + A numpy boolean mask will get passed through (if the length is correct): + + >>> mask = np.array([True, False]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + Similarly for integer indexers, an integer ndarray is returned when it is + a valid indexer, otherwise an error is (for integer indexers, a matching + length is not required): + + >>> indexer = pd.array([0, 2], dtype="Int64") + >>> arr = pd.array([1, 2, 3]) + >>> pd.api.indexers.check_array_indexer(arr, indexer) + array([0, 2]) + + >>> indexer = pd.array([0, pd.NA], dtype="Int64") + >>> pd.api.indexers.check_array_indexer(arr, indexer) + Traceback (most recent call last): + ... 
+ ValueError: Cannot index with an integer indexer containing NA values + + For non-integer/boolean dtypes, an appropriate error is raised: + + >>> indexer = np.array([0., 2.], dtype="float64") + >>> pd.api.indexers.check_array_indexer(arr, indexer) + Traceback (most recent call last): + ... + IndexError: arrays used as indices must be of integer or boolean type + """ + from pandas.core.construction import array as pd_array + + # whatever is not an array-like is returned as-is (possible valid array + # indexers that are not array-like: integer, slice, Ellipsis, None) + # In this context, tuples are not considered as array-like, as they have + # a specific meaning in indexing (multi-dimensional indexing) + if is_list_like(indexer): + if isinstance(indexer, tuple): + return indexer + else: + return indexer + + # convert list-likes to array + if not is_array_like(indexer): + indexer = pd_array(indexer) + if len(indexer) == 0: + # empty list is converted to float array by pd.array + indexer = np.array([], dtype=np.intp) + + dtype = indexer.dtype + if is_bool_dtype(dtype): + if isinstance(dtype, ExtensionDtype): + indexer = indexer.to_numpy(dtype=bool, na_value=False) + else: + indexer = np.asarray(indexer, dtype=bool) + + # GH26658 + if len(indexer) != len(array): + raise IndexError( + f"Boolean index has wrong length: " + f"{len(indexer)} instead of {len(array)}" + ) + elif is_integer_dtype(dtype): + try: + indexer = np.asarray(indexer, dtype=np.intp) + except ValueError as err: + raise ValueError( + "Cannot index with an integer indexer containing NA values" + ) from err + else: + raise IndexError("arrays used as indices must be of integer or boolean type") + + return indexer
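unpack_tuple_and_ellipses has no doctest above, so a short illustration with arbitrary values:

from pandas.core.indexers.utils import unpack_tuple_and_ellipses

unpack_tuple_and_ellipses((Ellipsis, 3))   # 3, i.e. arr[..., 3] on a 1-D array-like
unpack_tuple_and_ellipses((3, Ellipsis))   # 3
unpack_tuple_and_ellipses((5,))            # 5
unpack_tuple_and_ellipses((1, 2, 3))       # raises IndexError: too many indices for array.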