diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d10904f0d0d62bedf202d12a403139bfabefe1a4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bb0dbfc6a50b1bb7cd509dc5b3dfeed55ad70b09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.pyi @@ -0,0 +1,5 @@ +def read_float_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ... +def read_double_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ... +def read_uint16_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... +def read_uint32_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... +def read_uint64_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fe5f310aa8123dc096570d78b1c27e3a524dcd53 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashtable.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashtable.pyi new file mode 100644 index 0000000000000000000000000000000000000000..555ec73acd9b2e9ccbb5ea145db77862e2c54dbf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashtable.pyi @@ -0,0 +1,252 @@ +from typing import ( + Any, + Hashable, + Literal, +) + +import numpy as np + +from pandas._typing import npt + +def unique_label_indices( + labels: np.ndarray, # const int64_t[:] +) -> np.ndarray: ... + +class Factorizer: + count: int + uniques: Any + def __init__(self, size_hint: int) -> None: ... + def get_count(self) -> int: ... + def factorize( + self, + values: np.ndarray, + na_sentinel=..., + na_value=..., + mask=..., + ) -> npt.NDArray[np.intp]: ... 
+ +class ObjectFactorizer(Factorizer): + table: PyObjectHashTable + uniques: ObjectVector + +class Int64Factorizer(Factorizer): + table: Int64HashTable + uniques: Int64Vector + +class UInt64Factorizer(Factorizer): + table: UInt64HashTable + uniques: UInt64Vector + +class Int32Factorizer(Factorizer): + table: Int32HashTable + uniques: Int32Vector + +class UInt32Factorizer(Factorizer): + table: UInt32HashTable + uniques: UInt32Vector + +class Int16Factorizer(Factorizer): + table: Int16HashTable + uniques: Int16Vector + +class UInt16Factorizer(Factorizer): + table: UInt16HashTable + uniques: UInt16Vector + +class Int8Factorizer(Factorizer): + table: Int8HashTable + uniques: Int8Vector + +class UInt8Factorizer(Factorizer): + table: UInt8HashTable + uniques: UInt8Vector + +class Float64Factorizer(Factorizer): + table: Float64HashTable + uniques: Float64Vector + +class Float32Factorizer(Factorizer): + table: Float32HashTable + uniques: Float32Vector + +class Complex64Factorizer(Factorizer): + table: Complex64HashTable + uniques: Complex64Vector + +class Complex128Factorizer(Factorizer): + table: Complex128HashTable + uniques: Complex128Vector + +class Int64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int64]: ... + +class Int32Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int32]: ... + +class Int16Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int16]: ... + +class Int8Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int8]: ... + +class UInt64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint64]: ... + +class UInt32Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint32]: ... + +class UInt16Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint16]: ... + +class UInt8Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint8]: ... + +class Float64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.float64]: ... + +class Float32Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.float32]: ... + +class Complex128Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.complex128]: ... + +class Complex64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.complex64]: ... + +class StringVector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.object_]: ... + +class ObjectVector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.object_]: ... + +class HashTable: + # NB: The base HashTable class does _not_ actually have these methods; + # we are putting them here for the sake of mypy to avoid + # reproducing them in each subclass below. + def __init__(self, size_hint: int = ..., uses_mask: bool = ...) -> None: ... + def __len__(self) -> int: ... 
+ def __contains__(self, key: Hashable) -> bool: ... + def sizeof(self, deep: bool = ...) -> int: ... + def get_state(self) -> dict[str, int]: ... + # TODO: `val/key` type is subclass-specific + def get_item(self, val): ... # TODO: return type? + def set_item(self, key, val) -> None: ... + def get_na(self): ... # TODO: return type? + def set_na(self, val) -> None: ... + def map_locations( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + mask: npt.NDArray[np.bool_] | None = ..., + ) -> None: ... + def lookup( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + mask: npt.NDArray[np.bool_] | None = ..., + ) -> npt.NDArray[np.intp]: ... + def get_labels( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + uniques, # SubclassTypeVector + count_prior: int = ..., + na_sentinel: int = ..., + na_value: object = ..., + mask=..., + ) -> npt.NDArray[np.intp]: ... + def unique( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + return_inverse: bool = ..., + mask=..., + ) -> ( + tuple[ + np.ndarray, # np.ndarray[subclass-specific] + npt.NDArray[np.intp], + ] + | np.ndarray + ): ... # np.ndarray[subclass-specific] + def factorize( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + na_sentinel: int = ..., + na_value: object = ..., + mask=..., + ignore_na: bool = True, + ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific] + +class Complex128HashTable(HashTable): ... +class Complex64HashTable(HashTable): ... +class Float64HashTable(HashTable): ... +class Float32HashTable(HashTable): ... + +class Int64HashTable(HashTable): + # Only Int64HashTable has get_labels_groupby, map_keys_to_values + def get_labels_groupby( + self, + values: npt.NDArray[np.int64], # const int64_t[:] + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ... + def map_keys_to_values( + self, + keys: npt.NDArray[np.int64], + values: npt.NDArray[np.int64], # const int64_t[:] + ) -> None: ... + +class Int32HashTable(HashTable): ... +class Int16HashTable(HashTable): ... +class Int8HashTable(HashTable): ... +class UInt64HashTable(HashTable): ... +class UInt32HashTable(HashTable): ... +class UInt16HashTable(HashTable): ... +class UInt8HashTable(HashTable): ... +class StringHashTable(HashTable): ... +class PyObjectHashTable(HashTable): ... +class IntpHashTable(HashTable): ... + +def duplicated( + values: np.ndarray, + keep: Literal["last", "first", False] = ..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> npt.NDArray[np.bool_]: ... +def mode( + values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ... +) -> np.ndarray: ... +def value_count( + values: np.ndarray, + dropna: bool, + mask: npt.NDArray[np.bool_] | None = ..., +) -> tuple[np.ndarray, npt.NDArray[np.int64], int]: ... # np.ndarray[same-as-values] + +# arr and values should have same dtype +def ismember( + arr: np.ndarray, + values: np.ndarray, +) -> npt.NDArray[np.bool_]: ... +def object_hash(obj) -> int: ... +def objects_are_equal(a, b) -> bool: ... 
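The hash tables and factorizers stubbed above are the engines behind pandas.factorize and Index lookups. A minimal sketch of driving them directly — the calls mirror the signatures declared in the stub, but this is internal API and the printed results are assumptions about runtime behaviour, not documented guarantees:

    import numpy as np
    from pandas._libs import hashtable as ht

    values = np.array([3, 1, 3, 2, 1], dtype=np.int64)

    # HashTable: populate the value -> position mapping, then look values up.
    table = ht.Int64HashTable()
    table.map_locations(values)
    positions = table.lookup(values)        # npt.NDArray[np.intp], per the stub

    # Factorizer: integer codes plus a growing vector of uniques.
    fz = ht.Int64Factorizer(size_hint=len(values))
    codes = fz.factorize(values)            # codes in order of appearance,
                                            # e.g. [0, 1, 0, 2, 1] (assumed)
    uniques = fz.uniques.to_array()         # Int64Vector -> ndarray([3, 1, 2])
    print(positions, codes, uniques, fz.get_count())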
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.pyi new file mode 100644 index 0000000000000000000000000000000000000000..75db47bf3160e828b178c0cd733452c9dc5e8919 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.pyi @@ -0,0 +1,100 @@ +import numpy as np + +from pandas._typing import npt + +from pandas import MultiIndex +from pandas.core.arrays import ExtensionArray + +multiindex_nulls_shift: int + +class IndexEngine: + over_size_threshold: bool + def __init__(self, values: np.ndarray) -> None: ... + def __contains__(self, val: object) -> bool: ... + + # -> int | slice | np.ndarray[bool] + def get_loc(self, val: object) -> int | slice | np.ndarray: ... + def sizeof(self, deep: bool = ...) -> int: ... + def __sizeof__(self) -> int: ... + @property + def is_unique(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + @property + def is_monotonic_decreasing(self) -> bool: ... + @property + def is_mapping_populated(self) -> bool: ... + def clear_mapping(self): ... + def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, + targets: np.ndarray, + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + +class MaskedIndexEngine(IndexEngine): + def __init__(self, values: object) -> None: ... + def get_indexer_non_unique( + self, targets: object + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + +class Float64Engine(IndexEngine): ... +class Float32Engine(IndexEngine): ... +class Complex128Engine(IndexEngine): ... +class Complex64Engine(IndexEngine): ... +class Int64Engine(IndexEngine): ... +class Int32Engine(IndexEngine): ... +class Int16Engine(IndexEngine): ... +class Int8Engine(IndexEngine): ... +class UInt64Engine(IndexEngine): ... +class UInt32Engine(IndexEngine): ... +class UInt16Engine(IndexEngine): ... +class UInt8Engine(IndexEngine): ... +class ObjectEngine(IndexEngine): ... +class DatetimeEngine(Int64Engine): ... +class TimedeltaEngine(DatetimeEngine): ... +class PeriodEngine(Int64Engine): ... +class BoolEngine(UInt8Engine): ... +class MaskedFloat64Engine(MaskedIndexEngine): ... +class MaskedFloat32Engine(MaskedIndexEngine): ... +class MaskedComplex128Engine(MaskedIndexEngine): ... +class MaskedComplex64Engine(MaskedIndexEngine): ... +class MaskedInt64Engine(MaskedIndexEngine): ... +class MaskedInt32Engine(MaskedIndexEngine): ... +class MaskedInt16Engine(MaskedIndexEngine): ... +class MaskedInt8Engine(MaskedIndexEngine): ... +class MaskedUInt64Engine(MaskedIndexEngine): ... +class MaskedUInt32Engine(MaskedIndexEngine): ... +class MaskedUInt16Engine(MaskedIndexEngine): ... +class MaskedUInt8Engine(MaskedIndexEngine): ... +class MaskedBoolEngine(MaskedUInt8Engine): ... + +class BaseMultiIndexCodesEngine: + levels: list[np.ndarray] + offsets: np.ndarray # ndarray[uint64_t, ndim=1] + + def __init__( + self, + levels: list[np.ndarray], # all entries hashable + labels: list[np.ndarray], # all entries integer-dtyped + offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1] + ) -> None: ... + def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ... + def _extract_level_codes(self, target: MultiIndex) -> np.ndarray: ... + +class ExtensionEngine: + def __init__(self, values: ExtensionArray) -> None: ... + def __contains__(self, val: object) -> bool: ... + def get_loc(self, val: object) -> int | slice | np.ndarray: ... 
+ def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, + targets: np.ndarray, + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + @property + def is_unique(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + @property + def is_monotonic_decreasing(self) -> bool: ... + def sizeof(self, deep: bool = ...) -> int: ... + def clear_mapping(self): ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/indexing.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/indexing.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4e8f746b0a014a24114fdfe7af6b33a05a0b9a08 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/indexing.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/indexing.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/indexing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3ae5c5044a2f75452fa57ba578af2c7b4c78ec96 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/indexing.pyi @@ -0,0 +1,17 @@ +from typing import ( + Generic, + TypeVar, +) + +from pandas.core.indexing import IndexingMixin + +_IndexingMixinT = TypeVar("_IndexingMixinT", bound=IndexingMixin) + +class NDFrameIndexerBase(Generic[_IndexingMixinT]): + name: str + # in practice obj is either a DataFrame or a Series + obj: _IndexingMixinT + + def __init__(self, name: str, obj: _IndexingMixinT) -> None: ... + @property + def ndim(self) -> int: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5aeaae13186da4eb25e621e81c6e24dc56624d59 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/interval.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/interval.pyi new file mode 100644 index 0000000000000000000000000000000000000000..587fdf84f2f85520713352bbcab29804c95621e5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/interval.pyi @@ -0,0 +1,174 @@ +from typing import ( + Any, + Generic, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt + +from pandas._typing import ( + IntervalClosedType, + Timedelta, + Timestamp, +) + +VALID_CLOSED: frozenset[str] + +_OrderableScalarT = TypeVar("_OrderableScalarT", int, float) +_OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta) +_OrderableT = TypeVar("_OrderableT", int, float, Timestamp, Timedelta) + +class _LengthDescriptor: + @overload + def __get__( + self, instance: Interval[_OrderableScalarT], owner: Any + ) -> _OrderableScalarT: ... + @overload + def __get__( + self, instance: Interval[_OrderableTimesT], owner: Any + ) -> Timedelta: ... + +class _MidDescriptor: + @overload + def __get__(self, instance: Interval[_OrderableScalarT], owner: Any) -> float: ... + @overload + def __get__( + self, instance: Interval[_OrderableTimesT], owner: Any + ) -> _OrderableTimesT: ... + +class IntervalMixin: + @property + def closed_left(self) -> bool: ... + @property + def closed_right(self) -> bool: ... 
+ @property + def open_left(self) -> bool: ... + @property + def open_right(self) -> bool: ... + @property + def is_empty(self) -> bool: ... + def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ... + +class Interval(IntervalMixin, Generic[_OrderableT]): + @property + def left(self: Interval[_OrderableT]) -> _OrderableT: ... + @property + def right(self: Interval[_OrderableT]) -> _OrderableT: ... + @property + def closed(self) -> IntervalClosedType: ... + mid: _MidDescriptor + length: _LengthDescriptor + def __init__( + self, + left: _OrderableT, + right: _OrderableT, + closed: IntervalClosedType = ..., + ) -> None: ... + def __hash__(self) -> int: ... + @overload + def __contains__( + self: Interval[Timedelta], key: Timedelta | Interval[Timedelta] + ) -> bool: ... + @overload + def __contains__( + self: Interval[Timestamp], key: Timestamp | Interval[Timestamp] + ) -> bool: ... + @overload + def __contains__( + self: Interval[_OrderableScalarT], + key: _OrderableScalarT | Interval[_OrderableScalarT], + ) -> bool: ... + @overload + def __add__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __add__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __add__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __radd__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __radd__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __radd__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __sub__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __sub__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __sub__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __rsub__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __rsub__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __rsub__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __mul__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __mul__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __rmul__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __rmul__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __truediv__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __truediv__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __floordiv__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __floordiv__(self: Interval[float], y: float) -> Interval[float]: ... + def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ... + +def intervals_to_interval_bounds( + intervals: np.ndarray, validate_closed: bool = ... +) -> tuple[np.ndarray, np.ndarray, IntervalClosedType]: ... + +class IntervalTree(IntervalMixin): + def __init__( + self, + left: np.ndarray, + right: np.ndarray, + closed: IntervalClosedType = ..., + leaf_size: int = ..., + ) -> None: ... 
+ @property + def mid(self) -> np.ndarray: ... + @property + def length(self) -> np.ndarray: ... + def get_indexer(self, target) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, target + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + _na_count: int + @property + def is_overlapping(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + def clear_mapping(self) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9735fbb3eb8744a280ceb95ec482af38bf3de822 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f6b1b985edfe12b8b49326cf86a78f88054a0ece Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.pyi new file mode 100644 index 0000000000000000000000000000000000000000..6738a1dff4a9eb272f3e6c88a31b0bd7386aebd2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.pyi @@ -0,0 +1,51 @@ +from typing import ( + Any, + Callable, + Iterable, + Literal, + TypeAlias, + overload, +) + +import numpy as np + +from pandas._typing import npt + +_BinOp: TypeAlias = Callable[[Any, Any], Any] +_BoolOp: TypeAlias = Callable[[Any, Any], bool] + +def scalar_compare( + values: np.ndarray, # object[:] + val: object, + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> npt.NDArray[np.bool_]: ... +def vec_compare( + left: npt.NDArray[np.object_], + right: npt.NDArray[np.object_], + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> npt.NDArray[np.bool_]: ... +def scalar_binop( + values: np.ndarray, # object[:] + val: object, + op: _BinOp, # binary operator +) -> np.ndarray: ... +def vec_binop( + left: np.ndarray, # object[:] + right: np.ndarray, # object[:] + op: _BinOp, # binary operator +) -> np.ndarray: ... +@overload +def maybe_convert_bool( + arr: npt.NDArray[np.object_], + true_values: Iterable | None = None, + false_values: Iterable | None = None, + convert_to_masked_nullable: Literal[False] = ..., +) -> tuple[np.ndarray, None]: ... +@overload +def maybe_convert_bool( + arr: npt.NDArray[np.object_], + true_values: Iterable = ..., + false_values: Iterable = ..., + *, + convert_to_masked_nullable: Literal[True], +) -> tuple[np.ndarray, np.ndarray]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.pyi new file mode 100644 index 0000000000000000000000000000000000000000..91b5a4dbaaebc177191d3189f12e4e20d56ca0fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.pyi @@ -0,0 +1,5 @@ +import numpy as np + +def maybe_dispatch_ufunc_to_dunder_op( + self, ufunc: np.ufunc, method: str, *inputs, **kwargs +): ... 
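Unlike the hash tables, Interval is public API, so the generic overloads stubbed above (arithmetic, containment, the length/mid descriptors) can be exercised directly. A short example:

    from pandas import Interval, Timestamp

    iv = Interval(0, 5, closed="right")
    assert 3 in iv and 0 not in iv            # closed="right" excludes the left edge
    assert iv + 1 == Interval(1, 6, closed="right")
    assert iv.overlaps(Interval(4, 10))
    print(iv.mid, iv.length)                  # 2.5 and 5 for numeric endpoints

    # With Timestamp endpoints, length is a Timedelta, per _LengthDescriptor.
    ts_iv = Interval(Timestamp("2024-01-01"), Timestamp("2024-02-01"))
    print(ts_iv.length)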
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_datetime.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_datetime.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d9a317a3433c6960ab60d523ccc23105e76f33da Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_datetime.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/parsers.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/parsers.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c13b3752d81e30e6766e17a8c80284aeacf07637 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/parsers.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/parsers.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/parsers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..253bb7303cefb81f61692c8d7bd9812a191d9ac5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/parsers.pyi @@ -0,0 +1,77 @@ +from typing import ( + Hashable, + Literal, +) + +import numpy as np + +from pandas._typing import ( + ArrayLike, + Dtype, + npt, +) + +STR_NA_VALUES: set[str] +DEFAULT_BUFFER_HEURISTIC: int + +def sanitize_objects( + values: npt.NDArray[np.object_], + na_values: set, +) -> int: ... + +class TextReader: + unnamed_cols: set[str] + table_width: int # int64_t + leading_cols: int # int64_t + header: list[list[int]] # non-negative integers + def __init__( + self, + source, + delimiter: bytes | str = ..., # single-character only + header=..., + header_start: int = ..., # int64_t + header_end: int = ..., # uint64_t + index_col=..., + names=..., + tokenize_chunksize: int = ..., # int64_t + delim_whitespace: bool = ..., + converters=..., + skipinitialspace: bool = ..., + escapechar: bytes | str | None = ..., # single-character only + doublequote: bool = ..., + quotechar: str | bytes | None = ..., # at most 1 character + quoting: int = ..., + lineterminator: bytes | str | None = ..., # at most 1 character + comment=..., + decimal: bytes | str = ..., # single-character only + thousands: bytes | str | None = ..., # single-character only + dtype: Dtype | dict[Hashable, Dtype] = ..., + usecols=..., + error_bad_lines: bool = ..., + warn_bad_lines: bool = ..., + na_filter: bool = ..., + na_values=..., + na_fvalues=..., + keep_default_na: bool = ..., + true_values=..., + false_values=..., + allow_leading_cols: bool = ..., + skiprows=..., + skipfooter: int = ..., # int64_t + verbose: bool = ..., + float_precision: Literal["round_trip", "legacy", "high"] | None = ..., + skip_blank_lines: bool = ..., + encoding_errors: bytes | str = ..., + ) -> None: ... + def set_noconvert(self, i: int) -> None: ... + def remove_noconvert(self, i: int) -> None: ... + def close(self) -> None: ... + def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ... + def read_low_memory(self, rows: int | None) -> list[dict[int, ArrayLike]]: ... + +# _maybe_upcast, na_values are only exposed for testing +na_values: dict + +def _maybe_upcast( + arr, use_dtype_backend: bool = ..., dtype_backend: str = ... +) -> np.ndarray: ... 
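TextReader above is the C tokenizer behind the default engine of pandas.read_csv and is rarely constructed by hand. A sketch of reaching it through the public API instead — the read_csv keywords map onto the stub's constructor arguments (na_values, float_precision, and so on):

    from io import StringIO
    import pandas as pd

    csv = StringIO("a,b\n1,NA\n2.5,3\n")
    df = pd.read_csv(csv, engine="c", na_values=["NA"], float_precision="high")
    print(df.dtypes)    # both columns parse as float64 ("NA" becomes NaN)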
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d7b6470049b9f5a8c3ee46d8decd52df757fbd57 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslib.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5a340c1d88bc439663c2e482c9eb7270f8ebc5c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslib.pyi @@ -0,0 +1,37 @@ +from datetime import tzinfo + +import numpy as np + +from pandas._typing import npt + +def format_array_from_datetime( + values: npt.NDArray[np.int64], + tz: tzinfo | None = ..., + format: str | None = ..., + na_rep: str | float = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.object_]: ... +def array_with_unit_to_datetime( + values: npt.NDArray[np.object_], + unit: str, + errors: str = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... +def first_non_null(values: np.ndarray) -> int: ... +def array_to_datetime( + values: npt.NDArray[np.object_], + errors: str = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: bool = ..., + creso: int = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... + +# returned ndarray may be object dtype or datetime64[ns] + +def array_to_datetime_with_tz( + values: npt.NDArray[np.object_], + tz: tzinfo, + dayfirst: bool, + yearfirst: bool, + creso: int, +) -> npt.NDArray[np.int64]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..208698bdfce202a3521d26ae72342e42d9c7efa0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..01e391e11492502997fd4ffa2fb42f1975a7b565 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3e4d896fe3c908ed398ec80c757abc0af4ac3f84 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..244bb6c5b7e660980ab62e3e3e3505f2d4119e61 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..43a34c86525ed734f7e5a2fa866d0a3eef63b061 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bd86a6fdc2174a38bdd0845019e0afd6f2aecbf1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.pyi @@ -0,0 +1,141 @@ +from datetime import ( + datetime, + timedelta, + tzinfo as _tzinfo, +) +import typing + +import numpy as np + +from pandas._libs.tslibs.period import Period +from pandas._typing import Self + +NaT: NaTType +iNaT: int +nat_strings: set[str] + +_NaTComparisonTypes: typing.TypeAlias = ( + datetime | timedelta | Period | np.datetime64 | np.timedelta64 +) + +class _NatComparison: + def __call__(self, other: _NaTComparisonTypes) -> bool: ... + +class NaTType: + _value: np.int64 + @property + def value(self) -> int: ... + @property + def asm8(self) -> np.datetime64: ... + def to_datetime64(self) -> np.datetime64: ... + def to_numpy( + self, dtype: np.dtype | str | None = ..., copy: bool = ... + ) -> np.datetime64 | np.timedelta64: ... + @property + def is_leap_year(self) -> bool: ... + @property + def is_month_start(self) -> bool: ... + @property + def is_quarter_start(self) -> bool: ... + @property + def is_year_start(self) -> bool: ... + @property + def is_month_end(self) -> bool: ... + @property + def is_quarter_end(self) -> bool: ... + @property + def is_year_end(self) -> bool: ... + @property + def day_of_year(self) -> float: ... + @property + def dayofyear(self) -> float: ... + @property + def days_in_month(self) -> float: ... + @property + def daysinmonth(self) -> float: ... + @property + def day_of_week(self) -> float: ... + @property + def dayofweek(self) -> float: ... + @property + def week(self) -> float: ... + @property + def weekofyear(self) -> float: ... + def day_name(self) -> float: ... + def month_name(self) -> float: ... + def weekday(self) -> float: ... + def isoweekday(self) -> float: ... + def total_seconds(self) -> float: ... + def today(self, *args, **kwargs) -> NaTType: ... + def now(self, *args, **kwargs) -> NaTType: ... + def to_pydatetime(self) -> NaTType: ... + def date(self) -> NaTType: ... + def round(self) -> NaTType: ... + def floor(self) -> NaTType: ... + def ceil(self) -> NaTType: ... + @property + def tzinfo(self) -> None: ... + @property + def tz(self) -> None: ... + def tz_convert(self, tz: _tzinfo | str | None) -> NaTType: ... + def tz_localize( + self, + tz: _tzinfo | str | None, + ambiguous: str = ..., + nonexistent: str = ..., + ) -> NaTType: ... + def replace( + self, + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + nanosecond: int | None = ..., + tzinfo: _tzinfo | None = ..., + fold: int | None = ..., + ) -> NaTType: ... + @property + def year(self) -> float: ... 
+ @property + def quarter(self) -> float: ... + @property + def month(self) -> float: ... + @property + def day(self) -> float: ... + @property + def hour(self) -> float: ... + @property + def minute(self) -> float: ... + @property + def second(self) -> float: ... + @property + def millisecond(self) -> float: ... + @property + def microsecond(self) -> float: ... + @property + def nanosecond(self) -> float: ... + # inject Timedelta properties + @property + def days(self) -> float: ... + @property + def microseconds(self) -> float: ... + @property + def nanoseconds(self) -> float: ... + # inject Period properties + @property + def qyear(self) -> float: ... + def __eq__(self, other: object) -> bool: ... + def __ne__(self, other: object) -> bool: ... + __lt__: _NatComparison + __le__: _NatComparison + __gt__: _NatComparison + __ge__: _NatComparison + def __sub__(self, other: Self | timedelta | datetime) -> Self: ... + def __rsub__(self, other: Self | timedelta | datetime) -> Self: ... + def __add__(self, other: Self | timedelta | datetime) -> Self: ... + def __radd__(self, other: Self | timedelta | datetime) -> Self: ... + def __hash__(self) -> int: ... + def as_unit(self, unit: str, round_ok: bool = ...) -> NaTType: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.pyi new file mode 100644 index 0000000000000000000000000000000000000000..00ef35c50e53251d5ca6f6c6d5ad28a67a695a21 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.pyi @@ -0,0 +1,27 @@ +import numpy as np + +from pandas._typing import npt + +class OutOfBoundsDatetime(ValueError): ... +class OutOfBoundsTimedelta(ValueError): ... + +# only exposed for testing +def py_get_unit_from_dtype(dtype: np.dtype): ... +def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ... +def astype_overflowsafe( + values: np.ndarray, + dtype: np.dtype, + copy: bool = ..., + round_ok: bool = ..., + is_coerce: bool = ..., +) -> np.ndarray: ... +def is_unitless(dtype: np.dtype) -> bool: ... +def compare_mismatched_resolutions( + left: np.ndarray, right: np.ndarray, op +) -> npt.NDArray[np.bool_]: ... +def add_overflowsafe( + left: npt.NDArray[np.int64], + right: npt.NDArray[np.int64], +) -> npt.NDArray[np.int64]: ... +def get_supported_dtype(dtype: np.dtype) -> np.dtype: ... +def is_supported_dtype(dtype: np.dtype) -> bool: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7eb8dc0813868de9ca96c086f5506619719937f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.pyi @@ -0,0 +1,287 @@ +from datetime import ( + datetime, + time, + timedelta, +) +from typing import ( + Any, + Collection, + Literal, + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs.tslibs.nattype import NaTType +from pandas._typing import ( + OffsetCalendar, + Self, + npt, +) + +from .timedeltas import Timedelta + +_BaseOffsetT = TypeVar("_BaseOffsetT", bound=BaseOffset) +_DatetimeT = TypeVar("_DatetimeT", bound=datetime) +_TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta) + +_relativedelta_kwds: set[str] +prefix_mapping: dict[str, type] + +class ApplyTypeError(TypeError): ... + +class BaseOffset: + n: int + normalize: bool + def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... 
+ def __eq__(self, other) -> bool: ... + def __ne__(self, other) -> bool: ... + def __hash__(self) -> int: ... + @property + def kwds(self) -> dict: ... + @property + def base(self) -> BaseOffset: ... + @overload + def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... + @overload + def __add__(self, other: BaseOffset) -> Self: ... + @overload + def __add__(self, other: _DatetimeT) -> _DatetimeT: ... + @overload + def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ... + @overload + def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... + @overload + def __radd__(self, other: BaseOffset) -> Self: ... + @overload + def __radd__(self, other: _DatetimeT) -> _DatetimeT: ... + @overload + def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ... + @overload + def __radd__(self, other: NaTType) -> NaTType: ... + def __sub__(self, other: BaseOffset) -> Self: ... + @overload + def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... + @overload + def __rsub__(self, other: BaseOffset): ... + @overload + def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ... + @overload + def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ... + @overload + def __mul__(self, other: np.ndarray) -> np.ndarray: ... + @overload + def __mul__(self, other: int): ... + @overload + def __rmul__(self, other: np.ndarray) -> np.ndarray: ... + @overload + def __rmul__(self, other: int) -> Self: ... + def __neg__(self) -> Self: ... + def copy(self) -> Self: ... + @property + def name(self) -> str: ... + @property + def rule_code(self) -> str: ... + @property + def freqstr(self) -> str: ... + def _apply(self, other): ... + def _apply_array(self, dtarr: np.ndarray) -> np.ndarray: ... + def rollback(self, dt: datetime) -> datetime: ... + def rollforward(self, dt: datetime) -> datetime: ... + def is_on_offset(self, dt: datetime) -> bool: ... + def __setstate__(self, state) -> None: ... + def __getstate__(self): ... + @property + def nanos(self) -> int: ... + def is_anchored(self) -> bool: ... + +def _get_offset(name: str) -> BaseOffset: ... + +class SingleConstructorOffset(BaseOffset): + @classmethod + def _from_name(cls, suffix: None = ...): ... + def __reduce__(self): ... + +@overload +def to_offset(freq: None, is_period: bool = ...) -> None: ... +@overload +def to_offset(freq: _BaseOffsetT, is_period: bool = ...) -> _BaseOffsetT: ... +@overload +def to_offset(freq: timedelta | str, is_period: bool = ...) -> BaseOffset: ... + +class Tick(SingleConstructorOffset): + _creso: int + _prefix: str + def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... + @property + def delta(self) -> Timedelta: ... + @property + def nanos(self) -> int: ... + +def delta_to_tick(delta: timedelta) -> Tick: ... + +class Day(Tick): ... +class Hour(Tick): ... +class Minute(Tick): ... +class Second(Tick): ... +class Milli(Tick): ... +class Micro(Tick): ... +class Nano(Tick): ... + +class RelativeDeltaOffset(BaseOffset): + def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ... + +class BusinessMixin(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., offset: timedelta = ... + ) -> None: ... + +class BusinessDay(BusinessMixin): ... + +class BusinessHour(BusinessMixin): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + start: str | time | Collection[str | time] = ..., + end: str | time | Collection[str | time] = ..., + offset: timedelta = ..., + ) -> None: ... 
+ +class WeekOfMonthMixin(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., weekday: int = ... + ) -> None: ... + +class YearOffset(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., month: int | None = ... + ) -> None: ... + +class BYearEnd(YearOffset): ... +class BYearBegin(YearOffset): ... +class YearEnd(YearOffset): ... +class YearBegin(YearOffset): ... + +class QuarterOffset(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ... + ) -> None: ... + +class BQuarterEnd(QuarterOffset): ... +class BQuarterBegin(QuarterOffset): ... +class QuarterEnd(QuarterOffset): ... +class QuarterBegin(QuarterOffset): ... +class MonthOffset(SingleConstructorOffset): ... +class MonthEnd(MonthOffset): ... +class MonthBegin(MonthOffset): ... +class BusinessMonthEnd(MonthOffset): ... +class BusinessMonthBegin(MonthOffset): ... + +class SemiMonthOffset(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ... + ) -> None: ... + +class SemiMonthEnd(SemiMonthOffset): ... +class SemiMonthBegin(SemiMonthOffset): ... + +class Week(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., weekday: int | None = ... + ) -> None: ... + +class WeekOfMonth(WeekOfMonthMixin): + def __init__( + self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ... + ) -> None: ... + +class LastWeekOfMonth(WeekOfMonthMixin): ... + +class FY5253Mixin(SingleConstructorOffset): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekday: int = ..., + startingMonth: int = ..., + variation: Literal["nearest", "last"] = ..., + ) -> None: ... + +class FY5253(FY5253Mixin): ... + +class FY5253Quarter(FY5253Mixin): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekday: int = ..., + startingMonth: int = ..., + qtr_with_extra_week: int = ..., + variation: Literal["nearest", "last"] = ..., + ) -> None: ... + +class Easter(SingleConstructorOffset): ... + +class _CustomBusinessMonth(BusinessMixin): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekmask: str = ..., + holidays: list | None = ..., + calendar: OffsetCalendar | None = ..., + offset: timedelta = ..., + ) -> None: ... + +class CustomBusinessDay(BusinessDay): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekmask: str = ..., + holidays: list | None = ..., + calendar: OffsetCalendar | None = ..., + offset: timedelta = ..., + ) -> None: ... + +class CustomBusinessHour(BusinessHour): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekmask: str = ..., + holidays: list | None = ..., + calendar: OffsetCalendar | None = ..., + start: str | time | Collection[str | time] = ..., + end: str | time | Collection[str | time] = ..., + offset: timedelta = ..., + ) -> None: ... + +class CustomBusinessMonthEnd(_CustomBusinessMonth): ... +class CustomBusinessMonthBegin(_CustomBusinessMonth): ... +class OffsetMeta(type): ... +class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): ... + +BDay = BusinessDay +BMonthEnd = BusinessMonthEnd +BMonthBegin = BusinessMonthBegin +CBMonthEnd = CustomBusinessMonthEnd +CBMonthBegin = CustomBusinessMonthBegin +CDay = CustomBusinessDay + +def roll_qtrday( + other: datetime, n: int, month: int, day_opt: str, modby: int +) -> int: ... 
+ +INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"] + +def shift_months( + dtindex: npt.NDArray[np.int64], + months: int, + day_opt: str | None = ..., + reso: int = ..., +) -> npt.NDArray[np.int64]: ... + +_offset_map: dict[str, BaseOffset] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/period.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/period.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..754f3c8996a5a5801d79e016458addd6bdac7887 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/period.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/period.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/period.pyi new file mode 100644 index 0000000000000000000000000000000000000000..22f3bdbe668decaac0c53cf080ae5c3f098d7a48 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/period.pyi @@ -0,0 +1,135 @@ +from datetime import timedelta +from typing import Literal + +import numpy as np + +from pandas._libs.tslibs.dtypes import PeriodDtypeBase +from pandas._libs.tslibs.nattype import NaTType +from pandas._libs.tslibs.offsets import BaseOffset +from pandas._libs.tslibs.timestamps import Timestamp +from pandas._typing import ( + Frequency, + npt, +) + +INVALID_FREQ_ERR_MSG: str +DIFFERENT_FREQ: str + +class IncompatibleFrequency(ValueError): ... + +def periodarr_to_dt64arr( + periodarr: npt.NDArray[np.int64], # const int64_t[:] + freq: int, +) -> npt.NDArray[np.int64]: ... +def period_asfreq_arr( + arr: npt.NDArray[np.int64], + freq1: int, + freq2: int, + end: bool, +) -> npt.NDArray[np.int64]: ... +def get_period_field_arr( + field: str, + arr: npt.NDArray[np.int64], # const int64_t[:] + freq: int, +) -> npt.NDArray[np.int64]: ... +def from_ordinals( + values: npt.NDArray[np.int64], # const int64_t[:] + freq: timedelta | BaseOffset | str, +) -> npt.NDArray[np.int64]: ... +def extract_ordinals( + values: npt.NDArray[np.object_], + freq: Frequency | int, +) -> npt.NDArray[np.int64]: ... +def extract_freq( + values: npt.NDArray[np.object_], +) -> BaseOffset: ... +def period_array_strftime( + values: npt.NDArray[np.int64], + dtype_code: int, + na_rep, + date_format: str | None, +) -> npt.NDArray[np.object_]: ... + +# exposed for tests +def period_asfreq(ordinal: int, freq1: int, freq2: int, end: bool) -> int: ... +def period_ordinal( + y: int, m: int, d: int, h: int, min: int, s: int, us: int, ps: int, freq: int +) -> int: ... +def freq_to_dtype_code(freq: BaseOffset) -> int: ... +def validate_end_alias(how: str) -> Literal["E", "S"]: ... + +class PeriodMixin: + @property + def end_time(self) -> Timestamp: ... + @property + def start_time(self) -> Timestamp: ... + def _require_matching_freq(self, other: BaseOffset, base: bool = ...) -> None: ... + +class Period(PeriodMixin): + ordinal: int # int64_t + freq: BaseOffset + _dtype: PeriodDtypeBase + + # error: "__new__" must return a class instance (got "Union[Period, NaTType]") + def __new__( # type: ignore[misc] + cls, + value=..., + freq: int | str | BaseOffset | None = ..., + ordinal: int | None = ..., + year: int | None = ..., + month: int | None = ..., + quarter: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + ) -> Period | NaTType: ... 
+ @classmethod + def _maybe_convert_freq(cls, freq) -> BaseOffset: ... + @classmethod + def _from_ordinal(cls, ordinal: int, freq: BaseOffset) -> Period: ... + @classmethod + def now(cls, freq: Frequency) -> Period: ... + def strftime(self, fmt: str | None) -> str: ... + def to_timestamp( + self, + freq: str | BaseOffset | None = ..., + how: str = ..., + ) -> Timestamp: ... + def asfreq(self, freq: str | BaseOffset, how: str = ...) -> Period: ... + @property + def freqstr(self) -> str: ... + @property + def is_leap_year(self) -> bool: ... + @property + def daysinmonth(self) -> int: ... + @property + def days_in_month(self) -> int: ... + @property + def qyear(self) -> int: ... + @property + def quarter(self) -> int: ... + @property + def day_of_year(self) -> int: ... + @property + def weekday(self) -> int: ... + @property + def day_of_week(self) -> int: ... + @property + def week(self) -> int: ... + @property + def weekofyear(self) -> int: ... + @property + def second(self) -> int: ... + @property + def minute(self) -> int: ... + @property + def hour(self) -> int: ... + @property + def day(self) -> int: ... + @property + def month(self) -> int: ... + @property + def year(self) -> int: ... + def __sub__(self, other) -> Period | BaseOffset: ... + def __add__(self, other) -> Period: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5a5f123e8641fe363ac4a442d3c23771f140ce26 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c615599159f6ee4df5b72d6c153b1365730bc85f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4e9f0c6ae6c33447ebc86d3daf5bf5cedbe5b0cb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.pyi @@ -0,0 +1,21 @@ +from datetime import ( + datetime, + tzinfo, +) +from typing import Callable + +import numpy as np + +# imported from dateutil.tz +dateutil_gettz: Callable[[str], tzinfo] + +def tz_standardize(tz: tzinfo) -> tzinfo: ... +def tz_compare(start: tzinfo | None, end: tzinfo | None) -> bool: ... +def infer_tzinfo( + start: datetime | None, + end: datetime | None, +) -> tzinfo | None: ... +def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ... +def get_timezone(tz: tzinfo) -> tzinfo | str: ... +def is_utc(tz: tzinfo | None) -> bool: ... +def is_fixed_offset(tz: tzinfo) -> bool: ... 
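Period and the offset machinery stubbed above are public API: to_offset resolves a frequency alias to the matching BaseOffset subclass, and Period exposes the calendar properties and asfreq/to_timestamp conversions listed in the stub. A brief sketch:

    from pandas import Period
    from pandas.tseries.frequencies import to_offset

    off = to_offset("2W")                     # Week offset with n == 2
    print(type(off).__name__, off.n)

    p = Period("2024-03", freq="M")
    print(p.start_time)                       # Timestamp('2024-03-01 00:00:00')
    print(p.asfreq("D", how="end"))           # Period('2024-03-31', 'D')
    print(p + 1)                              # Period('2024-04', 'M')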
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9fdcbcf73d45e8d92bde2ff5068e707a158eed98 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/aggregations.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/aggregations.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a6cfbec9b15b9aa13a15dab3ff35b0d8761f1036 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/aggregations.pyi @@ -0,0 +1,127 @@ +from typing import ( + Any, + Callable, + Literal, +) + +import numpy as np + +from pandas._typing import ( + WindowingRankType, + npt, +) + +def roll_sum( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_mean( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_var( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t + ddof: int = ..., +) -> np.ndarray: ... # np.ndarray[float] +def roll_skew( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_kurt( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_median_c( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_max( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_min( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_quantile( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t + quantile: float, # float64_t + interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], +) -> np.ndarray: ... # np.ndarray[float] +def roll_rank( + values: np.ndarray, + start: np.ndarray, + end: np.ndarray, + minp: int, + percentile: bool, + method: WindowingRankType, + ascending: bool, +) -> np.ndarray: ... 
# np.ndarray[float] +def roll_apply( + obj: object, + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t + function: Callable[..., Any], + raw: bool, + args: tuple[Any, ...], + kwargs: dict[str, Any], +) -> npt.NDArray[np.float64]: ... +def roll_weighted_sum( + values: np.ndarray, # const float64_t[:] + weights: np.ndarray, # const float64_t[:] + minp: int, +) -> np.ndarray: ... # np.ndarray[np.float64] +def roll_weighted_mean( + values: np.ndarray, # const float64_t[:] + weights: np.ndarray, # const float64_t[:] + minp: int, +) -> np.ndarray: ... # np.ndarray[np.float64] +def roll_weighted_var( + values: np.ndarray, # const float64_t[:] + weights: np.ndarray, # const float64_t[:] + minp: int, # int64_t + ddof: int, # unsigned int +) -> np.ndarray: ... # np.ndarray[np.float64] +def ewm( + vals: np.ndarray, # const float64_t[:] + start: np.ndarray, # const int64_t[:] + end: np.ndarray, # const int64_t[:] + minp: int, + com: float, # float64_t + adjust: bool, + ignore_na: bool, + deltas: np.ndarray | None = None, # const float64_t[:] + normalize: bool = True, +) -> np.ndarray: ... # np.ndarray[np.float64] +def ewmcov( + input_x: np.ndarray, # const float64_t[:] + start: np.ndarray, # const int64_t[:] + end: np.ndarray, # const int64_t[:] + minp: int, + input_y: np.ndarray, # const float64_t[:] + com: float, # float64_t + adjust: bool, + ignore_na: bool, + bias: bool, +) -> np.ndarray: ... # np.ndarray[np.float64] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c9bc64be34ac9a41d14fef33b0fc76bdf66527e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.pyi @@ -0,0 +1,12 @@ +import numpy as np + +from pandas._typing import npt + +def calculate_variable_window_bounds( + num_values: int, # int64_t + window_size: int, # int64_t + min_periods, + center: bool, + closed: str | None, + index: np.ndarray, # const int64_t[:] +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ... 
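The roll_* kernels above take precomputed start/end window bounds, normally produced by calculate_variable_window_bounds or a fixed-window indexer and passed down from Series.rolling. A minimal sketch calling one kernel directly, with bounds written out by hand for a trailing window of 3 with min_periods=1 — internal API, signatures taken from the stub:

    import numpy as np
    from pandas._libs.window import aggregations as window_aggregations

    vals = np.arange(5, dtype=np.float64)
    # Window i covers the half-open range [start[i], end[i]).
    start = np.array([0, 0, 0, 1, 2], dtype=np.int64)
    end = np.array([1, 2, 3, 4, 5], dtype=np.int64)
    print(window_aggregations.roll_mean(vals, start, end, 1))
    # -> [0.  0.5 1.  2.  3. ], matching Series.rolling(3, min_periods=1).mean()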
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e6afc0ef615b510726a6d0962df4c8f3d73cd3dc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77c2779e2e10d41d97192bd34518a8307546118c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5da8b47c8a1029ed0cf6092a8787e3b6a1b84ad4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97c1340f4aac69a89ccf79d3eaddae98b70de965 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..603763227cb888cb716692ddb82d206ba6812c90 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py @@ -0,0 +1,192 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) +import pandas._testing as tm + + +class TestIntervalArithmetic: + def test_interval_add(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(1, 2, closed=closed) + + result = interval + 1 + assert result == expected + + result = 1 + interval + assert result == expected + + result = interval + result += 1 + assert result == expected + + msg = r"unsupported operand type\(s\) for \+" + with pytest.raises(TypeError, match=msg): + interval + interval + + with pytest.raises(TypeError, match=msg): + interval + "foo" + + def test_interval_sub(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(-1, 0, closed=closed) + + result = interval - 1 + assert result == expected + + result = interval + result -= 1 + assert result == expected + + msg = r"unsupported operand type\(s\) for -" + with pytest.raises(TypeError, 
match=msg): + interval - interval + + with pytest.raises(TypeError, match=msg): + interval - "foo" + + def test_interval_mult(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(0, 2, closed=closed) + + result = interval * 2 + assert result == expected + + result = 2 * interval + assert result == expected + + result = interval + result *= 2 + assert result == expected + + msg = r"unsupported operand type\(s\) for \*" + with pytest.raises(TypeError, match=msg): + interval * interval + + msg = r"can\'t multiply sequence by non-int" + with pytest.raises(TypeError, match=msg): + interval * "foo" + + def test_interval_div(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(0, 0.5, closed=closed) + + result = interval / 2.0 + assert result == expected + + result = interval + result /= 2.0 + assert result == expected + + msg = r"unsupported operand type\(s\) for /" + with pytest.raises(TypeError, match=msg): + interval / interval + + with pytest.raises(TypeError, match=msg): + interval / "foo" + + def test_interval_floordiv(self, closed): + interval = Interval(1, 2, closed=closed) + expected = Interval(0, 1, closed=closed) + + result = interval // 2 + assert result == expected + + result = interval + result //= 2 + assert result == expected + + msg = r"unsupported operand type\(s\) for //" + with pytest.raises(TypeError, match=msg): + interval // interval + + with pytest.raises(TypeError, match=msg): + interval // "foo" + + @pytest.mark.parametrize("method", ["__add__", "__sub__"]) + @pytest.mark.parametrize( + "interval", + [ + Interval( + Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00") + ), + Interval(Timedelta(days=7), Timedelta(days=14)), + ], + ) + @pytest.mark.parametrize( + "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")] + ) + def test_time_interval_add_subtract_timedelta(self, interval, delta, method): + # https://github.com/pandas-dev/pandas/issues/32023 + result = getattr(interval, method)(delta) + left = getattr(interval.left, method)(delta) + right = getattr(interval.right, method)(delta) + expected = Interval(left, right) + + assert result == expected + + @pytest.mark.parametrize("interval", [Interval(1, 2), Interval(1.0, 2.0)]) + @pytest.mark.parametrize( + "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")] + ) + def test_numeric_interval_add_timedelta_raises(self, interval, delta): + # https://github.com/pandas-dev/pandas/issues/32023 + msg = "|".join( + [ + "unsupported operand", + "cannot use operands", + "Only numeric, Timestamp and Timedelta endpoints are allowed", + ] + ) + with pytest.raises((TypeError, ValueError), match=msg): + interval + delta + + with pytest.raises((TypeError, ValueError), match=msg): + delta + interval + + @pytest.mark.parametrize("klass", [timedelta, np.timedelta64, Timedelta]) + def test_timedelta_add_timestamp_interval(self, klass): + delta = klass(0) + expected = Interval(Timestamp("2020-01-01"), Timestamp("2020-02-01")) + + result = delta + expected + assert result == expected + + result = expected + delta + assert result == expected + + +class TestIntervalComparisons: + def test_interval_equal(self): + assert Interval(0, 1) == Interval(0, 1, closed="right") + assert Interval(0, 1) != Interval(0, 1, closed="left") + assert Interval(0, 1) != 0 + + def test_interval_comparison(self): + msg = ( + "'<' not supported between instances of " + "'pandas._libs.interval.Interval' and 'int'" + ) + with pytest.raises(TypeError, match=msg): + Interval(0, 
1) < 2 + + assert Interval(0, 1) < Interval(1, 2) + assert Interval(0, 1) < Interval(0, 2) + assert Interval(0, 1) < Interval(0.5, 1.5) + assert Interval(0, 1) <= Interval(0, 1) + assert Interval(0, 1) > Interval(-1, 2) + assert Interval(0, 1) >= Interval(0, 1) + + def test_equality_comparison_broadcasts_over_array(self): + # https://github.com/pandas-dev/pandas/issues/35931 + interval = Interval(0, 1) + arr = np.array([interval, interval]) + result = interval == arr + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..a4bc00b923434f8d62c8332b3104696051fd287e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py @@ -0,0 +1,51 @@ +import pytest + +from pandas import ( + Interval, + Period, + Timestamp, +) + + +class TestIntervalConstructors: + @pytest.mark.parametrize( + "left, right", + [ + ("a", "z"), + (("a", "b"), ("c", "d")), + (list("AB"), list("ab")), + (Interval(0, 1), Interval(1, 2)), + (Period("2018Q1", freq="Q"), Period("2018Q1", freq="Q")), + ], + ) + def test_construct_errors(self, left, right): + # GH#23013 + msg = "Only numeric, Timestamp and Timedelta endpoints are allowed" + with pytest.raises(ValueError, match=msg): + Interval(left, right) + + def test_constructor_errors(self): + msg = "invalid option for 'closed': foo" + with pytest.raises(ValueError, match=msg): + Interval(0, 1, closed="foo") + + msg = "left side of interval must be <= right side" + with pytest.raises(ValueError, match=msg): + Interval(1, 0) + + @pytest.mark.parametrize( + "tz_left, tz_right", [(None, "UTC"), ("UTC", None), ("UTC", "US/Eastern")] + ) + def test_constructor_errors_tz(self, tz_left, tz_right): + # GH#18538 + left = Timestamp("2017-01-01", tz=tz_left) + right = Timestamp("2017-01-02", tz=tz_right) + + if tz_left is None or tz_right is None: + error = TypeError + msg = "Cannot compare tz-naive and tz-aware timestamps" + else: + error = ValueError + msg = "left and right must have the same time zone" + with pytest.raises(error, match=msg): + Interval(left, right) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py new file mode 100644 index 0000000000000000000000000000000000000000..8dfca117a658b2a163ef35699c903ad14a032062 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py @@ -0,0 +1,73 @@ +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +class TestContains: + def test_contains(self): + interval = Interval(0, 1) + assert 0.5 in interval + assert 1 in interval + assert 0 not in interval + + interval_both = Interval(0, 1, "both") + assert 0 in interval_both + assert 1 in interval_both + + interval_neither = Interval(0, 1, closed="neither") + assert 0 not in interval_neither + assert 0.5 in interval_neither + assert 1 not in interval_neither + + def test_contains_interval(self, inclusive_endpoints_fixture): + interval1 = Interval(0, 1, "both") + interval2 = Interval(0, 1, inclusive_endpoints_fixture) + assert interval1 in interval1 + assert interval2 in interval2 + assert interval2 in interval1 + assert interval1 not in 
interval2 or inclusive_endpoints_fixture == "both" + + def test_contains_infinite_length(self): + interval1 = Interval(0, 1, "both") + interval2 = Interval(float("-inf"), float("inf"), "neither") + assert interval1 in interval2 + assert interval2 not in interval1 + + def test_contains_zero_length(self): + interval1 = Interval(0, 1, "both") + interval2 = Interval(-1, -1, "both") + interval3 = Interval(0.5, 0.5, "both") + assert interval2 not in interval1 + assert interval3 in interval1 + assert interval2 not in interval3 and interval3 not in interval2 + assert interval1 not in interval2 and interval1 not in interval3 + + @pytest.mark.parametrize( + "type1", + [ + (0, 1), + (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)), + (Timedelta("0h"), Timedelta("1h")), + ], + ) + @pytest.mark.parametrize( + "type2", + [ + (0, 1), + (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)), + (Timedelta("0h"), Timedelta("1h")), + ], + ) + def test_contains_mixed_types(self, type1, type2): + interval1 = Interval(*type1) + interval2 = Interval(*type2) + if type1 == type2: + assert interval1 in interval2 + else: + msg = "^'<=' not supported between instances of" + with pytest.raises(TypeError, match=msg): + interval1 in interval2 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..6bf7aa91df3cebc41712c611aaa3781b638009d1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py @@ -0,0 +1,11 @@ +from pandas import Interval + + +def test_interval_repr(): + interval = Interval(0, 1) + assert repr(interval) == "Interval(0, 1, closed='right')" + assert str(interval) == "(0, 1]" + + interval_left = Interval(0, 1, closed="left") + assert repr(interval_left) == "Interval(0, 1, closed='left')" + assert str(interval_left) == "[0, 1)" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..91b31e82f9c524f87e2849360cfd44b2f77b0c9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +@pytest.fixture +def interval(): + return Interval(0, 1) + + +class TestInterval: + def test_properties(self, interval): + assert interval.closed == "right" + assert interval.left == 0 + assert interval.right == 1 + assert interval.mid == 0.5 + + def test_hash(self, interval): + # should not raise + hash(interval) + + @pytest.mark.parametrize( + "left, right, expected", + [ + (0, 5, 5), + (-2, 5.5, 7.5), + (10, 10, 0), + (10, np.inf, np.inf), + (-np.inf, -5, np.inf), + (-np.inf, np.inf, np.inf), + (Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")), + (Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")), + (Timedelta("1h10min"), Timedelta("5h5min"), Timedelta("3h55min")), + (Timedelta("5s"), Timedelta("1h"), Timedelta("59min55s")), + ], + ) + def test_length(self, left, right, expected): + # GH 18789 + iv = Interval(left, right) + result = iv.length + assert result == expected + + @pytest.mark.parametrize( + "left, right, expected", + [ + ("2017-01-01", "2017-01-06", "5 days"), + ("2017-01-01", 
"2017-01-01 12:00:00", "12 hours"), + ("2017-01-01 12:00", "2017-01-01 12:00:00", "0 days"), + ("2017-01-01 12:01", "2017-01-05 17:31:00", "4 days 5 hours 30 min"), + ], + ) + @pytest.mark.parametrize("tz", (None, "UTC", "CET", "US/Eastern")) + def test_length_timestamp(self, tz, left, right, expected): + # GH 18789 + iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz)) + result = iv.length + expected = Timedelta(expected) + assert result == expected + + @pytest.mark.parametrize( + "left, right", + [ + (0, 1), + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timestamp("2018-01-02")), + ( + Timestamp("2018-01-01", tz="US/Eastern"), + Timestamp("2018-01-02", tz="US/Eastern"), + ), + ], + ) + def test_is_empty(self, left, right, closed): + # GH27219 + # non-empty always return False + iv = Interval(left, right, closed) + assert iv.is_empty is False + + # same endpoint is empty except when closed='both' (contains one point) + iv = Interval(left, left, closed) + result = iv.is_empty + expected = closed != "both" + assert result is expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py new file mode 100644 index 0000000000000000000000000000000000000000..7fcf59d7bb4afc0077884de68dc335aff25c2cc5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py @@ -0,0 +1,67 @@ +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +@pytest.fixture( + params=[ + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timedelta("1 day")), + (0, 1), + ], + ids=lambda x: type(x[0]).__name__, +) +def start_shift(request): + """ + Fixture for generating intervals of types from a start value and a shift + value that can be added to start to generate an endpoint + """ + return request.param + + +class TestOverlaps: + def test_overlaps_self(self, start_shift, closed): + start, shift = start_shift + interval = Interval(start, start + shift, closed) + assert interval.overlaps(interval) + + def test_overlaps_nested(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + 3 * shift, other_closed) + interval2 = Interval(start + shift, start + 2 * shift, closed) + + # nested intervals should always overlap + assert interval1.overlaps(interval2) + + def test_overlaps_disjoint(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + shift, other_closed) + interval2 = Interval(start + 2 * shift, start + 3 * shift, closed) + + # disjoint intervals should never overlap + assert not interval1.overlaps(interval2) + + def test_overlaps_endpoint(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + shift, other_closed) + interval2 = Interval(start + shift, start + 2 * shift, closed) + + # overlap if shared endpoint is closed for both (overlap at a point) + result = interval1.overlaps(interval2) + expected = interval1.closed_right and interval2.closed_left + assert result == expected + + @pytest.mark.parametrize( + "other", + [10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")], + ids=lambda x: type(x).__name__, + ) + def test_overlaps_invalid_type(self, other): + interval = Interval(0, 1) + msg = f"`other` must be an Interval, got {type(other).__name__}" + with pytest.raises(TypeError, match=msg): 
+ interval.overlaps(other) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf9ba3416bd6b1d8045749778eb7d193a81c631f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_arithmetic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..727b49734592ec44af590a1e17df92509cf1521c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timestamp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timestamp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e7ad12a0bc2facef9cd9bfc09f4229e1f8f765b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timestamp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ac11a61136a204e94f7955b8bee238a07a4bb81 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_as_unit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_as_unit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b37c282a494e7c3b97956c2c021fdc540b03403 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_as_unit.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_normalize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_normalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33d5e5631bc836d84fc11f68f7e63ab15e9365e8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_normalize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_replace.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_replace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3cf1e0a63235e367e7eeeca0857761146d9df86 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_replace.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_round.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_round.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3c6448e30dddfb3625080de712af736952528b3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_round.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_timestamp_method.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_timestamp_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11703546b027a849d65b973db49c2dab5a92112c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_timestamp_method.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_to_julian_date.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_to_julian_date.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a02dfae55cb43d5d1f364d8069f3f02dea1898d4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_to_julian_date.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_convert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37b0ad475102b2d8eb9332cb5044e9e67dfd6366 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_convert.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_localize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_localize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf1fad37ca589ba20ac8e7a2549a78e449d8d18e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_localize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_replace.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_replace.py new file mode 100644 index 0000000000000000000000000000000000000000..8a208455edc8237d3428f8e033ed494b424fb10b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_replace.py @@ -0,0 +1,193 @@ +from datetime import datetime + +from dateutil.tz 
import gettz +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + Timestamp, + conversion, +) +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +import pandas.util._test_decorators as td + +import pandas._testing as tm + + +class TestTimestampReplace: + def test_replace_out_of_pydatetime_bounds(self): + # GH#50348 + ts = Timestamp("2016-01-01").as_unit("ns") + + msg = "Out of bounds timestamp: 99999-01-01 00:00:00 with frequency 'ns'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + ts.replace(year=99_999) + + ts = ts.as_unit("ms") + result = ts.replace(year=99_999) + assert result.year == 99_999 + assert result._value == Timestamp(np.datetime64("99999-01-01", "ms"))._value + + def test_replace_non_nano(self): + ts = Timestamp._from_value_and_reso( + 91514880000000000, NpyDatetimeUnit.NPY_FR_us.value, None + ) + assert ts.to_pydatetime() == datetime(4869, 12, 28) + + result = ts.replace(year=4900) + assert result._creso == ts._creso + assert result.to_pydatetime() == datetime(4900, 12, 28) + + def test_replace_naive(self): + # GH#14621, GH#7825 + ts = Timestamp("2016-01-01 09:00:00") + result = ts.replace(hour=0) + expected = Timestamp("2016-01-01 00:00:00") + assert result == expected + + def test_replace_aware(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH#14621, GH#7825 + # replacing datetime components with and w/o presence of a timezone + ts = Timestamp("2016-01-01 09:00:00", tz=tz) + result = ts.replace(hour=0) + expected = Timestamp("2016-01-01 00:00:00", tz=tz) + assert result == expected + + def test_replace_preserves_nanos(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH#14621, GH#7825 + ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz) + result = ts.replace(hour=0) + expected = Timestamp("2016-01-01 00:00:00.000000123", tz=tz) + assert result == expected + + def test_replace_multiple(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH#14621, GH#7825 + # replacing datetime components with and w/o presence of a timezone + # test all + ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz) + result = ts.replace( + year=2015, + month=2, + day=2, + hour=0, + minute=5, + second=5, + microsecond=5, + nanosecond=5, + ) + expected = Timestamp("2015-02-02 00:05:05.000005005", tz=tz) + assert result == expected + + def test_replace_invalid_kwarg(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH#14621, GH#7825 + ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz) + msg = r"replace\(\) got an unexpected keyword argument" + with pytest.raises(TypeError, match=msg): + ts.replace(foo=5) + + def test_replace_integer_args(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH#14621, GH#7825 + ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz) + msg = "value must be an integer, received for hour" + with pytest.raises(ValueError, match=msg): + ts.replace(hour=0.1) + + def test_replace_tzinfo_equiv_tz_localize_none(self): + # GH#14621, GH#7825 + # assert conversion to naive is the same as replacing tzinfo with None + ts = Timestamp("2013-11-03 01:59:59.999999-0400", tz="US/Eastern") + assert ts.tz_localize(None) == ts.replace(tzinfo=None) + + @td.skip_if_windows + def test_replace_tzinfo(self): + # GH#15683 + dt = datetime(2016, 3, 27, 1) + tzinfo = pytz.timezone("CET").localize(dt, is_dst=False).tzinfo + + result_dt = dt.replace(tzinfo=tzinfo) + result_pd = Timestamp(dt).replace(tzinfo=tzinfo) + + # datetime.timestamp() converts in the local timezone + with tm.set_timezone("UTC"): + assert 
result_dt.timestamp() == result_pd.timestamp() + + assert result_dt == result_pd + assert result_dt == result_pd.to_pydatetime() + + result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None) + result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None) + + # datetime.timestamp() converts in the local timezone + with tm.set_timezone("UTC"): + assert result_dt.timestamp() == result_pd.timestamp() + + assert result_dt == result_pd + assert result_dt == result_pd.to_pydatetime() + + @pytest.mark.parametrize( + "tz, normalize", + [ + (pytz.timezone("US/Eastern"), lambda x: x.tzinfo.normalize(x)), + (gettz("US/Eastern"), lambda x: x), + ], + ) + def test_replace_across_dst(self, tz, normalize): + # GH#18319 check that 1) timezone is correctly normalized and + # 2) that hour is not incorrectly changed by this normalization + ts_naive = Timestamp("2017-12-03 16:03:30") + ts_aware = conversion.localize_pydatetime(ts_naive, tz) + + # Preliminary sanity-check + assert ts_aware == normalize(ts_aware) + + # Replace across DST boundary + ts2 = ts_aware.replace(month=6) + + # Check that `replace` preserves hour literal + assert (ts2.hour, ts2.minute) == (ts_aware.hour, ts_aware.minute) + + # Check that post-replace object is appropriately normalized + ts2b = normalize(ts2) + assert ts2 == ts2b + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_replace_dst_border(self, unit): + # GH 7825 + t = Timestamp("2013-11-3", tz="America/Chicago").as_unit(unit) + result = t.replace(hour=3) + expected = Timestamp("2013-11-3 03:00:00", tz="America/Chicago") + assert result == expected + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + @pytest.mark.parametrize("fold", [0, 1]) + @pytest.mark.parametrize("tz", ["dateutil/Europe/London", "Europe/London"]) + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_replace_dst_fold(self, fold, tz, unit): + # GH 25017 + d = datetime(2019, 10, 27, 2, 30) + ts = Timestamp(d, tz=tz).as_unit(unit) + result = ts.replace(hour=1, fold=fold) + expected = Timestamp(datetime(2019, 10, 27, 1, 30)).tz_localize( + tz, ambiguous=not fold + ) + assert result == expected + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + @pytest.mark.parametrize("fold", [0, 1]) + def test_replace_preserves_fold(self, fold): + # GH#37610.
Check that replace preserves Timestamp fold property + tz = gettz("Europe/Moscow") + + ts = Timestamp( + year=2009, month=10, day=25, hour=2, minute=30, fold=fold, tzinfo=tz + ) + ts_replaced = ts.replace(second=1) + + assert ts_replaced.fold == fold diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_round.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_round.py new file mode 100644 index 0000000000000000000000000000000000000000..d10ee18b47f19074390915500afde885e853eb5d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_round.py @@ -0,0 +1,383 @@ +from hypothesis import ( + given, + strategies as st, +) +import numpy as np +import pytest +import pytz + +from pandas._libs import lib +from pandas._libs.tslibs import ( + NaT, + OutOfBoundsDatetime, + Timedelta, + Timestamp, + iNaT, + to_offset, +) +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG + +import pandas._testing as tm + + +class TestTimestampRound: + def test_round_division_by_zero_raises(self): + ts = Timestamp("2016-01-01") + + msg = "Division by zero in rounding" + with pytest.raises(ValueError, match=msg): + ts.round("0ns") + + @pytest.mark.parametrize( + "timestamp, freq, expected", + [ + ("20130101 09:10:11", "D", "20130101"), + ("20130101 19:10:11", "D", "20130102"), + ("20130201 12:00:00", "D", "20130202"), + ("20130104 12:00:00", "D", "20130105"), + ("2000-01-05 05:09:15.13", "D", "2000-01-05 00:00:00"), + ("2000-01-05 05:09:15.13", "h", "2000-01-05 05:00:00"), + ("2000-01-05 05:09:15.13", "s", "2000-01-05 05:09:15"), + ], + ) + def test_round_frequencies(self, timestamp, freq, expected): + dt = Timestamp(timestamp) + result = dt.round(freq) + expected = Timestamp(expected) + assert result == expected + + def test_round_tzaware(self): + dt = Timestamp("20130101 09:10:11", tz="US/Eastern") + result = dt.round("D") + expected = Timestamp("20130101", tz="US/Eastern") + assert result == expected + + dt = Timestamp("20130101 09:10:11", tz="US/Eastern") + result = dt.round("s") + assert result == dt + + def test_round_30min(self): + # round + dt = Timestamp("20130104 12:32:00") + result = dt.round("30Min") + expected = Timestamp("20130104 12:30:00") + assert result == expected + + def test_round_subsecond(self): + # GH#14440 & GH#15578 + result = Timestamp("2016-10-17 12:00:00.0015").round("ms") + expected = Timestamp("2016-10-17 12:00:00.002000") + assert result == expected + + result = Timestamp("2016-10-17 12:00:00.00149").round("ms") + expected = Timestamp("2016-10-17 12:00:00.001000") + assert result == expected + + ts = Timestamp("2016-10-17 12:00:00.0015") + for freq in ["us", "ns"]: + assert ts == ts.round(freq) + + result = Timestamp("2016-10-17 12:00:00.001501031").round("10ns") + expected = Timestamp("2016-10-17 12:00:00.001501030") + assert result == expected + + def test_round_nonstandard_freq(self): + with tm.assert_produces_warning(False): + Timestamp("2016-10-17 12:00:00.001501031").round("1010ns") + + def test_round_invalid_arg(self): + stamp = Timestamp("2000-01-05 05:09:15.13") + with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): + stamp.round("foo") + + @pytest.mark.parametrize( + "test_input, rounder, freq, expected", + [ + ("2117-01-01 00:00:45", "floor", "15s", "2117-01-01 00:00:45"), + ("2117-01-01 00:00:45", "ceil", "15s", "2117-01-01 00:00:45"), + ( + "2117-01-01 00:00:45.000000012", + "floor", + "10ns", + "2117-01-01 00:00:45.000000010", + ), + ( + "1823-01-01 00:00:01.000000012", + "ceil", + "10ns", + "1823-01-01 00:00:01.000000020", + ), + ("1823-01-01 00:00:01", "floor", "1s", "1823-01-01 00:00:01"), + ("1823-01-01 00:00:01", "ceil", "1s", "1823-01-01 00:00:01"), + ("NaT", "floor", "1s", "NaT"), + ("NaT", "ceil", "1s", "NaT"), + ], + ) + def test_ceil_floor_edge(self, test_input, rounder, freq, expected): + dt = Timestamp(test_input) + func = getattr(dt, rounder) + result = func(freq) + + if dt is NaT: + assert result is NaT + else: + expected = Timestamp(expected) + assert result == expected + + @pytest.mark.parametrize( + "test_input, freq, expected", + [ + ("2018-01-01 00:02:06", "2s", "2018-01-01 00:02:06"), + ("2018-01-01 00:02:00", "2min", "2018-01-01 00:02:00"), + ("2018-01-01 00:04:00", "4min", "2018-01-01 00:04:00"), + ("2018-01-01 00:15:00", "15min", "2018-01-01 00:15:00"), + ("2018-01-01 00:20:00", "20min", "2018-01-01 00:20:00"), + ("2018-01-01 03:00:00", "3h", "2018-01-01 03:00:00"), + ], + ) + @pytest.mark.parametrize("rounder", ["ceil", "floor", "round"]) + def test_round_minute_freq(self, test_input, freq, expected, rounder): + # Ensure timestamps that shouldn't round don't! + # GH#21262 + + dt = Timestamp(test_input) + expected = Timestamp(expected) + func = getattr(dt, rounder) + result = func(freq) + assert result == expected + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_ceil(self, unit): + dt = Timestamp("20130101 09:10:11").as_unit(unit) + result = dt.ceil("D") + expected = Timestamp("20130102") + assert result == expected + assert result._creso == dt._creso + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_floor(self, unit): + dt = Timestamp("20130101 09:10:11").as_unit(unit) + result = dt.floor("D") + expected = Timestamp("20130101") + assert result == expected + assert result._creso == dt._creso + + @pytest.mark.parametrize("method", ["ceil", "round", "floor"]) + @pytest.mark.parametrize( + "unit", + ["ns", "us", "ms", "s"], + ) + def test_round_dst_border_ambiguous(self, method, unit): + # GH 18946 round near "fall back" DST + ts = Timestamp("2017-10-29 00:00:00", tz="UTC").tz_convert("Europe/Madrid") + ts = ts.as_unit(unit) + + result = getattr(ts, method)("h", ambiguous=True) + assert result == ts + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + result = getattr(ts, method)("h", ambiguous=False) + expected = Timestamp("2017-10-29 01:00:00", tz="UTC").tz_convert( + "Europe/Madrid" + ) + assert result == expected + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + result = getattr(ts, method)("h", ambiguous="NaT") + assert result is NaT + + msg = "Cannot infer dst time" + with pytest.raises(pytz.AmbiguousTimeError, match=msg): + getattr(ts, method)("h", ambiguous="raise") + + @pytest.mark.parametrize( + "method, ts_str, freq", + [ + ["ceil", "2018-03-11 01:59:00-0600", "5min"], + ["round", "2018-03-11 01:59:00-0600", "5min"], + ["floor", "2018-03-11 03:01:00-0500", "2h"], + ], + ) + @pytest.mark.parametrize( + "unit", + ["ns", "us", "ms", "s"], + ) + def test_round_dst_border_nonexistent(self, method, ts_str, freq, unit): + # GH 23324 round near "spring forward" DST + ts = Timestamp(ts_str, tz="America/Chicago").as_unit(unit) + result = getattr(ts, method)(freq, nonexistent="shift_forward") + expected = Timestamp("2018-03-11 03:00:00", tz="America/Chicago") + assert result == expected + assert result._creso == getattr(NpyDatetimeUnit,
f"NPY_FR_{unit}").value + + result = getattr(ts, method)(freq, nonexistent="NaT") + assert result is NaT + + msg = "2018-03-11 02:00:00" + with pytest.raises(pytz.NonExistentTimeError, match=msg): + getattr(ts, method)(freq, nonexistent="raise") + + @pytest.mark.parametrize( + "timestamp", + [ + "2018-01-01 0:0:0.124999360", + "2018-01-01 0:0:0.125000367", + "2018-01-01 0:0:0.125500", + "2018-01-01 0:0:0.126500", + "2018-01-01 12:00:00", + "2019-01-01 12:00:00", + ], + ) + @pytest.mark.parametrize( + "freq", + [ + "2ns", + "3ns", + "4ns", + "5ns", + "6ns", + "7ns", + "250ns", + "500ns", + "750ns", + "1us", + "19us", + "250us", + "500us", + "750us", + "1s", + "2s", + "3s", + "1D", + ], + ) + def test_round_int64(self, timestamp, freq): + # check that all rounding modes are accurate to int64 precision + # see GH#22591 + dt = Timestamp(timestamp).as_unit("ns") + unit = to_offset(freq).nanos + + # test floor + result = dt.floor(freq) + assert result._value % unit == 0, f"floor not a {freq} multiple" + assert 0 <= dt._value - result._value < unit, "floor error" + + # test ceil + result = dt.ceil(freq) + assert result._value % unit == 0, f"ceil not a {freq} multiple" + assert 0 <= result._value - dt._value < unit, "ceil error" + + # test round + result = dt.round(freq) + assert result._value % unit == 0, f"round not a {freq} multiple" + assert abs(result._value - dt._value) <= unit // 2, "round error" + if unit % 2 == 0 and abs(result._value - dt._value) == unit // 2: + # round half to even + assert result._value // unit % 2 == 0, "round half to even error" + + def test_round_implementation_bounds(self): + # See also: analogous test for Timedelta + result = Timestamp.min.ceil("s") + expected = Timestamp(1677, 9, 21, 0, 12, 44) + assert result == expected + + result = Timestamp.max.floor("s") + expected = Timestamp.max - Timedelta(854775807) + assert result == expected + + msg = "Cannot round 1677-09-21 00:12:43.145224193 to freq=" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp.min.floor("s") + + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp.min.round("s") + + msg = "Cannot round 2262-04-11 23:47:16.854775807 to freq=" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp.max.ceil("s") + + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp.max.round("s") + + @given(val=st.integers(iNaT + 1, lib.i8max)) + @pytest.mark.parametrize( + "method", [Timestamp.round, Timestamp.floor, Timestamp.ceil] + ) + def test_round_sanity(self, val, method): + cls = Timestamp + err_cls = OutOfBoundsDatetime + + val = np.int64(val) + ts = cls(val) + + def checker(ts, nanos, unit): + # First check that we do raise in cases where we should + if nanos == 1: + pass + else: + div, mod = divmod(ts._value, nanos) + diff = int(nanos - mod) + lb = ts._value - mod + assert lb <= ts._value # i.e. no overflows with python ints + ub = ts._value + diff + assert ub > ts._value # i.e. 
no overflows with python ints + + msg = "without overflow" + if mod == 0: + # We should never be raising in this + pass + elif method is cls.ceil: + if ub > cls.max._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + elif method is cls.floor: + if lb < cls.min._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + elif mod >= diff: + if ub > cls.max._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + elif lb < cls.min._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + + res = method(ts, unit) + + td = res - ts + diff = abs(td._value) + assert diff < nanos + assert res._value % nanos == 0 + + if method is cls.round: + assert diff <= nanos / 2 + elif method is cls.floor: + assert res <= ts + elif method is cls.ceil: + assert res >= ts + + nanos = 1 + checker(ts, nanos, "ns") + + nanos = 1000 + checker(ts, nanos, "us") + + nanos = 1_000_000 + checker(ts, nanos, "ms") + + nanos = 1_000_000_000 + checker(ts, nanos, "s") + + nanos = 60 * 1_000_000_000 + checker(ts, nanos, "min") + + nanos = 60 * 60 * 1_000_000_000 + checker(ts, nanos, "h") + + nanos = 24 * 60 * 60 * 1_000_000_000 + checker(ts, nanos, "D") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_timestamp_method.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_timestamp_method.py new file mode 100644 index 0000000000000000000000000000000000000000..67985bd4ba566b280cf7a29f826014d43c0df9f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_timestamp_method.py @@ -0,0 +1,31 @@ +# NB: This is for the Timestamp.timestamp *method* specifically, not +# the Timestamp class in general. 
+ +from pytz import utc + +from pandas._libs.tslibs import Timestamp +import pandas.util._test_decorators as td + +import pandas._testing as tm + + +class TestTimestampMethod: + @td.skip_if_windows + def test_timestamp(self, fixed_now_ts): + # GH#17329 + # tz-naive --> treat it as if it were UTC for purposes of timestamp() + ts = fixed_now_ts + uts = ts.replace(tzinfo=utc) + assert ts.timestamp() == uts.timestamp() + + tsc = Timestamp("2014-10-11 11:00:01.12345678", tz="US/Central") + utsc = tsc.tz_convert("UTC") + + # utsc is a different representation of the same time + assert tsc.timestamp() == utsc.timestamp() + + # datetime.timestamp() converts in the local timezone + with tm.set_timezone("UTC"): + # should agree with datetime.timestamp method + dt = ts.to_pydatetime() + assert dt.timestamp() == ts.timestamp() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_to_julian_date.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_to_julian_date.py new file mode 100644 index 0000000000000000000000000000000000000000..7769614b601a4842a7273b441af6552956ff2e72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_to_julian_date.py @@ -0,0 +1,28 @@ +from pandas import Timestamp + + +class TestTimestampToJulianDate: + def test_compare_1700(self): + ts = Timestamp("1700-06-23") + res = ts.to_julian_date() + assert res == 2_342_145.5 + + def test_compare_2000(self): + ts = Timestamp("2000-04-12") + res = ts.to_julian_date() + assert res == 2_451_646.5 + + def test_compare_2100(self): + ts = Timestamp("2100-08-12") + res = ts.to_julian_date() + assert res == 2_488_292.5 + + def test_compare_hour01(self): + ts = Timestamp("2000-08-12T01:00:00") + res = ts.to_julian_date() + assert res == 2_451_768.5416666666666666 + + def test_compare_hour13(self): + ts = Timestamp("2000-08-12T13:00:00") + res = ts.to_julian_date() + assert res == 2_451_769.0416666666666666 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py new file mode 100644 index 0000000000000000000000000000000000000000..57f57e56201c872e54e91c4e6da2c2154a07a6d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py @@ -0,0 +1,81 @@ +from datetime import ( + datetime, + timedelta, +) + +import pytz + +from pandas._libs.tslibs.timezones import dateutil_gettz as gettz +import pandas.util._test_decorators as td + +from pandas import Timestamp +import pandas._testing as tm + + +class TestTimestampToPyDatetime: + def test_to_pydatetime_fold(self): + # GH#45087 + tzstr = "dateutil/usr/share/zoneinfo/America/Chicago" + ts = Timestamp(year=2013, month=11, day=3, hour=1, minute=0, fold=1, tz=tzstr) + dt = ts.to_pydatetime() + assert dt.fold == 1 + + def test_to_pydatetime_nonzero_nano(self): + ts = Timestamp("2011-01-01 9:00:00.123456789") + + # Warn the user of data loss (nanoseconds). 
+ with tm.assert_produces_warning(UserWarning): + expected = datetime(2011, 1, 1, 9, 0, 0, 123456) + result = ts.to_pydatetime() + assert result == expected + + def test_timestamp_to_datetime(self): + stamp = Timestamp("20090415", tz="US/Eastern") + dtval = stamp.to_pydatetime() + assert stamp == dtval + assert stamp.tzinfo == dtval.tzinfo + + def test_timestamp_to_pydatetime_dateutil(self): + stamp = Timestamp("20090415", tz="dateutil/US/Eastern") + dtval = stamp.to_pydatetime() + assert stamp == dtval + assert stamp.tzinfo == dtval.tzinfo + + def test_timestamp_to_pydatetime_explicit_pytz(self): + stamp = Timestamp("20090415", tz=pytz.timezone("US/Eastern")) + dtval = stamp.to_pydatetime() + assert stamp == dtval + assert stamp.tzinfo == dtval.tzinfo + + @td.skip_if_windows + def test_timestamp_to_pydatetime_explicit_dateutil(self): + stamp = Timestamp("20090415", tz=gettz("US/Eastern")) + dtval = stamp.to_pydatetime() + assert stamp == dtval + assert stamp.tzinfo == dtval.tzinfo + + def test_to_pydatetime_bijective(self): + # Ensure that converting to datetime and back only loses precision + # by going from nanoseconds to microseconds. + exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning + with tm.assert_produces_warning(exp_warning): + pydt_max = Timestamp.max.to_pydatetime() + + assert ( + Timestamp(pydt_max).as_unit("ns")._value / 1000 + == Timestamp.max._value / 1000 + ) + + exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning + with tm.assert_produces_warning(exp_warning): + pydt_min = Timestamp.min.to_pydatetime() + + # The next assertion can be enabled once GH#39221 is merged + # assert pydt_min < Timestamp.min # this is bc nanos are dropped + tdus = timedelta(microseconds=1) + assert pydt_min + tdus > Timestamp.min + + assert ( + Timestamp(pydt_min + tdus).as_unit("ns")._value / 1000 + == Timestamp.min._value / 1000 + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_tz_localize.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_tz_localize.py new file mode 100644 index 0000000000000000000000000000000000000000..af3dee1880d2e0625c3e8a2f77aade21ac4f5c13 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_tz_localize.py @@ -0,0 +1,351 @@ +from datetime import timedelta +import re + +from dateutil.tz import gettz +import pytest +import pytz +from pytz.exceptions import ( + AmbiguousTimeError, + NonExistentTimeError, +) + +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.errors import OutOfBoundsDatetime + +from pandas import ( + NaT, + Timestamp, +) + +try: + from zoneinfo import ZoneInfo +except ImportError: + # Cannot assign to a type + ZoneInfo = None # type: ignore[misc, assignment] + + +class TestTimestampTZLocalize: + @pytest.mark.skip_ubsan + def test_tz_localize_pushes_out_of_bounds(self): + # GH#12677 + # tz_localize that pushes away from the boundary is OK + msg = ( + f"Converting {Timestamp.min.strftime('%Y-%m-%d %H:%M:%S')} " + f"underflows past {Timestamp.min}" + ) + pac = Timestamp.min.tz_localize("US/Pacific") + assert pac._value > Timestamp.min._value + pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp.min.tz_localize("Asia/Tokyo") + + # tz_localize that pushes away from the boundary is OK + msg = ( + f"Converting {Timestamp.max.strftime('%Y-%m-%d %H:%M:%S')} " + f"overflows past {Timestamp.max}" + ) 
+ tokyo = Timestamp.max.tz_localize("Asia/Tokyo") + assert tokyo._value < Timestamp.max._value + tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp.max.tz_localize("US/Pacific") + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_tz_localize_ambiguous_bool(self, unit): + # make sure that we are correctly accepting bool values as ambiguous + # GH#14402 + ts = Timestamp("2015-11-01 01:00:03").as_unit(unit) + expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central") + expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central") + + msg = "Cannot infer dst time from 2015-11-01 01:00:03" + with pytest.raises(pytz.AmbiguousTimeError, match=msg): + ts.tz_localize("US/Central") + + with pytest.raises(pytz.AmbiguousTimeError, match=msg): + ts.tz_localize("dateutil/US/Central") + + if ZoneInfo is not None: + try: + tz = ZoneInfo("US/Central") + except KeyError: + # no tzdata + pass + else: + with pytest.raises(pytz.AmbiguousTimeError, match=msg): + ts.tz_localize(tz) + + result = ts.tz_localize("US/Central", ambiguous=True) + assert result == expected0 + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + result = ts.tz_localize("US/Central", ambiguous=False) + assert result == expected1 + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + def test_tz_localize_ambiguous(self): + ts = Timestamp("2014-11-02 01:00") + ts_dst = ts.tz_localize("US/Eastern", ambiguous=True) + ts_no_dst = ts.tz_localize("US/Eastern", ambiguous=False) + + assert ts_no_dst._value - ts_dst._value == 3600 + msg = re.escape( + "'ambiguous' parameter must be one of: " + "True, False, 'NaT', 'raise' (default)" + ) + with pytest.raises(ValueError, match=msg): + ts.tz_localize("US/Eastern", ambiguous="infer") + + # GH#8025 + msg = "Cannot localize tz-aware Timestamp, use tz_convert for conversions" + with pytest.raises(TypeError, match=msg): + Timestamp("2011-01-01", tz="US/Eastern").tz_localize("Asia/Tokyo") + + msg = "Cannot convert tz-naive Timestamp, use tz_localize to localize" + with pytest.raises(TypeError, match=msg): + Timestamp("2011-01-01").tz_convert("Asia/Tokyo") + + @pytest.mark.parametrize( + "stamp, tz", + [ + ("2015-03-08 02:00", "US/Eastern"), + ("2015-03-08 02:30", "US/Pacific"), + ("2015-03-29 02:00", "Europe/Paris"), + ("2015-03-29 02:30", "Europe/Belgrade"), + ], + ) + def test_tz_localize_nonexistent(self, stamp, tz): + # GH#13057 + ts = Timestamp(stamp) + with pytest.raises(NonExistentTimeError, match=stamp): + ts.tz_localize(tz) + # GH 22644 + with pytest.raises(NonExistentTimeError, match=stamp): + ts.tz_localize(tz, nonexistent="raise") + assert ts.tz_localize(tz, nonexistent="NaT") is NaT + + @pytest.mark.parametrize( + "stamp, tz, forward_expected, backward_expected", + [ + ( + "2015-03-29 02:00:00", + "Europe/Warsaw", + "2015-03-29 03:00:00", + "2015-03-29 01:59:59", + ), # utc+1 -> utc+2 + ( + "2023-03-12 02:00:00", + "America/Los_Angeles", + "2023-03-12 03:00:00", + "2023-03-12 01:59:59", + ), # utc-8 -> utc-7 + ( + "2023-03-26 01:00:00", + "Europe/London", + "2023-03-26 02:00:00", + "2023-03-26 00:59:59", + ), # utc+0 -> utc+1 + ( + "2023-03-26 00:00:00", + "Atlantic/Azores", + "2023-03-26 01:00:00", + "2023-03-25 23:59:59", + ), # utc-1 -> utc+0 + ], + ) + def test_tz_localize_nonexistent_shift( + self, stamp, tz, forward_expected, backward_expected + ): + ts = Timestamp(stamp) + forward_ts = ts.tz_localize(tz, 
nonexistent="shift_forward") + assert forward_ts == Timestamp(forward_expected, tz=tz) + backward_ts = ts.tz_localize(tz, nonexistent="shift_backward") + assert backward_ts == Timestamp(backward_expected, tz=tz) + + def test_tz_localize_ambiguous_raise(self): + # GH#13057 + ts = Timestamp("2015-11-1 01:00") + msg = "Cannot infer dst time from 2015-11-01 01:00:00," + with pytest.raises(AmbiguousTimeError, match=msg): + ts.tz_localize("US/Pacific", ambiguous="raise") + + def test_tz_localize_nonexistent_invalid_arg(self, warsaw): + # GH 22644 + tz = warsaw + ts = Timestamp("2015-03-29 02:00:00") + msg = ( + "The nonexistent argument must be one of 'raise', 'NaT', " + "'shift_forward', 'shift_backward' or a timedelta object" + ) + with pytest.raises(ValueError, match=msg): + ts.tz_localize(tz, nonexistent="foo") + + @pytest.mark.parametrize( + "stamp", + [ + "2014-02-01 09:00", + "2014-07-08 09:00", + "2014-11-01 17:00", + "2014-11-05 00:00", + ], + ) + def test_tz_localize_roundtrip(self, stamp, tz_aware_fixture): + tz = tz_aware_fixture + ts = Timestamp(stamp) + localized = ts.tz_localize(tz) + assert localized == Timestamp(stamp, tz=tz) + + msg = "Cannot localize tz-aware Timestamp" + with pytest.raises(TypeError, match=msg): + localized.tz_localize(tz) + + reset = localized.tz_localize(None) + assert reset == ts + assert reset.tzinfo is None + + def test_tz_localize_ambiguous_compat(self): + # validate that pytz and dateutil are compat for dst + # when the transition happens + naive = Timestamp("2013-10-27 01:00:00") + + pytz_zone = "Europe/London" + dateutil_zone = "dateutil/Europe/London" + result_pytz = naive.tz_localize(pytz_zone, ambiguous=False) + result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=False) + assert result_pytz._value == result_dateutil._value + assert result_pytz._value == 1382835600 + + # fixed ambiguous behavior + # see gh-14621, GH#45087 + assert result_pytz.to_pydatetime().tzname() == "GMT" + assert result_dateutil.to_pydatetime().tzname() == "GMT" + assert str(result_pytz) == str(result_dateutil) + + # 1 hour difference + result_pytz = naive.tz_localize(pytz_zone, ambiguous=True) + result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=True) + assert result_pytz._value == result_dateutil._value + assert result_pytz._value == 1382832000 + + # see gh-14621 + assert str(result_pytz) == str(result_dateutil) + assert ( + result_pytz.to_pydatetime().tzname() + == result_dateutil.to_pydatetime().tzname() + ) + + @pytest.mark.parametrize( + "tz", + [ + pytz.timezone("US/Eastern"), + gettz("US/Eastern"), + "US/Eastern", + "dateutil/US/Eastern", + ], + ) + def test_timestamp_tz_localize(self, tz): + stamp = Timestamp("3/11/2012 04:00") + + result = stamp.tz_localize(tz) + expected = Timestamp("3/11/2012 04:00", tz=tz) + assert result.hour == expected.hour + assert result == expected + + @pytest.mark.parametrize( + "start_ts, tz, end_ts, shift", + [ + ["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"], + [ + "2015-03-29 02:20:00", + "Europe/Warsaw", + "2015-03-29 01:59:59.999999999", + "backward", + ], + [ + "2015-03-29 02:20:00", + "Europe/Warsaw", + "2015-03-29 03:20:00", + timedelta(hours=1), + ], + [ + "2015-03-29 02:20:00", + "Europe/Warsaw", + "2015-03-29 01:20:00", + timedelta(hours=-1), + ], + ["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"], + [ + "2018-03-11 02:33:00", + "US/Pacific", + "2018-03-11 01:59:59.999999999", + "backward", + ], + [ + "2018-03-11 02:33:00", + "US/Pacific", + "2018-03-11 
03:33:00", + timedelta(hours=1), + ], + [ + "2018-03-11 02:33:00", + "US/Pacific", + "2018-03-11 01:33:00", + timedelta(hours=-1), + ], + ], + ) + @pytest.mark.parametrize("tz_type", ["", "dateutil/"]) + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_timestamp_tz_localize_nonexistent_shift( + self, start_ts, tz, end_ts, shift, tz_type, unit + ): + # GH 8917, 24466 + tz = tz_type + tz + if isinstance(shift, str): + shift = "shift_" + shift + ts = Timestamp(start_ts).as_unit(unit) + result = ts.tz_localize(tz, nonexistent=shift) + expected = Timestamp(end_ts).tz_localize(tz) + + if unit == "us": + assert result == expected.replace(nanosecond=0) + elif unit == "ms": + micros = expected.microsecond - expected.microsecond % 1000 + assert result == expected.replace(microsecond=micros, nanosecond=0) + elif unit == "s": + assert result == expected.replace(microsecond=0, nanosecond=0) + else: + assert result == expected + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + @pytest.mark.parametrize("offset", [-1, 1]) + def test_timestamp_tz_localize_nonexistent_shift_invalid(self, offset, warsaw): + # GH 8917, 24466 + tz = warsaw + ts = Timestamp("2015-03-29 02:20:00") + msg = "The provided timedelta will relocalize on a nonexistent time" + with pytest.raises(ValueError, match=msg): + ts.tz_localize(tz, nonexistent=timedelta(seconds=offset)) + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_timestamp_tz_localize_nonexistent_NaT(self, warsaw, unit): + # GH 8917 + tz = warsaw + ts = Timestamp("2015-03-29 02:20:00").as_unit(unit) + result = ts.tz_localize(tz, nonexistent="NaT") + assert result is NaT + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_timestamp_tz_localize_nonexistent_raise(self, warsaw, unit): + # GH 8917 + tz = warsaw + ts = Timestamp("2015-03-29 02:20:00").as_unit(unit) + msg = "2015-03-29 02:20:00" + with pytest.raises(pytz.NonExistentTimeError, match=msg): + ts.tz_localize(tz, nonexistent="raise") + msg = ( + "The nonexistent argument must be one of 'raise', 'NaT', " + "'shift_forward', 'shift_backward' or a timedelta object" + ) + with pytest.raises(ValueError, match=msg): + ts.tz_localize(tz, nonexistent="foo") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_algos.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_algos.py new file mode 100644 index 0000000000000000000000000000000000000000..718d1b3ee2e834507919cd1e46b2e2bead191589 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_algos.py @@ -0,0 +1,2041 @@ +from datetime import datetime +import struct + +import numpy as np +import pytest + +from pandas._libs import ( + algos as libalgos, + hashtable as ht, +) + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_complex_dtype, + is_float_dtype, + is_integer_dtype, + is_object_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + NaT, + Period, + PeriodIndex, + Series, + Timedelta, + Timestamp, + cut, + date_range, + timedelta_range, + to_datetime, + to_timedelta, +) +import pandas._testing as tm +import pandas.core.algorithms as algos +from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, +) +import pandas.core.common as com + + +class TestFactorize: + def test_factorize_complex(self): + # GH#17927 + array = [1, 2, 2 + 1j] + msg = 
"factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + labels, uniques = algos.factorize(array) + + expected_labels = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(labels, expected_labels) + + # Should return a complex dtype in the future + expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize(self, index_or_series_obj, sort): + obj = index_or_series_obj + result_codes, result_uniques = obj.factorize(sort=sort) + + constructor = Index + if isinstance(obj, MultiIndex): + constructor = MultiIndex.from_tuples + expected_arr = obj.unique() + if expected_arr.dtype == np.float16: + expected_arr = expected_arr.astype(np.float32) + expected_uniques = constructor(expected_arr) + if ( + isinstance(obj, Index) + and expected_uniques.dtype == bool + and obj.dtype == object + ): + expected_uniques = expected_uniques.astype(object) + + if sort: + expected_uniques = expected_uniques.sort_values() + + # construct an integer ndarray so that + # `expected_uniques.take(expected_codes)` is equal to `obj` + expected_uniques_list = list(expected_uniques) + expected_codes = [expected_uniques_list.index(val) for val in obj] + expected_codes = np.asarray(expected_codes, dtype=np.intp) + + tm.assert_numpy_array_equal(result_codes, expected_codes) + tm.assert_index_equal(result_uniques, expected_uniques, exact=True) + + def test_series_factorize_use_na_sentinel_false(self): + # GH#35667 + values = np.array([1, 2, 1, np.nan]) + ser = Series(values) + codes, uniques = ser.factorize(use_na_sentinel=False) + + expected_codes = np.array([0, 1, 0, 2], dtype=np.intp) + expected_uniques = Index([1.0, 2.0, np.nan]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) + + def test_basic(self): + items = np.array(["a", "b", "b", "a", "a", "c", "c", "c"], dtype=object) + codes, uniques = algos.factorize(items) + tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object)) + + codes, uniques = algos.factorize(items, sort=True) + exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(uniques, exp) + + arr = np.arange(5, dtype=np.intp)[::-1] + + codes, uniques = algos.factorize(arr) + exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + codes, uniques = algos.factorize(arr, sort=True) + exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + arr = np.arange(5.0)[::-1] + + codes, uniques = algos.factorize(arr) + exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + codes, uniques = algos.factorize(arr, sort=True) + exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + def test_mixed(self): + # doc example reshaping.rst + x = Series(["A", "A", np.nan, "B", 3.14, np.inf]) + 
codes, uniques = algos.factorize(x) + + exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = Index(["A", "B", 3.14, np.inf]) + tm.assert_index_equal(uniques, exp) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = Index([3.14, np.inf, "A", "B"]) + tm.assert_index_equal(uniques, exp) + + def test_factorize_datetime64(self): + # M8 + v1 = Timestamp("20130101 09:00:00.00004") + v2 = Timestamp("20130101") + x = Series([v1, v1, v1, v2, v2, v1]) + codes, uniques = algos.factorize(x) + + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = DatetimeIndex([v1, v2]) + tm.assert_index_equal(uniques, exp) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = DatetimeIndex([v2, v1]) + tm.assert_index_equal(uniques, exp) + + def test_factorize_period(self): + # period + v1 = Period("201302", freq="M") + v2 = Period("201303", freq="M") + x = Series([v1, v1, v1, v2, v2, v1]) + + # periods are not 'sorted' as they are converted back into an index + codes, uniques = algos.factorize(x) + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, PeriodIndex([v1, v2])) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, PeriodIndex([v1, v2])) + + def test_factorize_timedelta(self): + # GH 5986 + v1 = to_timedelta("1 day 1 min") + v2 = to_timedelta("1 day") + x = Series([v1, v2, v1, v1, v2, v2, v1]) + codes, uniques = algos.factorize(x) + exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, to_timedelta([v1, v2])) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, to_timedelta([v2, v1])) + + def test_factorize_nan(self): + # nan should map to na_sentinel, not reverse_indexer[na_sentinel] + # rizer.factorize should not raise an exception if na_sentinel indexes + # outside of reverse_indexer + key = np.array([1, 2, 1, np.nan], dtype="O") + rizer = ht.ObjectFactorizer(len(key)) + for na_sentinel in (-1, 20): + ids = rizer.factorize(key, na_sentinel=na_sentinel) + expected = np.array([0, 1, 0, na_sentinel], dtype=np.intp) + assert len(set(key)) == len(set(expected)) + tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel) + tm.assert_numpy_array_equal(ids, expected) + + def test_factorizer_with_mask(self): + # GH#49549 + data = np.array([1, 2, 3, 1, 1, 0], dtype="int64") + mask = np.array([False, False, False, False, False, True]) + rizer = ht.Int64Factorizer(len(data)) + result = rizer.factorize(data, mask=mask) + expected = np.array([0, 1, 2, 0, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + expected_uniques = np.array([1, 2, 3], dtype="int64") + tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques) + + def test_factorizer_object_with_nan(self): + # GH#49549 + data = np.array([1, 2, 3, 1, np.nan]) + rizer = ht.ObjectFactorizer(len(data)) + result = rizer.factorize(data.astype(object)) + expected = np.array([0, 1, 2, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, 
expected) + expected_uniques = np.array([1, 2, 3], dtype=object) + tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + [(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), "nonsense"], + ), + ( + [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), (1, 2, 3)], + ), + ([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]), + ], + ) + def test_factorize_tuple_list(self, data, expected_codes, expected_uniques): + # GH9454 + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + codes, uniques = pd.factorize(data) + + tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp)) + + expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques_array) + + def test_complex_sorting(self): + # gh 12666 - check no segfault + x17 = np.array([complex(i) for i in range(17)], dtype=object) + + msg = "'[<>]' not supported between instances of .*" + with pytest.raises(TypeError, match=msg): + algos.factorize(x17[::-1], sort=True) + + def test_numeric_dtype_factorize(self, any_real_numpy_dtype): + # GH41132 + dtype = any_real_numpy_dtype + data = np.array([1, 2, 2, 1], dtype=dtype) + expected_codes = np.array([0, 1, 1, 0], dtype=np.intp) + expected_uniques = np.array([1, 2], dtype=dtype) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_float64_factorize(self, writable): + data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp) + expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_uint64_factorize(self, writable): + data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0], dtype=np.intp) + expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_int64_factorize(self, writable): + data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0], dtype=np.intp) + expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_string_factorize(self, writable): + data = np.array(["a", "c", "a", "b", "c"], dtype=object) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp) + expected_uniques = np.array(["a", "c", "b"], dtype=object) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_object_factorize(self, writable): + data = np.array(["a", "c", None, np.nan, "a", "b", NaT, "c"], dtype=object) + data.setflags(write=writable) + expected_codes = 
np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp) + expected_uniques = np.array(["a", "c", "b"], dtype=object) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_datetime64_factorize(self, writable): + # GH35650 Verify whether read-only datetime64 array can be factorized + data = np.array([np.datetime64("2020-01-01T00:00:00.000")], dtype="M8[ns]") + data.setflags(write=writable) + expected_codes = np.array([0], dtype=np.intp) + expected_uniques = np.array( + ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]" + ) + + codes, uniques = pd.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize_rangeindex(self, sort): + # increasing -> sort doesn't matter + ri = pd.RangeIndex.from_range(range(10)) + expected = np.arange(10, dtype=np.intp), ri + + result = algos.factorize(ri, sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + result = ri.factorize(sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize_rangeindex_decreasing(self, sort): + # decreasing -> sort matters + ri = pd.RangeIndex.from_range(range(10)) + expected = np.arange(10, dtype=np.intp), ri + + ri2 = ri[::-1] + expected = expected[0], ri2 + if sort: + expected = expected[0][::-1], expected[1][::-1] + + result = algos.factorize(ri2, sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + result = ri2.factorize(sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + def test_deprecate_order(self): + # gh 19727 - check warning is raised for deprecated keyword, order. + # Test not valid once order keyword is removed. + data = np.array([2**63, 1, 2**63], dtype=np.uint64) + with pytest.raises(TypeError, match="got an unexpected keyword"): + algos.factorize(data, order=True) + with tm.assert_produces_warning(False): + algos.factorize(data) + + @pytest.mark.parametrize( + "data", + [ + np.array([0, 1, 0], dtype="u8"), + np.array([-(2**63), 1, -(2**63)], dtype="i8"), + np.array(["__nan__", "foo", "__nan__"], dtype="object"), + ], + ) + def test_parametrized_factorize_na_value_default(self, data): + # arrays that include the NA default for that type, but isn't used. 
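+        # (here: 0 for "u8", -2**63 for "i8", and the string "__nan__" for
+        # object); since na_value is not passed, each must factorize as an
+        # ordinary value rather than being treated as missing.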
+ codes, uniques = algos.factorize(data) + expected_uniques = data[[0, 1]] + expected_codes = np.array([0, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize( + "data, na_value", + [ + (np.array([0, 1, 0, 2], dtype="u8"), 0), + (np.array([1, 0, 1, 2], dtype="u8"), 1), + (np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)), + (np.array([1, -(2**63), 1, 0], dtype="i8"), 1), + (np.array(["a", "", "a", "b"], dtype=object), "a"), + (np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()), + (np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)), + ], + ) + def test_parametrized_factorize_na_value(self, data, na_value): + codes, uniques = algos.factorize_array(data, na_value=na_value) + expected_uniques = data[[1, 3]] + expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize( + "data, uniques", + [ + ( + np.array(["b", "a", None, "b"], dtype=object), + np.array(["b", "a"], dtype=object), + ), + ( + pd.array([2, 1, np.nan, 2], dtype="Int64"), + pd.array([2, 1], dtype="Int64"), + ), + ], + ids=["numpy_array", "extension_array"], + ) + def test_factorize_use_na_sentinel(self, sort, data, uniques): + codes, uniques = algos.factorize(data, sort=sort, use_na_sentinel=True) + if sort: + expected_codes = np.array([1, 0, -1, 1], dtype=np.intp) + expected_uniques = algos.safe_sort(uniques) + else: + expected_codes = np.array([0, 1, -1, 0], dtype=np.intp) + expected_uniques = uniques + tm.assert_numpy_array_equal(codes, expected_codes) + if isinstance(data, np.ndarray): + tm.assert_numpy_array_equal(uniques, expected_uniques) + else: + tm.assert_extension_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + ["a", None, "b", "a"], + np.array([0, 1, 2, 0], dtype=np.dtype("intp")), + np.array(["a", np.nan, "b"], dtype=object), + ), + ( + ["a", np.nan, "b", "a"], + np.array([0, 1, 2, 0], dtype=np.dtype("intp")), + np.array(["a", np.nan, "b"], dtype=object), + ), + ], + ) + def test_object_factorize_use_na_sentinel_false( + self, data, expected_codes, expected_uniques + ): + codes, uniques = algos.factorize( + np.array(data, dtype=object), use_na_sentinel=False + ) + + tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True) + tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + [1, None, 1, 2], + np.array([0, 1, 0, 2], dtype=np.dtype("intp")), + np.array([1, np.nan, 2], dtype="O"), + ), + ( + [1, np.nan, 1, 2], + np.array([0, 1, 0, 2], dtype=np.dtype("intp")), + np.array([1, np.nan, 2], dtype=np.float64), + ), + ], + ) + def test_int_factorize_use_na_sentinel_false( + self, data, expected_codes, expected_uniques + ): + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + codes, uniques = algos.factorize(data, use_na_sentinel=False) + + tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True) + tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + Index(Categorical(["a", "a", "b"])), + np.array([0, 0, 1], dtype=np.intp), + 
CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"), + ), + ( + Series(Categorical(["a", "a", "b"])), + np.array([0, 0, 1], dtype=np.intp), + CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"), + ), + ( + Series(DatetimeIndex(["2017", "2017"], tz="US/Eastern")), + np.array([0, 0], dtype=np.intp), + DatetimeIndex(["2017"], tz="US/Eastern"), + ), + ], + ) + def test_factorize_mixed_values(self, data, expected_codes, expected_uniques): + # GH 19721 + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) + + def test_factorize_interval_non_nano(self, unit): + # GH#56099 + left = DatetimeIndex(["2016-01-01", np.nan, "2015-10-11"]).as_unit(unit) + right = DatetimeIndex(["2016-01-02", np.nan, "2015-10-15"]).as_unit(unit) + idx = IntervalIndex.from_arrays(left, right) + codes, cats = idx.factorize() + assert cats.dtype == f"interval[datetime64[{unit}], right]" + + ts = Timestamp(0).as_unit(unit) + idx2 = IntervalIndex.from_arrays(left - ts, right - ts) + codes2, cats2 = idx2.factorize() + assert cats2.dtype == f"interval[timedelta64[{unit}], right]" + + idx3 = IntervalIndex.from_arrays( + left.tz_localize("US/Pacific"), right.tz_localize("US/Pacific") + ) + codes3, cats3 = idx3.factorize() + assert cats3.dtype == f"interval[datetime64[{unit}, US/Pacific], right]" + + +class TestUnique: + def test_ints(self): + arr = np.random.default_rng(2).integers(0, 100, size=50) + + result = algos.unique(arr) + assert isinstance(result, np.ndarray) + + def test_objects(self): + arr = np.random.default_rng(2).integers(0, 100, size=50).astype("O") + + result = algos.unique(arr) + assert isinstance(result, np.ndarray) + + def test_object_refcount_bug(self): + lst = np.array(["A", "B", "C", "D", "E"], dtype=object) + for i in range(1000): + len(algos.unique(lst)) + + def test_on_index_object(self): + mindex = MultiIndex.from_arrays( + [np.arange(5).repeat(5), np.tile(np.arange(5), 5)] + ) + expected = mindex.values + expected.sort() + + mindex = mindex.repeat(2) + + result = pd.unique(mindex) + result.sort() + + tm.assert_almost_equal(result, expected) + + def test_dtype_preservation(self, any_numpy_dtype): + # GH 15442 + if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES): + data = [1, 2, 2] + uniques = [1, 2] + elif is_integer_dtype(any_numpy_dtype): + data = [1, 2, 2] + uniques = [1, 2] + elif is_float_dtype(any_numpy_dtype): + data = [1, 2, 2] + uniques = [1.0, 2.0] + elif is_complex_dtype(any_numpy_dtype): + data = [complex(1, 0), complex(2, 0), complex(2, 0)] + uniques = [complex(1, 0), complex(2, 0)] + elif is_bool_dtype(any_numpy_dtype): + data = [True, True, False] + uniques = [True, False] + elif is_object_dtype(any_numpy_dtype): + data = ["A", "B", "B"] + uniques = ["A", "B"] + else: + # datetime64[ns]/M8[ns]/timedelta64[ns]/m8[ns] tested elsewhere + data = [1, 2, 2] + uniques = [1, 2] + + result = Series(data, dtype=any_numpy_dtype).unique() + expected = np.array(uniques, dtype=any_numpy_dtype) + + if any_numpy_dtype in tm.STRING_DTYPES: + expected = expected.astype(object) + + if expected.dtype.kind in ["m", "M"]: + # We get TimedeltaArray/DatetimeArray + assert isinstance(result, (DatetimeArray, TimedeltaArray)) + result = np.array(result) + tm.assert_numpy_array_equal(result, expected) + + def test_datetime64_dtype_array_returned(self): + # GH 9431 + expected = np.array( + [ + "2015-01-03T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + ], + dtype="M8[ns]", + 
) + + dt_index = to_datetime( + [ + "2015-01-03T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + ] + ) + result = algos.unique(dt_index) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + s = Series(dt_index) + result = algos.unique(s) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + arr = s.values + result = algos.unique(arr) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + def test_datetime_non_ns(self): + a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]") + result = pd.unique(a) + expected = np.array(["2000", "2001"], dtype="datetime64[s]") + tm.assert_numpy_array_equal(result, expected) + + def test_timedelta_non_ns(self): + a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]") + result = pd.unique(a) + expected = np.array([2000, 2001], dtype="timedelta64[s]") + tm.assert_numpy_array_equal(result, expected) + + def test_timedelta64_dtype_array_returned(self): + # GH 9431 + expected = np.array([31200, 45678, 10000], dtype="m8[ns]") + + td_index = to_timedelta([31200, 45678, 31200, 10000, 45678]) + result = algos.unique(td_index) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + s = Series(td_index) + result = algos.unique(s) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + arr = s.values + result = algos.unique(arr) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + def test_uint64_overflow(self): + s = Series([1, 2, 2**63, 2**63], dtype=np.uint64) + exp = np.array([1, 2, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.unique(s), exp) + + def test_nan_in_object_array(self): + duplicated_items = ["a", np.nan, "c", "c"] + result = pd.unique(np.array(duplicated_items, dtype=object)) + expected = np.array(["a", np.nan, "c"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_categorical(self): + # we are expecting to return in the order + # of appearance + expected = Categorical(list("bac")) + + # we are expecting to return in the order + # of the categories + expected_o = Categorical(list("bac"), categories=list("abc"), ordered=True) + + # GH 15939 + c = Categorical(list("baabc")) + result = c.unique() + tm.assert_categorical_equal(result, expected) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected) + + c = Categorical(list("baabc"), ordered=True) + result = c.unique() + tm.assert_categorical_equal(result, expected_o) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected_o) + + # Series of categorical dtype + s = Series(Categorical(list("baabc")), name="foo") + result = s.unique() + tm.assert_categorical_equal(result, expected) + + result = pd.unique(s) + tm.assert_categorical_equal(result, expected) + + # CI -> return CI + ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc"))) + expected = CategoricalIndex(expected) + result = ci.unique() + tm.assert_index_equal(result, expected) + + result = pd.unique(ci) + tm.assert_index_equal(result, expected) + + def test_datetime64tz_aware(self, unit): + # GH 15939 + + dti = Index( + [ + Timestamp("20160101", tz="US/Eastern"), + Timestamp("20160101", tz="US/Eastern"), + ] + ).as_unit(unit) + ser = Series(dti) + + result = ser.unique() + expected = dti[:1]._data + tm.assert_extension_array_equal(result, expected) + + result = 
dti.unique() + expected = dti[:1] + tm.assert_index_equal(result, expected) + + result = pd.unique(ser) + expected = dti[:1]._data + tm.assert_extension_array_equal(result, expected) + + result = pd.unique(dti) + expected = dti[:1] + tm.assert_index_equal(result, expected) + + def test_order_of_appearance(self): + # 9346 + # light testing of guarantee of order of appearance + # these also are the doc-examples + result = pd.unique(Series([2, 1, 3, 3])) + tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype="int64")) + + result = pd.unique(Series([2] + [1] * 5)) + tm.assert_numpy_array_equal(result, np.array([2, 1], dtype="int64")) + + msg = "unique with argument that is not not a Series, Index," + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(list("aabc")) + expected = np.array(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = pd.unique(Series(Categorical(list("aabc")))) + expected = Categorical(list("abc")) + tm.assert_categorical_equal(result, expected) + + def test_order_of_appearance_dt64(self, unit): + ser = Series([Timestamp("20160101"), Timestamp("20160101")]).dt.as_unit(unit) + result = pd.unique(ser) + expected = np.array(["2016-01-01T00:00:00.000000000"], dtype=f"M8[{unit}]") + tm.assert_numpy_array_equal(result, expected) + + def test_order_of_appearance_dt64tz(self, unit): + dti = DatetimeIndex( + [ + Timestamp("20160101", tz="US/Eastern"), + Timestamp("20160101", tz="US/Eastern"), + ] + ).as_unit(unit) + result = pd.unique(dti) + expected = DatetimeIndex( + ["2016-01-01 00:00:00"], dtype=f"datetime64[{unit}, US/Eastern]", freq=None + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "arg ,expected", + [ + (("1", "1", "2"), np.array(["1", "2"], dtype=object)), + (("foo",), np.array(["foo"], dtype=object)), + ], + ) + def test_tuple_with_strings(self, arg, expected): + # see GH 17108 + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(arg) + tm.assert_numpy_array_equal(result, expected) + + def test_obj_none_preservation(self): + # GH 20866 + arr = np.array(["foo", None], dtype=object) + result = pd.unique(arr) + expected = np.array(["foo", None], dtype=object) + + tm.assert_numpy_array_equal(result, expected, strict_nan=True) + + def test_signed_zero(self): + # GH 21866 + a = np.array([-0.0, 0.0]) + result = pd.unique(a) + expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent + tm.assert_numpy_array_equal(result, expected) + + def test_different_nans(self): + # GH 21866 + # create different nans from bit-patterns: + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent + result = pd.unique(a) + expected = np.array([np.nan]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("el_type", [np.float64, object]) + def test_first_nan_kept(self, el_type): + # GH 22295 + # create different nans from bit-patterns: + bits_for_nan1 = 0xFFF8000000000001 + bits_for_nan2 = 0x7FF8000000000001 + NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2], dtype=el_type) + result = pd.unique(a) + assert result.size == 1 + # use bit patterns to identify 
which nan was kept: + result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0] + assert result_nan_bits == bits_for_nan1 + + def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2): + # GH 22295 + if unique_nulls_fixture is unique_nulls_fixture2: + return # skip it, values not unique + a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object) + result = pd.unique(a) + assert result.size == 2 + assert a[0] is unique_nulls_fixture + assert a[1] is unique_nulls_fixture2 + + def test_unique_masked(self, any_numeric_ea_dtype): + # GH#48019 + ser = Series([1, pd.NA, 2] * 3, dtype=any_numeric_ea_dtype) + result = pd.unique(ser) + expected = pd.array([1, pd.NA, 2], dtype=any_numeric_ea_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_nunique_ints(index_or_series_or_array): + # GH#36327 + values = index_or_series_or_array(np.random.default_rng(2).integers(0, 20, 30)) + result = algos.nunique_ints(values) + expected = len(algos.unique(values)) + assert result == expected + + +class TestIsin: + def test_invalid(self): + msg = ( + r"only list-like objects are allowed to be passed to isin\(\), " + r"you passed a `int`" + ) + with pytest.raises(TypeError, match=msg): + algos.isin(1, 1) + with pytest.raises(TypeError, match=msg): + algos.isin(1, [1]) + with pytest.raises(TypeError, match=msg): + algos.isin([1], 1) + + def test_basic(self): + msg = "isin with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin([1, 2], [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(np.array([1, 2]), [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), Series([1])) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), {1}) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(["a", "b"], ["a"]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series(["a", "b"]), Series(["a"])) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series(["a", "b"]), {"a"}) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(["a", "b"], [1]) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + def test_i8(self): + arr = date_range("20130101", periods=3).values + result = algos.isin(arr, [arr[0]]) + expected = np.array([True, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, arr[0:2]) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, set(arr[0:2])) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + arr = timedelta_range("1 day", periods=3).values + result = algos.isin(arr, [arr[0]]) + expected = np.array([True, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, arr[0:2]) + expected = 
np.array([True, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = algos.isin(arr, set(arr[0:2]))
+        expected = np.array([True, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
+    @pytest.mark.parametrize("dtype", ["i8", "f8", "u8"])
+    def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1):
+        # Anything but object dtype takes the all-False shortcut
+
+        dta = date_range("2013-01-01", periods=3)._values
+        arr = Series(dta.view("i8")).array.view(dtype1)
+
+        comps = arr.view("i8").astype(dtype)
+
+        result = algos.isin(comps, arr)
+        expected = np.zeros(comps.shape, dtype=bool)
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_large(self):
+        s = date_range("20000101", periods=2000000, freq="s").values
+        result = algos.isin(s, s[0:2])
+        expected = np.zeros(len(s), dtype=bool)
+        expected[0] = True
+        expected[1] = True
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
+    def test_isin_datetimelike_all_nat(self, dtype):
+        # GH#56427
+        dta = date_range("2013-01-01", periods=3)._values
+        arr = Series(dta.view("i8")).array.view(dtype)
+
+        arr[0] = NaT
+        result = algos.isin(arr, [NaT])
+        expected = np.array([True, False, False], dtype=bool)
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]"])
+    def test_isin_datetimelike_strings_deprecated(self, dtype):
+        # GH#53111
+        dta = date_range("2013-01-01", periods=3)._values
+        arr = Series(dta.view("i8")).array.view(dtype)
+
+        vals = [str(x) for x in arr]
+        msg = "The behavior of 'isin' with dtype=.* is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            res = algos.isin(arr, vals)
+        assert res.all()
+
+        vals2 = np.array(vals, dtype=str)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            res2 = algos.isin(arr, vals2)
+        assert res2.all()
+
+    def test_isin_dt64tz_with_nat(self):
+        # the all-NaT values used to get inferred as tz-naive, which made them
+        # evaluate as non-matching (GH#56427)
+        dti = date_range("2016-01-01", periods=3, tz="UTC")
+        ser = Series(dti)
+        ser[0] = NaT
+
+        res = algos.isin(ser._values, [NaT])
+        exp = np.array([True, False, False], dtype=bool)
+        tm.assert_numpy_array_equal(res, exp)
+
+    def test_categorical_from_codes(self):
+        # GH 16639
+        vals = np.array([0, 1, 2, 0])
+        cats = ["a", "b", "c"]
+        Sd = Series(Categorical([1]).from_codes(vals, cats))
+        St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats))
+        expected = np.array([True, True, False, True])
+        result = algos.isin(Sd, St)
+        tm.assert_numpy_array_equal(expected, result)
+
+    def test_categorical_isin(self):
+        vals = np.array([0, 1, 2, 0])
+        cats = ["a", "b", "c"]
+        cat = Categorical([1]).from_codes(vals, cats)
+        other = Categorical([1]).from_codes(np.array([0, 1]), cats)
+
+        expected = np.array([True, True, False, True])
+        result = algos.isin(cat, other)
+        tm.assert_numpy_array_equal(expected, result)
+
+    def test_same_nan_is_in(self):
+        # GH 22160
+        # nan is special: "a is b" does not imply "a == b"
+        # at a minimum, isin() should match Python's behavior, where
+        # "np.nan in [np.nan]" is True
+        # a cast to np.float64 (i.e. to a different float object) somewhere
+        # along the way could jeopardize this behavior
+        comps = [np.nan]  # could be cast to float64
+        values = [np.nan]
+        expected = np.array([True])
+        msg = "isin with argument that is not not a Series"
+        with 
tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.isin(comps, values)
+        tm.assert_numpy_array_equal(expected, result)
+
+    def test_same_nan_is_in_large(self):
+        # https://github.com/pandas-dev/pandas/issues/22205
+        s = np.tile(1.0, 1_000_001)
+        s[0] = np.nan
+        result = algos.isin(s, np.array([np.nan, 1]))
+        expected = np.ones(len(s), dtype=bool)
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_same_nan_is_in_large_series(self):
+        # https://github.com/pandas-dev/pandas/issues/22205
+        s = np.tile(1.0, 1_000_001)
+        series = Series(s)
+        s[0] = np.nan
+        result = series.isin(np.array([np.nan, 1]))
+        expected = Series(np.ones(len(s), dtype=bool))
+        tm.assert_series_equal(result, expected)
+
+    def test_same_object_is_in(self):
+        # GH 22160
+        # nans may get special treatment, but a user could define a custom
+        # class with similar behavior; in that case we should at least fall
+        # back to Python's usual behavior: "a in [a]" is True
+        class LikeNan:
+            def __eq__(self, other) -> bool:
+                return False
+
+            def __hash__(self):
+                return 0
+
+        a, b = LikeNan(), LikeNan()
+
+        msg = "isin with argument that is not not a Series"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            # same object -> True
+            tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))
+            # different objects -> False
+            tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))
+
+    def test_different_nans(self):
+        # GH 22160
+        # all nans are handled as equivalent
+
+        comps = [float("nan")]
+        values = [float("nan")]
+        assert comps[0] is not values[0]  # different nan objects
+
+        # as a list of Python objects:
+        result = algos.isin(np.array(comps), values)
+        tm.assert_numpy_array_equal(np.array([True]), result)
+
+        # as an object array:
+        result = algos.isin(
+            np.asarray(comps, dtype=object), np.asarray(values, dtype=object)
+        )
+        tm.assert_numpy_array_equal(np.array([True]), result)
+
+        # as a float64 array:
+        result = algos.isin(
+            np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64)
+        )
+        tm.assert_numpy_array_equal(np.array([True]), result)
+
+    def test_no_cast(self):
+        # GH 22160
+        # ensure 42 is not cast to a string
+        comps = ["ss", 42]
+        values = ["42"]
+        expected = np.array([False, False])
+        msg = "isin with argument that is not not a Series, Index"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.isin(comps, values)
+        tm.assert_numpy_array_equal(expected, result)
+
+    @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
+    def test_empty(self, empty):
+        # see gh-16991
+        vals = Index(["a", "b"])
+        expected = np.array([False, False])
+
+        result = algos.isin(vals, empty)
+        tm.assert_numpy_array_equal(expected, result)
+
+    def test_different_nan_objects(self):
+        # GH 22119
+        comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object)
+        vals = np.array([float("nan")], dtype=object)
+        expected = np.array([False, False, True])
+        result = algos.isin(comps, vals)
+        tm.assert_numpy_array_equal(expected, result)
+
+    def test_different_nans_as_float64(self):
+        # GH 21866
+        # create different nans from bit-patterns; these nans would land in
+        # different buckets in the hash table if no special care were taken
+        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
+        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
+        assert NAN1 != NAN1
+        assert NAN2 != NAN2
+
+        # check that NAN1 and NAN2 are equivalent:
+        arr = np.array([NAN1, NAN2], dtype=np.float64)
+        lookup1 = 
np.array([NAN1], dtype=np.float64)
+        result = algos.isin(arr, lookup1)
+        expected = np.array([True, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+        lookup2 = np.array([NAN2], dtype=np.float64)
+        result = algos.isin(arr, lookup2)
+        expected = np.array([True, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_isin_int_df_string_search(self):
+        """Comparing a DataFrame of ints (1, 2) against the string "1" with
+        isin() should not match, because the int 1 is not equal to the str "1"."""
+        df = DataFrame({"values": [1, 2]})
+        result = df.isin(["1"])
+        expected_false = DataFrame({"values": [False, False]})
+        tm.assert_frame_equal(result, expected_false)
+
+    def test_isin_nan_df_string_search(self):
+        """Comparing a DataFrame containing NaN (np.nan, 2) against the string
+        "NaN" with isin() should not match, because np.nan is not equal to the
+        str "NaN"."""
+        df = DataFrame({"values": [np.nan, 2]})
+        result = df.isin(np.array(["NaN"], dtype=object))
+        expected_false = DataFrame({"values": [False, False]})
+        tm.assert_frame_equal(result, expected_false)
+
+    def test_isin_float_df_string_search(self):
+        """Comparing a DataFrame of floats (1.4245, 2.32441) against the string
+        "1.4245" with isin() should not match, because the float 1.4245 is not
+        equal to the str "1.4245"."""
+        df = DataFrame({"values": [1.4245, 2.32441]})
+        result = df.isin(np.array(["1.4245"], dtype=object))
+        expected_false = DataFrame({"values": [False, False]})
+        tm.assert_frame_equal(result, expected_false)
+
+    def test_isin_unsigned_dtype(self):
+        # GH#46485
+        ser = Series([1378774140726870442], dtype=np.uint64)
+        result = ser.isin([1378774140726870528])
+        expected = Series(False)
+        tm.assert_series_equal(result, expected)
+
+
+class TestValueCounts:
+    def test_value_counts(self):
+        arr = np.random.default_rng(1234).standard_normal(4)
+        factor = cut(arr, 4)
+
+        msg = "pandas.value_counts is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(factor)
+        breaks = [-1.606, -1.018, -0.431, 0.155, 0.741]
+        index = IntervalIndex.from_breaks(breaks).astype(CategoricalDtype(ordered=True))
+        expected = Series([1, 0, 2, 1], index=index, name="count")
+        tm.assert_series_equal(result.sort_index(), expected.sort_index())
+
+    def test_value_counts_bins(self):
+        s = [1, 2, 3, 4]
+        msg = "pandas.value_counts is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(s, bins=1)
+        expected = Series(
+            [4], index=IntervalIndex.from_tuples([(0.996, 4.0)]), name="count"
+        )
+        tm.assert_series_equal(result, expected)
+
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = algos.value_counts(s, bins=2, sort=False)
+        expected = Series(
+            [2, 2],
+            index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]),
+            name="count",
+        )
+        tm.assert_series_equal(result, expected)
+
+    def test_value_counts_dtypes(self):
+        msg2 = "pandas.value_counts is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            result = algos.value_counts(np.array([1, 1.0]))
+        assert len(result) == 1
+
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            result = algos.value_counts(np.array([1, 1.0]), bins=1)
+        assert len(result) == 1
+
+        with tm.assert_produces_warning(FutureWarning, match=msg2):
+            result = algos.value_counts(Series([1, 1.0, "1"]))  # object
+        assert len(result) == 2
+
+        msg = "bins argument only works with numeric data"
+        with pytest.raises(TypeError, match=msg):
+            with tm.assert_produces_warning(FutureWarning, 
match=msg2): + algos.value_counts(np.array(["1", 1], dtype=object), bins=1) + + def test_value_counts_nat(self): + td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]") + dt = to_datetime(["NaT", "2014-01-01"]) + + msg = "pandas.value_counts is deprecated" + + for ser in [td, dt]: + with tm.assert_produces_warning(FutureWarning, match=msg): + vc = algos.value_counts(ser) + vc_with_na = algos.value_counts(ser, dropna=False) + assert len(vc) == 1 + assert len(vc_with_na) == 2 + + exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1}, name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result_dt = algos.value_counts(dt) + tm.assert_series_equal(result_dt, exp_dt) + + exp_td = Series({np.timedelta64(10000): 1}, name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result_td = algos.value_counts(td) + tm.assert_series_equal(result_td, exp_td) + + @pytest.mark.parametrize("dtype", [object, "M8[us]"]) + def test_value_counts_datetime_outofbounds(self, dtype): + # GH 13663 + ser = Series( + [ + datetime(3000, 1, 1), + datetime(5000, 1, 1), + datetime(5000, 1, 1), + datetime(6000, 1, 1), + datetime(3000, 1, 1), + datetime(3000, 1, 1), + ], + dtype=dtype, + ) + res = ser.value_counts() + + exp_index = Index( + [datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)], + dtype=dtype, + ) + exp = Series([3, 2, 1], index=exp_index, name="count") + tm.assert_series_equal(res, exp) + + def test_categorical(self): + s = Series(Categorical(list("aaabbc"))) + result = s.value_counts() + expected = Series( + [3, 2, 1], index=CategoricalIndex(["a", "b", "c"]), name="count" + ) + + tm.assert_series_equal(result, expected, check_index_type=True) + + # preserve order? + s = s.cat.as_ordered() + result = s.value_counts() + expected.index = expected.index.as_ordered() + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_categorical_nans(self): + s = Series(Categorical(list("aaaaabbbcc"))) # 4,3,2,1 (nan) + s.iloc[1] = np.nan + result = s.value_counts() + expected = Series( + [4, 3, 2], + index=CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c"]), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + result = s.value_counts(dropna=False) + expected = Series( + [4, 3, 2, 1], index=CategoricalIndex(["a", "b", "c", np.nan]), name="count" + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + # out of order + s = Series( + Categorical(list("aaaaabbbcc"), ordered=True, categories=["b", "a", "c"]) + ) + s.iloc[1] = np.nan + result = s.value_counts() + expected = Series( + [4, 3, 2], + index=CategoricalIndex( + ["a", "b", "c"], + categories=["b", "a", "c"], + ordered=True, + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + result = s.value_counts(dropna=False) + expected = Series( + [4, 3, 2, 1], + index=CategoricalIndex( + ["a", "b", "c", np.nan], categories=["b", "a", "c"], ordered=True + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_categorical_zeroes(self): + # keep the `d` category with 0 + s = Series(Categorical(list("bbbaac"), categories=list("abcd"), ordered=True)) + result = s.value_counts() + expected = Series( + [3, 2, 1, 0], + index=Categorical( + ["b", "a", "c", "d"], categories=list("abcd"), ordered=True + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_value_counts_dropna(self): + # 
https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328 + + tm.assert_series_equal( + Series([True, True, False]).value_counts(dropna=True), + Series([2, 1], index=[True, False], name="count"), + ) + tm.assert_series_equal( + Series([True, True, False]).value_counts(dropna=False), + Series([2, 1], index=[True, False], name="count"), + ) + + tm.assert_series_equal( + Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True), + Series([3, 2], index=Index([True, False], dtype=object), name="count"), + ) + tm.assert_series_equal( + Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False), + Series([5, 3, 2], index=[True, False, None], name="count"), + ) + tm.assert_series_equal( + Series([10.3, 5.0, 5.0]).value_counts(dropna=True), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + tm.assert_series_equal( + Series([10.3, 5.0, 5.0]).value_counts(dropna=False), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + + tm.assert_series_equal( + Series([10.3, 5.0, 5.0, None]).value_counts(dropna=True), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + + result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False) + expected = Series([3, 2, 1], index=[5.0, 10.3, None], name="count") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", (np.float64, object, "M8[ns]")) + def test_value_counts_normalized(self, dtype): + # GH12558 + s = Series([1] * 2 + [2] * 3 + [np.nan] * 5) + s_typed = s.astype(dtype) + result = s_typed.value_counts(normalize=True, dropna=False) + expected = Series( + [0.5, 0.3, 0.2], + index=Series([np.nan, 2.0, 1.0], dtype=dtype), + name="proportion", + ) + tm.assert_series_equal(result, expected) + + result = s_typed.value_counts(normalize=True, dropna=True) + expected = Series( + [0.6, 0.4], index=Series([2.0, 1.0], dtype=dtype), name="proportion" + ) + tm.assert_series_equal(result, expected) + + def test_value_counts_uint64(self): + arr = np.array([2**63], dtype=np.uint64) + expected = Series([1], index=[2**63], name="count") + msg = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(arr) + + tm.assert_series_equal(result, expected) + + arr = np.array([-1, 2**63], dtype=object) + expected = Series([1, 1], index=[-1, 2**63], name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(arr) + + tm.assert_series_equal(result, expected) + + def test_value_counts_series(self): + # GH#54857 + values = np.array([3, 1, 2, 3, 4, np.nan]) + result = Series(values).value_counts(bins=3) + expected = Series( + [2, 2, 1], + index=IntervalIndex.from_tuples( + [(0.996, 2.0), (2.0, 3.0), (3.0, 4.0)], dtype="interval[float64, right]" + ), + name="count", + ) + tm.assert_series_equal(result, expected) + + +class TestDuplicated: + def test_duplicated_with_nas(self): + keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object) + + result = algos.duplicated(keys) + expected = np.array([False, False, False, True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="first") + expected = np.array([False, False, False, True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="last") + expected = np.array([True, False, True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep=False) + expected = np.array([True, False, True, 
True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + keys = np.empty(8, dtype=object) + for i, t in enumerate( + zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2) + ): + keys[i] = t + + result = algos.duplicated(keys) + falses = [False] * 4 + trues = [True] * 4 + expected = np.array(falses + trues) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="last") + expected = np.array(trues + falses) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep=False) + expected = np.array(trues + trues) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "case", + [ + np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]), + np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]), + np.array( + [ + 1 + 1j, + 2 + 2j, + 1 + 1j, + 5 + 5j, + 3 + 3j, + 2 + 2j, + 4 + 4j, + 1 + 1j, + 5 + 5j, + 6 + 6j, + ] + ), + np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object), + np.array( + [1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64 + ), + ], + ) + def test_numeric_object_likes(self, case): + exp_first = np.array( + [False, False, True, False, False, True, False, True, True, False] + ) + exp_last = np.array( + [True, True, True, True, False, False, False, False, False, False] + ) + exp_false = exp_first | exp_last + + res_first = algos.duplicated(case, keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = algos.duplicated(case, keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = algos.duplicated(case, keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # index + for idx in [Index(case), Index(case, dtype="category")]: + res_first = idx.duplicated(keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # series + for s in [Series(case), Series(case, dtype="category")]: + res_first = s.duplicated(keep="first") + tm.assert_series_equal(res_first, Series(exp_first)) + + res_last = s.duplicated(keep="last") + tm.assert_series_equal(res_last, Series(exp_last)) + + res_false = s.duplicated(keep=False) + tm.assert_series_equal(res_false, Series(exp_false)) + + def test_datetime_likes(self): + dt = [ + "2011-01-01", + "2011-01-02", + "2011-01-01", + "NaT", + "2011-01-03", + "2011-01-02", + "2011-01-04", + "2011-01-01", + "NaT", + "2011-01-06", + ] + td = [ + "1 days", + "2 days", + "1 days", + "NaT", + "3 days", + "2 days", + "4 days", + "1 days", + "NaT", + "6 days", + ] + + cases = [ + np.array([Timestamp(d) for d in dt]), + np.array([Timestamp(d, tz="US/Eastern") for d in dt]), + np.array([Period(d, freq="D") for d in dt]), + np.array([np.datetime64(d) for d in dt]), + np.array([Timedelta(d) for d in td]), + ] + + exp_first = np.array( + [False, False, True, False, False, True, False, True, True, False] + ) + exp_last = np.array( + [True, True, True, True, False, False, False, False, False, False] + ) + exp_false = exp_first | exp_last + + for case in cases: + res_first = algos.duplicated(case, keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = algos.duplicated(case, keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = algos.duplicated(case, keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # index + for idx in [ + 
Index(case), + Index(case, dtype="category"), + Index(case, dtype=object), + ]: + res_first = idx.duplicated(keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # series + for s in [ + Series(case), + Series(case, dtype="category"), + Series(case, dtype=object), + ]: + res_first = s.duplicated(keep="first") + tm.assert_series_equal(res_first, Series(exp_first)) + + res_last = s.duplicated(keep="last") + tm.assert_series_equal(res_last, Series(exp_last)) + + res_false = s.duplicated(keep=False) + tm.assert_series_equal(res_false, Series(exp_false)) + + @pytest.mark.parametrize("case", [Index([1, 2, 3]), pd.RangeIndex(0, 3)]) + def test_unique_index(self, case): + assert case.is_unique is True + tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False])) + + @pytest.mark.parametrize( + "arr, uniques", + [ + ( + [(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)], + [(0, 0), (0, 1), (1, 0), (1, 1)], + ), + ( + [("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")], + [("b", "c"), ("a", "b")], + ), + ([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]), + ], + ) + def test_unique_tuples(self, arr, uniques): + # https://github.com/pandas-dev/pandas/issues/16519 + expected = np.empty(len(uniques), dtype=object) + expected[:] = uniques + + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(arr) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "array,expected", + [ + ( + [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j], + # Should return a complex dtype in the future + np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object), + ) + ], + ) + def test_unique_complex_numbers(self, array, expected): + # GH 17927 + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(array) + tm.assert_numpy_array_equal(result, expected) + + +class TestHashTable: + @pytest.mark.parametrize( + "htable, data", + [ + (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.Float64HashTable, np.arange(1000, dtype=np.float64)), + (ht.Int64HashTable, np.arange(1000, dtype=np.int64)), + (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)), + ], + ) + def test_hashtable_unique(self, htable, data, writable): + # output of maker has guaranteed unique elements + s = Series(data) + if htable == ht.Float64HashTable: + # add NaN for float column + s.loc[500] = np.nan + elif htable == ht.PyObjectHashTable: + # use different NaN types for object column + s.loc[500:502] = [np.nan, None, NaT] + + # create duplicated selection + s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) + s_duplicated.values.setflags(write=writable) + + # drop_duplicates has own cython code (hash_table_func_helper.pxi) + # and is tested separately; keeps first occurrence like ht.unique() + expected_unique = s_duplicated.drop_duplicates(keep="first").values + result_unique = htable().unique(s_duplicated.values) + tm.assert_numpy_array_equal(result_unique, expected_unique) + + # test return_inverse=True + # reconstruction can only succeed if the inverse is correct + result_unique, result_inverse = htable().unique( + s_duplicated.values, 
return_inverse=True + ) + tm.assert_numpy_array_equal(result_unique, expected_unique) + reconstr = result_unique[result_inverse] + tm.assert_numpy_array_equal(reconstr, s_duplicated.values) + + @pytest.mark.parametrize( + "htable, data", + [ + (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.Float64HashTable, np.arange(1000, dtype=np.float64)), + (ht.Int64HashTable, np.arange(1000, dtype=np.int64)), + (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)), + ], + ) + def test_hashtable_factorize(self, htable, writable, data): + # output of maker has guaranteed unique elements + s = Series(data) + if htable == ht.Float64HashTable: + # add NaN for float column + s.loc[500] = np.nan + elif htable == ht.PyObjectHashTable: + # use different NaN types for object column + s.loc[500:502] = [np.nan, None, NaT] + + # create duplicated selection + s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) + s_duplicated.values.setflags(write=writable) + na_mask = s_duplicated.isna().values + + result_unique, result_inverse = htable().factorize(s_duplicated.values) + + # drop_duplicates has own cython code (hash_table_func_helper.pxi) + # and is tested separately; keeps first occurrence like ht.factorize() + # since factorize removes all NaNs, we do the same here + expected_unique = s_duplicated.dropna().drop_duplicates().values + tm.assert_numpy_array_equal(result_unique, expected_unique) + + # reconstruction can only succeed if the inverse is correct. Since + # factorize removes the NaNs, those have to be excluded here as well + result_reconstruct = result_unique[result_inverse[~na_mask]] + expected_reconstruct = s_duplicated.dropna().values + tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct) + + +class TestRank: + @pytest.mark.parametrize( + "arr", + [ + [np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan], + [4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan], + ], + ) + def test_scipy_compat(self, arr): + sp_stats = pytest.importorskip("scipy.stats") + + arr = np.array(arr) + + mask = ~np.isfinite(arr) + arr = arr.copy() + result = libalgos.rank_1d(arr) + arr[mask] = np.inf + exp = sp_stats.rankdata(arr) + exp[mask] = np.nan + tm.assert_almost_equal(result, exp) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_basic(self, writable, dtype): + exp = np.array([1, 2], dtype=np.float64) + + data = np.array([1, 100], dtype=dtype) + data.setflags(write=writable) + ser = Series(data) + result = algos.rank(ser) + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("dtype", [np.float64, np.uint64]) + def test_uint64_overflow(self, dtype): + exp = np.array([1, 2], dtype=np.float64) + + s = Series([1, 2**63], dtype=dtype) + tm.assert_numpy_array_equal(algos.rank(s), exp) + + def test_too_many_ndims(self): + arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) + msg = "Array with ndim > 2 are not supported" + + with pytest.raises(TypeError, match=msg): + algos.rank(arr) + + @pytest.mark.single_cpu + def test_pct_max_many_rows(self): + # GH 18271 + values = np.arange(2**24 + 1) + result = algos.rank(values, pct=True).max() + assert result == 1 + + values = np.arange(2**25 + 2).reshape(2**24 + 1, 2) + result = algos.rank(values, pct=True).max() + assert result == 1 + + +class TestMode: + def test_no_mode(self): + exp = Series([], dtype=np.float64, index=Index([], dtype=int)) + tm.assert_numpy_array_equal(algos.mode(np.array([])), exp.values) + + 
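# A minimal sketch (an editor's illustration, not part of the original suite)
+    # of the tie-breaking behavior exercised below: when several values share
+    # the highest count, algos.mode returns all of them in ascending order.
+    def test_mode_tie_sketch(self):
+        values = np.array([3, 1, 3, 1], dtype=np.int64)
+        result = algos.mode(values)
+        tm.assert_numpy_array_equal(result, np.array([1, 3], dtype=np.int64))
+
+    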
@pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"]) + def test_mode_single(self, dt): + # GH 15714 + exp_single = [1] + data_single = [1] + + exp_multi = [1] + data_multi = [1, 1] + + ser = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + ser = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_mode_obj_int(self): + exp = Series([1], dtype=int) + tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values) + + exp = Series(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values) + + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"]) + def test_number_mode(self, dt): + exp_single = [1] + data_single = [1] * 5 + [2] * 3 + + exp_multi = [1, 3] + data_multi = [1] * 5 + [2] * 3 + [3] * 5 + + ser = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + ser = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_strobj_mode(self): + exp = ["b"] + data = ["a"] * 2 + ["b"] * 3 + + ser = Series(data, dtype="c") + exp = Series(exp, dtype="c") + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + @pytest.mark.parametrize("dt", [str, object]) + def test_strobj_multi_char(self, dt): + exp = ["bar"] + data = ["foo"] * 2 + ["bar"] * 3 + + ser = Series(data, dtype=dt) + exp = Series(exp, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_datelike_mode(self): + exp = Series(["1900-05-03", "2011-01-03", "2013-01-02"], dtype="M8[ns]") + ser = Series(["2011-01-03", "2013-01-02", "1900-05-03"], dtype="M8[ns]") + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series(["2011-01-03", "2013-01-02"], dtype="M8[ns]") + ser = Series( + ["2011-01-03", "2013-01-02", "1900-05-03", "2011-01-03", "2013-01-02"], + dtype="M8[ns]", + ) + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + def test_timedelta_mode(self): + exp = Series(["-1 days", "0 days", "1 days"], dtype="timedelta64[ns]") + ser = Series(["1 days", "-1 days", "0 days"], dtype="timedelta64[ns]") + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series(["2 min", "1 day"], dtype="timedelta64[ns]") + ser = Series( + ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"], + dtype="timedelta64[ns]", + ) + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + def test_mixed_dtype(self): + exp = Series(["foo"], dtype=object) + ser = Series([1, "foo", "foo"]) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_uint64_overflow(self): + exp = Series([2**63], dtype=np.uint64) + ser = Series([1, 2**63, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.mode(ser.values), 
exp.values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series([1, 2**63], dtype=np.uint64) + ser = Series([1, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_categorical(self): + c = Categorical([1, 2]) + exp = c + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + c = Categorical([1, "a", "a"]) + exp = Categorical(["a"], categories=[1, "a"]) + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + c = Categorical([1, 1, 2, 3, 3]) + exp = Categorical([1, 3], categories=[1, 2, 3]) + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + def test_index(self): + idx = Index([1, 2, 3]) + exp = Series([1, 2, 3], dtype=np.int64) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index([1, "a", "a"]) + exp = Series(["a"], dtype=object) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index([1, 1, 2, 3, 3]) + exp = Series([1, 3], dtype=np.int64) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index( + ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"], + dtype="timedelta64[ns]", + ) + with pytest.raises(AttributeError, match="TimedeltaIndex"): + # algos.mode expects Arraylike, does *not* unwrap TimedeltaIndex + algos.mode(idx) + + def test_ser_mode_with_name(self): + # GH 46737 + ser = Series([1, 1, 3], name="foo") + result = ser.mode() + expected = Series([1], name="foo") + tm.assert_series_equal(result, expected) + + +class TestDiff: + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_diff_datetimelike_nat(self, dtype): + # NaT - NaT is NaT, not 0 + arr = np.arange(12).astype(np.int64).view(dtype).reshape(3, 4) + arr[:, 2] = arr.dtype.type("NaT", "ns") + result = algos.diff(arr, 1, axis=0) + + expected = np.ones(arr.shape, dtype="timedelta64[ns]") * 4 + expected[:, 2] = np.timedelta64("NaT", "ns") + expected[0, :] = np.timedelta64("NaT", "ns") + + tm.assert_numpy_array_equal(result, expected) + + result = algos.diff(arr.T, 1, axis=1) + tm.assert_numpy_array_equal(result, expected.T) + + def test_diff_ea_axis(self): + dta = date_range("2016-01-01", periods=3, tz="US/Pacific")._data + + msg = "cannot diff DatetimeArray on axis=1" + with pytest.raises(ValueError, match=msg): + algos.diff(dta, 1, axis=1) + + @pytest.mark.parametrize("dtype", ["int8", "int16"]) + def test_diff_low_precision_int(self, dtype): + arr = np.array([0, 1, 1, 0, 0], dtype=dtype) + result = algos.diff(arr, 1) + expected = np.array([np.nan, 1, 0, -1, 0], dtype="float32") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("op", [np.array, pd.array]) +def test_union_with_duplicates(op): + # GH#36289 + lvals = op([3, 1, 3, 4]) + rvals = op([2, 3, 1, 1]) + expected = op([3, 3, 1, 1, 4, 2]) + if isinstance(expected, np.ndarray): + result = algos.union_with_duplicates(lvals, rvals) + tm.assert_numpy_array_equal(result, expected) + else: + result = algos.union_with_duplicates(lvals, rvals) + tm.assert_extension_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_common.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a1c961c8cb6e5b1014f6baa193d4593d85d981 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_common.py @@ -0,0 +1,267 @@ +import collections +from functools import partial 
+import string +import subprocess +import sys +import textwrap + +import numpy as np +import pytest + +import pandas as pd +from pandas import Series +import pandas._testing as tm +from pandas.core import ops +import pandas.core.common as com +from pandas.util.version import Version + + +def test_get_callable_name(): + getname = com.get_callable_name + + def fn(x): + return x + + lambda_ = lambda x: x + part1 = partial(fn) + part2 = partial(part1) + + class somecall: + def __call__(self): + # This shouldn't actually get called below; somecall.__init__ + # should. + raise NotImplementedError + + assert getname(fn) == "fn" + assert getname(lambda_) + assert getname(part1) == "fn" + assert getname(part2) == "fn" + assert getname(somecall()) == "somecall" + assert getname(1) is None + + +def test_any_none(): + assert com.any_none(1, 2, 3, None) + assert not com.any_none(1, 2, 3, 4) + + +def test_all_not_none(): + assert com.all_not_none(1, 2, 3, 4) + assert not com.all_not_none(1, 2, 3, None) + assert not com.all_not_none(None, None, None, None) + + +def test_random_state(): + # Check with seed + state = com.random_state(5) + assert state.uniform() == np.random.RandomState(5).uniform() + + # Check with random state object + state2 = np.random.RandomState(10) + assert com.random_state(state2).uniform() == np.random.RandomState(10).uniform() + + # check with no arg random state + assert com.random_state() is np.random + + # check array-like + # GH32503 + state_arr_like = np.random.default_rng(None).integers( + 0, 2**31, size=624, dtype="uint32" + ) + assert ( + com.random_state(state_arr_like).uniform() + == np.random.RandomState(state_arr_like).uniform() + ) + + # Check BitGenerators + # GH32503 + assert ( + com.random_state(np.random.MT19937(3)).uniform() + == np.random.RandomState(np.random.MT19937(3)).uniform() + ) + assert ( + com.random_state(np.random.PCG64(11)).uniform() + == np.random.RandomState(np.random.PCG64(11)).uniform() + ) + + # Error for floats or strings + msg = ( + "random_state must be an integer, array-like, a BitGenerator, Generator, " + "a numpy RandomState, or None" + ) + with pytest.raises(ValueError, match=msg): + com.random_state("test") + + with pytest.raises(ValueError, match=msg): + com.random_state(5.5) + + +@pytest.mark.parametrize( + "left, right, expected", + [ + (Series([1], name="x"), Series([2], name="x"), "x"), + (Series([1], name="x"), Series([2], name="y"), None), + (Series([1]), Series([2], name="x"), None), + (Series([1], name="x"), Series([2]), None), + (Series([1], name="x"), [2], "x"), + ([1], Series([2], name="y"), "y"), + # matching NAs + (Series([1], name=np.nan), pd.Index([], name=np.nan), np.nan), + (Series([1], name=np.nan), pd.Index([], name=pd.NaT), None), + (Series([1], name=pd.NA), pd.Index([], name=pd.NA), pd.NA), + # tuple name GH#39757 + ( + Series([1], name=np.int64(1)), + pd.Index([], name=(np.int64(1), np.int64(2))), + None, + ), + ( + Series([1], name=(np.int64(1), np.int64(2))), + pd.Index([], name=(np.int64(1), np.int64(2))), + (np.int64(1), np.int64(2)), + ), + pytest.param( + Series([1], name=(np.float64("nan"), np.int64(2))), + pd.Index([], name=(np.float64("nan"), np.int64(2))), + (np.float64("nan"), np.int64(2)), + marks=pytest.mark.xfail( + reason="Not checking for matching NAs inside tuples." 
+ ), + ), + ], +) +def test_maybe_match_name(left, right, expected): + res = ops.common._maybe_match_name(left, right) + assert res is expected or res == expected + + +def test_standardize_mapping(): + # No uninitialized defaultdicts + msg = r"to_dict\(\) only accepts initialized defaultdicts" + with pytest.raises(TypeError, match=msg): + com.standardize_mapping(collections.defaultdict) + + # No non-mapping subtypes, instance + msg = "unsupported type: " + with pytest.raises(TypeError, match=msg): + com.standardize_mapping([]) + + # No non-mapping subtypes, class + with pytest.raises(TypeError, match=msg): + com.standardize_mapping(list) + + fill = {"bad": "data"} + assert com.standardize_mapping(fill) == dict + + # Convert instance to type + assert com.standardize_mapping({}) == dict + + dd = collections.defaultdict(list) + assert isinstance(com.standardize_mapping(dd), partial) + + +def test_git_version(): + # GH 21295 + git_version = pd.__git_version__ + assert len(git_version) == 40 + assert all(c in string.hexdigits for c in git_version) + + +def test_version_tag(): + version = Version(pd.__version__) + try: + version > Version("0.0.1") + except TypeError: + raise ValueError( + "No git tags exist, please sync tags between upstream and your repo" + ) + + +@pytest.mark.parametrize( + "obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)] +) +def test_serializable(obj): + # GH 35611 + unpickled = tm.round_trip_pickle(obj) + assert type(obj) == type(unpickled) + + +class TestIsBoolIndexer: + def test_non_bool_array_with_na(self): + # in particular, this should not raise + arr = np.array(["A", "B", np.nan], dtype=object) + assert not com.is_bool_indexer(arr) + + def test_list_subclass(self): + # GH#42433 + + class MyList(list): + pass + + val = MyList(["a"]) + + assert not com.is_bool_indexer(val) + + val = MyList([True]) + assert com.is_bool_indexer(val) + + def test_frozenlist(self): + # GH#42461 + data = {"col1": [1, 2], "col2": [3, 4]} + df = pd.DataFrame(data=data) + + frozen = df.index.names[1:] + assert not com.is_bool_indexer(frozen) + + result = df[frozen] + expected = df[[]] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("with_exception", [True, False]) +def test_temp_setattr(with_exception): + # GH#45954 + ser = Series(dtype=object) + ser.name = "first" + # Raise a ValueError in either case to satisfy pytest.raises + match = "Inside exception raised" if with_exception else "Outside exception raised" + with pytest.raises(ValueError, match=match): + with com.temp_setattr(ser, "name", "second"): + assert ser.name == "second" + if with_exception: + raise ValueError("Inside exception raised") + raise ValueError("Outside exception raised") + assert ser.name == "first" + + +@pytest.mark.single_cpu +def test_str_size(): + # GH#21758 + a = "a" + expected = sys.getsizeof(a) + pyexe = sys.executable.replace("\\", "/") + call = [ + pyexe, + "-c", + "a='a';import sys;sys.getsizeof(a);import pandas;print(sys.getsizeof(a));", + ] + result = subprocess.check_output(call).decode()[-4:-1].strip("\n") + assert int(result) == int(expected) + + +@pytest.mark.single_cpu +def test_bz2_missing_import(): + # Check whether bz2 missing import is handled correctly (issue #53857) + code = """ + import sys + sys.modules['bz2'] = None + import pytest + import pandas as pd + from pandas.compat import get_bz2_file + msg = 'bz2 module not available.' 
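+    # bz2 was replaced with None above, so pandas' compat helper is expected
+    # to raise instead of handing back a broken module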
+ with pytest.raises(RuntimeError, match=msg): + get_bz2_file() + """ + code = textwrap.dedent(code) + call = [sys.executable, "-c", code] + subprocess.check_output(call) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_errors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..aeddc08e4b888c0937a3095a46003613e0115876 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_errors.py @@ -0,0 +1,112 @@ +import pytest + +from pandas.errors import ( + AbstractMethodError, + UndefinedVariableError, +) + +import pandas as pd + + +@pytest.mark.parametrize( + "exc", + [ + "AttributeConflictWarning", + "CSSWarning", + "CategoricalConversionWarning", + "ClosedFileError", + "DataError", + "DatabaseError", + "DtypeWarning", + "EmptyDataError", + "IncompatibilityWarning", + "IndexingError", + "InvalidColumnName", + "InvalidComparison", + "InvalidVersion", + "LossySetitemError", + "MergeError", + "NoBufferPresent", + "NumExprClobberingError", + "NumbaUtilError", + "OptionError", + "OutOfBoundsDatetime", + "ParserError", + "ParserWarning", + "PerformanceWarning", + "PossibleDataLossError", + "PossiblePrecisionLoss", + "PyperclipException", + "SettingWithCopyError", + "SettingWithCopyWarning", + "SpecificationError", + "UnsortedIndexError", + "UnsupportedFunctionCall", + "ValueLabelTypeMismatch", + ], +) +def test_exception_importable(exc): + from pandas import errors + + err = getattr(errors, exc) + assert err is not None + + # check that we can raise on them + + msg = "^$" + + with pytest.raises(err, match=msg): + raise err() + + +def test_catch_oob(): + from pandas import errors + + msg = "Cannot cast 1500-01-01 00:00:00 to unit='ns' without overflow" + with pytest.raises(errors.OutOfBoundsDatetime, match=msg): + pd.Timestamp("15000101").as_unit("ns") + + +@pytest.mark.parametrize( + "is_local", + [ + True, + False, + ], +) +def test_catch_undefined_variable_error(is_local): + variable_name = "x" + if is_local: + msg = f"local variable '{variable_name}' is not defined" + else: + msg = f"name '{variable_name}' is not defined" + + with pytest.raises(UndefinedVariableError, match=msg): + raise UndefinedVariableError(variable_name, is_local) + + +class Foo: + @classmethod + def classmethod(cls): + raise AbstractMethodError(cls, methodtype="classmethod") + + @property + def property(self): + raise AbstractMethodError(self, methodtype="property") + + def method(self): + raise AbstractMethodError(self) + + +def test_AbstractMethodError_classmethod(): + xpr = "This classmethod must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, match=xpr): + Foo.classmethod() + + xpr = "This property must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, match=xpr): + Foo().property + + xpr = "This method must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, match=xpr): + Foo().method() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_expressions.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..dfec99f0786ebf11a44dedfad8aa8e1015356fab --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_expressions.py @@ -0,0 +1,466 @@ +import operator +import re + +import numpy as np +import pytest + +from pandas import option_context +import pandas._testing as tm +from 
pandas.core.api import ( + DataFrame, + Index, + Series, +) +from pandas.core.computation import expressions as expr + + +@pytest.fixture +def _frame(): + return DataFrame( + np.random.default_rng(2).standard_normal((10001, 4)), + columns=list("ABCD"), + dtype="float64", + ) + + +@pytest.fixture +def _frame2(): + return DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=list("ABCD"), + dtype="float64", + ) + + +@pytest.fixture +def _mixed(_frame): + return DataFrame( + { + "A": _frame["A"].copy(), + "B": _frame["B"].astype("float32"), + "C": _frame["C"].astype("int64"), + "D": _frame["D"].astype("int32"), + } + ) + + +@pytest.fixture +def _mixed2(_frame2): + return DataFrame( + { + "A": _frame2["A"].copy(), + "B": _frame2["B"].astype("float32"), + "C": _frame2["C"].astype("int64"), + "D": _frame2["D"].astype("int32"), + } + ) + + +@pytest.fixture +def _integer(): + return DataFrame( + np.random.default_rng(2).integers(1, 100, size=(10001, 4)), + columns=list("ABCD"), + dtype="int64", + ) + + +@pytest.fixture +def _integer_integers(_integer): + # integers to get a case with zeros + return _integer * np.random.default_rng(2).integers(0, 2, size=np.shape(_integer)) + + +@pytest.fixture +def _integer2(): + return DataFrame( + np.random.default_rng(2).integers(1, 100, size=(101, 4)), + columns=list("ABCD"), + dtype="int64", + ) + + +@pytest.fixture +def _array(_frame): + return _frame["A"].values.copy() + + +@pytest.fixture +def _array2(_frame2): + return _frame2["A"].values.copy() + + +@pytest.fixture +def _array_mixed(_mixed): + return _mixed["D"].values.copy() + + +@pytest.fixture +def _array_mixed2(_mixed2): + return _mixed2["D"].values.copy() + + +@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr") +class TestExpressions: + @staticmethod + def call_op(df, other, flex: bool, opname: str): + if flex: + op = lambda x, y: getattr(x, opname)(y) + op.__name__ = opname + else: + op = getattr(operator, opname) + + with option_context("compute.use_numexpr", False): + expected = op(df, other) + + expr.get_test_result() + + result = op(df, other) + return result, expected + + @pytest.mark.parametrize( + "fixture", + [ + "_integer", + "_integer2", + "_integer_integers", + "_frame", + "_frame2", + "_mixed", + "_mixed2", + ], + ) + @pytest.mark.parametrize("flex", [True, False]) + @pytest.mark.parametrize( + "arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"] + ) + def test_run_arithmetic(self, request, fixture, flex, arith, monkeypatch): + df = request.getfixturevalue(fixture) + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + result, expected = self.call_op(df, df, flex, arith) + + if arith == "truediv": + assert all(x.kind == "f" for x in expected.dtypes.values) + tm.assert_equal(expected, result) + + for i in range(len(df.columns)): + result, expected = self.call_op( + df.iloc[:, i], df.iloc[:, i], flex, arith + ) + if arith == "truediv": + assert expected.dtype.kind == "f" + tm.assert_equal(expected, result) + + @pytest.mark.parametrize( + "fixture", + [ + "_integer", + "_integer2", + "_integer_integers", + "_frame", + "_frame2", + "_mixed", + "_mixed2", + ], + ) + @pytest.mark.parametrize("flex", [True, False]) + def test_run_binary(self, request, fixture, flex, comparison_op, monkeypatch): + """ + tests solely that the result is the same whether or not numexpr is + enabled. Need to test whether the function does the correct thing + elsewhere. 
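+        Whether numexpr actually ran is asserted via expr.get_test_result().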
+ """ + df = request.getfixturevalue(fixture) + arith = comparison_op.__name__ + with option_context("compute.use_numexpr", False): + other = df.copy() + 1 + + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + expr.set_test_mode(True) + + result, expected = self.call_op(df, other, flex, arith) + + used_numexpr = expr.get_test_result() + assert used_numexpr, "Did not use numexpr as expected." + tm.assert_equal(expected, result) + + for i in range(len(df.columns)): + binary_comp = other.iloc[:, i] + 1 + self.call_op(df.iloc[:, i], binary_comp, flex, "add") + + def test_invalid(self): + array = np.random.default_rng(2).standard_normal(1_000_001) + array2 = np.random.default_rng(2).standard_normal(100) + + # no op + result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate") + assert not result + + # min elements + result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate") + assert not result + + # ok, we only check on first part of expression + result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate") + assert result + + @pytest.mark.filterwarnings("ignore:invalid value encountered in:RuntimeWarning") + @pytest.mark.parametrize( + "opname,op_str", + [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")], + ) + @pytest.mark.parametrize( + "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] + ) + def test_binary_ops(self, request, opname, op_str, left_fix, right_fix): + left = request.getfixturevalue(left_fix) + right = request.getfixturevalue(right_fix) + + def testit(left, right, opname, op_str): + if opname == "pow": + left = np.abs(left) + + op = getattr(operator, opname) + + # array has 0s + result = expr.evaluate(op, left, left, use_numexpr=True) + expected = expr.evaluate(op, left, left, use_numexpr=False) + tm.assert_numpy_array_equal(result, expected) + + result = expr._can_use_numexpr(op, op_str, right, right, "evaluate") + assert not result + + with option_context("compute.use_numexpr", False): + testit(left, right, opname, op_str) + + expr.set_numexpr_threads(1) + testit(left, right, opname, op_str) + expr.set_numexpr_threads() + testit(left, right, opname, op_str) + + @pytest.mark.parametrize( + "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] + ) + def test_comparison_ops(self, request, comparison_op, left_fix, right_fix): + left = request.getfixturevalue(left_fix) + right = request.getfixturevalue(right_fix) + + def testit(): + f12 = left + 1 + f22 = right + 1 + + op = comparison_op + + result = expr.evaluate(op, left, f12, use_numexpr=True) + expected = expr.evaluate(op, left, f12, use_numexpr=False) + tm.assert_numpy_array_equal(result, expected) + + result = expr._can_use_numexpr(op, op, right, f22, "evaluate") + assert not result + + with option_context("compute.use_numexpr", False): + testit() + + expr.set_numexpr_threads(1) + testit() + expr.set_numexpr_threads() + testit() + + @pytest.mark.parametrize("cond", [True, False]) + @pytest.mark.parametrize("fixture", ["_frame", "_frame2", "_mixed", "_mixed2"]) + def test_where(self, request, cond, fixture): + df = request.getfixturevalue(fixture) + + def testit(): + c = np.empty(df.shape, dtype=np.bool_) + c.fill(cond) + result = expr.where(c, df.values, df.values + 1) + expected = np.where(c, df.values, df.values + 1) + tm.assert_numpy_array_equal(result, expected) + + with option_context("compute.use_numexpr", False): + testit() + + expr.set_numexpr_threads(1) + 
testit() + expr.set_numexpr_threads() + testit() + + @pytest.mark.parametrize( + "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")] + ) + def test_bool_ops_raise_on_arithmetic(self, op_str, opname): + df = DataFrame( + { + "a": np.random.default_rng(2).random(10) > 0.5, + "b": np.random.default_rng(2).random(10) > 0.5, + } + ) + + msg = f"operator '{opname}' not implemented for bool dtypes" + f = getattr(operator, opname) + err_msg = re.escape(msg) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df, df) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, df.b) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, True) + + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df.a) + + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df, True) + + @pytest.mark.parametrize( + "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")] + ) + def test_bool_ops_warn_on_arithmetic(self, op_str, opname): + n = 10 + df = DataFrame( + { + "a": np.random.default_rng(2).random(n) > 0.5, + "b": np.random.default_rng(2).random(n) > 0.5, + } + ) + + subs = {"+": "|", "*": "&", "-": "^"} + sub_funcs = {"|": "or_", "&": "and_", "^": "xor"} + + f = getattr(operator, opname) + fe = getattr(operator, sub_funcs[subs[op_str]]) + + if op_str == "-": + # raises TypeError + return + + with tm.use_numexpr(True, min_elements=5): + with tm.assert_produces_warning(): + r = f(df, df) + e = fe(df, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, df.b) + e = fe(df.a, df.b) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, True) + e = fe(df.a, True) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df.a) + e = fe(False, df.a) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df) + e = fe(False, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df, True) + e = fe(df, True) + tm.assert_frame_equal(r, e) + + @pytest.mark.parametrize( + "test_input,expected", + [ + ( + DataFrame( + [[0, 1, 2, "aa"], [0, 1, 2, "aa"]], columns=["a", "b", "c", "dtype"] + ), + DataFrame([[False, False], [False, False]], columns=["a", "dtype"]), + ), + ( + DataFrame( + [[0, 3, 2, "aa"], [0, 4, 2, "aa"], [0, 1, 1, "bb"]], + columns=["a", "b", "c", "dtype"], + ), + DataFrame( + [[False, False], [False, False], [False, False]], + columns=["a", "dtype"], + ), + ), + ], + ) + def test_bool_ops_column_name_dtype(self, test_input, expected): + # GH 22383 - .ne fails if columns containing column name 'dtype' + result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") + ) + @pytest.mark.parametrize("axis", (0, 1)) + def test_frame_series_axis(self, axis, arith, _frame, monkeypatch): + # GH#26736 Dataframe.floordiv(Series, axis=1) fails + + df = _frame + if axis == 1: + other = df.iloc[0, :] + else: + other = df.iloc[:, 0] + + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + + op_func = getattr(df, arith) + + with option_context("compute.use_numexpr", False): + expected = op_func(other, axis=axis) + + result = op_func(other, axis=axis) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize( + "op", + [ + "__mod__", + 
"__rmod__", + "__floordiv__", + "__rfloordiv__", + ], + ) + @pytest.mark.parametrize("box", [DataFrame, Series, Index]) + @pytest.mark.parametrize("scalar", [-5, 5]) + def test_python_semantics_with_numexpr_installed( + self, op, box, scalar, monkeypatch + ): + # https://github.com/pandas-dev/pandas/issues/36047 + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + data = np.arange(-50, 50) + obj = box(data) + method = getattr(obj, op) + result = method(scalar) + + # compare result with numpy + with option_context("compute.use_numexpr", False): + expected = method(scalar) + + tm.assert_equal(result, expected) + + # compare result element-wise with Python + for i, elem in enumerate(data): + if box == DataFrame: + scalar_result = result.iloc[i, 0] + else: + scalar_result = result[i] + try: + expected = getattr(int(elem), op)(scalar) + except ZeroDivisionError: + pass + else: + assert scalar_result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_flags.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..9294b3fc3319b78b59d5637acdf3fd75737cd836 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_flags.py @@ -0,0 +1,48 @@ +import pytest + +import pandas as pd + + +class TestFlags: + def test_equality(self): + a = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags + b = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags + + assert a == a + assert b == b + assert a != b + assert a != 2 + + def test_set(self): + df = pd.DataFrame().set_flags(allows_duplicate_labels=True) + a = df.flags + a.allows_duplicate_labels = False + assert a.allows_duplicate_labels is False + a["allows_duplicate_labels"] = True + assert a.allows_duplicate_labels is True + + def test_repr(self): + a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=True).flags) + assert a == "" + a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=False).flags) + assert a == "" + + def test_obj_ref(self): + df = pd.DataFrame() + flags = df.flags + del df + with pytest.raises(ValueError, match="object has been deleted"): + flags.allows_duplicate_labels = True + + def test_getitem(self): + df = pd.DataFrame() + flags = df.flags + assert flags["allows_duplicate_labels"] is True + flags["allows_duplicate_labels"] = False + assert flags["allows_duplicate_labels"] is False + + with pytest.raises(KeyError, match="a"): + flags["a"] + + with pytest.raises(ValueError, match="a"): + flags["a"] = 10 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_nanops.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_nanops.py new file mode 100644 index 0000000000000000000000000000000000000000..a50054f33f382ed913261e0cafd944c2fd86aaa3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_nanops.py @@ -0,0 +1,1274 @@ +from functools import partial + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import ( + Series, + isna, +) +import pandas._testing as tm +from pandas.core import nanops + +use_bn = nanops._USE_BOTTLENECK + + +@pytest.fixture +def disable_bottleneck(monkeypatch): + with monkeypatch.context() as m: + m.setattr(nanops, "_USE_BOTTLENECK", False) + yield + + +@pytest.fixture +def arr_shape(): + return 11, 7 + + +@pytest.fixture +def arr_float(arr_shape): + 
return np.random.default_rng(2).standard_normal(arr_shape) + + +@pytest.fixture +def arr_complex(arr_float): + return arr_float + arr_float * 1j + + +@pytest.fixture +def arr_int(arr_shape): + return np.random.default_rng(2).integers(-10, 10, arr_shape) + + +@pytest.fixture +def arr_bool(arr_shape): + return np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + + +@pytest.fixture +def arr_str(arr_float): + return np.abs(arr_float).astype("S") + + +@pytest.fixture +def arr_utf(arr_float): + return np.abs(arr_float).astype("U") + + +@pytest.fixture +def arr_date(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + + +@pytest.fixture +def arr_tdelta(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + + +@pytest.fixture +def arr_nan(arr_shape): + return np.tile(np.nan, arr_shape) + + +@pytest.fixture +def arr_float_nan(arr_float, arr_nan): + return np.vstack([arr_float, arr_nan]) + + +@pytest.fixture +def arr_nan_float1(arr_nan, arr_float): + return np.vstack([arr_nan, arr_float]) + + +@pytest.fixture +def arr_nan_nan(arr_nan): + return np.vstack([arr_nan, arr_nan]) + + +@pytest.fixture +def arr_inf(arr_float): + return arr_float * np.inf + + +@pytest.fixture +def arr_float_inf(arr_float, arr_inf): + return np.vstack([arr_float, arr_inf]) + + +@pytest.fixture +def arr_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_inf]) + + +@pytest.fixture +def arr_float_nan_inf(arr_float, arr_nan, arr_inf): + return np.vstack([arr_float, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_nan_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_obj( + arr_float, arr_int, arr_bool, arr_complex, arr_str, arr_utf, arr_date, arr_tdelta +): + return np.vstack( + [ + arr_float.astype("O"), + arr_int.astype("O"), + arr_bool.astype("O"), + arr_complex.astype("O"), + arr_str.astype("O"), + arr_utf.astype("O"), + arr_date.astype("O"), + arr_tdelta.astype("O"), + ] + ) + + +@pytest.fixture +def arr_nan_nanj(arr_nan): + with np.errstate(invalid="ignore"): + return arr_nan + arr_nan * 1j + + +@pytest.fixture +def arr_complex_nan(arr_complex, arr_nan_nanj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_nanj]) + + +@pytest.fixture +def arr_nan_infj(arr_inf): + with np.errstate(invalid="ignore"): + return arr_inf * 1j + + +@pytest.fixture +def arr_complex_nan_infj(arr_complex, arr_nan_infj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_infj]) + + +@pytest.fixture +def arr_float_1d(arr_float): + return arr_float[:, 0] + + +@pytest.fixture +def arr_nan_1d(arr_nan): + return arr_nan[:, 0] + + +@pytest.fixture +def arr_float_nan_1d(arr_float_nan): + return arr_float_nan[:, 0] + + +@pytest.fixture +def arr_float1_nan_1d(arr_float1_nan): + return arr_float1_nan[:, 0] + + +@pytest.fixture +def arr_nan_float1_1d(arr_nan_float1): + return arr_nan_float1[:, 0] + + +class TestnanopsDataFrame: + def setup_method(self): + nanops._USE_BOTTLENECK = False + + arr_shape = (11, 7) + + self.arr_float = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_float1 = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_complex = self.arr_float + self.arr_float1 * 1j + self.arr_int = np.random.default_rng(2).integers(-10, 10, arr_shape) + self.arr_bool = np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + self.arr_str = np.abs(self.arr_float).astype("S") + self.arr_utf = np.abs(self.arr_float).astype("U") + 
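+        # the date/timedelta arrays reuse integer draws, reinterpreted as
+        # M8[ns]/m8[ns] via astype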
self.arr_date = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + ) + self.arr_tdelta = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + ) + + self.arr_nan = np.tile(np.nan, arr_shape) + self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) + self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) + self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) + self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan]) + + self.arr_inf = self.arr_float * np.inf + self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) + + self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf]) + self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf]) + self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf]) + self.arr_obj = np.vstack( + [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + self.arr_complex.astype("O"), + self.arr_str.astype("O"), + self.arr_utf.astype("O"), + self.arr_date.astype("O"), + self.arr_tdelta.astype("O"), + ] + ) + + with np.errstate(invalid="ignore"): + self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j + self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj]) + + self.arr_nan_infj = self.arr_inf * 1j + self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj]) + + self.arr_float_2d = self.arr_float + self.arr_float1_2d = self.arr_float1 + + self.arr_nan_2d = self.arr_nan + self.arr_float_nan_2d = self.arr_float_nan + self.arr_float1_nan_2d = self.arr_float1_nan + self.arr_nan_float1_2d = self.arr_nan_float1 + + self.arr_float_1d = self.arr_float[:, 0] + self.arr_float1_1d = self.arr_float1[:, 0] + + self.arr_nan_1d = self.arr_nan[:, 0] + self.arr_float_nan_1d = self.arr_float_nan[:, 0] + self.arr_float1_nan_1d = self.arr_float1_nan[:, 0] + self.arr_nan_float1_1d = self.arr_nan_float1[:, 0] + + def teardown_method(self): + nanops._USE_BOTTLENECK = use_bn + + def check_results(self, targ, res, axis, check_dtype=True): + res = getattr(res, "asm8", res) + + if ( + axis != 0 + and hasattr(targ, "shape") + and targ.ndim + and targ.shape != res.shape + ): + res = np.split(res, [targ.shape[0]], axis=0)[0] + + try: + tm.assert_almost_equal(targ, res, check_dtype=check_dtype) + except AssertionError: + # handle timedelta dtypes + if hasattr(targ, "dtype") and targ.dtype == "m8[ns]": + raise + + # There are sometimes rounding errors with + # complex and object dtypes. + # If it isn't one of those, re-raise the error. 
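+            # otherwise fall through and compare real/imag parts separately
+            # after casting both sides to a comparable dtype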
+ if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]: + raise + # convert object dtypes to something that can be split into + # real and imaginary parts + if res.dtype.kind == "O": + if targ.dtype.kind != "O": + res = res.astype(targ.dtype) + else: + cast_dtype = "c16" if hasattr(np, "complex128") else "f8" + res = res.astype(cast_dtype) + targ = targ.astype(cast_dtype) + # there should never be a case where numpy returns an object + # but nanops doesn't, so make that an exception + elif targ.dtype.kind == "O": + raise + tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype) + tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype) + + def check_fun_data( + self, + testfunc, + targfunc, + testarval, + targarval, + skipna, + check_dtype=True, + empty_targfunc=None, + **kwargs, + ): + for axis in list(range(targarval.ndim)) + [None]: + targartempval = targarval if skipna else testarval + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: + targ = targfunc(targartempval, axis=axis, **kwargs) + + if targartempval.dtype == object and ( + targfunc is np.any or targfunc is np.all + ): + # GH#12863 the numpy functions will retain e.g. floatiness + if isinstance(targ, np.ndarray): + targ = targ.astype(bool) + else: + targ = bool(targ) + + res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) + + if ( + isinstance(targ, np.complex128) + and isinstance(res, float) + and np.isnan(targ) + and np.isnan(res) + ): + # GH#18463 + targ = res + + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna: + res = testfunc(testarval, axis=axis, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if axis is None: + res = testfunc(testarval, skipna=skipna, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna and axis is None: + res = testfunc(testarval, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + + if testarval.ndim <= 1: + return + + # Recurse on lower-dimension + testarval2 = np.take(testarval, 0, axis=-1) + targarval2 = np.take(targarval, 0, axis=-1) + self.check_fun_data( + testfunc, + targfunc, + testarval2, + targarval2, + skipna=skipna, + check_dtype=check_dtype, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_fun( + self, testfunc, targfunc, testar, skipna, empty_targfunc=None, **kwargs + ): + targar = testar + if testar.endswith("_nan") and hasattr(self, testar[:-4]): + targar = testar[:-4] + + testarval = getattr(self, testar) + targarval = getattr(self, targar) + self.check_fun_data( + testfunc, + targfunc, + testarval, + targarval, + skipna=skipna, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_funs( + self, + testfunc, + targfunc, + skipna, + allow_complex=True, + allow_all_nan=True, + allow_date=True, + allow_tdelta=True, + allow_obj=True, + **kwargs, + ): + self.check_fun(testfunc, targfunc, "arr_float", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_float_nan", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_int", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_bool", skipna, **kwargs) + objs = [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + ] + + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan", skipna, **kwargs) + + if allow_complex: + self.check_fun(testfunc, targfunc, "arr_complex", skipna, **kwargs) + self.check_fun(testfunc, 
targfunc, "arr_complex_nan", skipna, **kwargs) + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan_nanj", skipna, **kwargs) + objs += [self.arr_complex.astype("O")] + + if allow_date: + targfunc(self.arr_date) + self.check_fun(testfunc, targfunc, "arr_date", skipna, **kwargs) + objs += [self.arr_date.astype("O")] + + if allow_tdelta: + try: + targfunc(self.arr_tdelta) + except TypeError: + pass + else: + self.check_fun(testfunc, targfunc, "arr_tdelta", skipna, **kwargs) + objs += [self.arr_tdelta.astype("O")] + + if allow_obj: + self.arr_obj = np.vstack(objs) + # some nanops handle object dtypes better than their numpy + # counterparts, so the numpy functions need to be given something + # else + if allow_obj == "convert": + targfunc = partial( + self._badobj_wrap, func=targfunc, allow_complex=allow_complex + ) + self.check_fun(testfunc, targfunc, "arr_obj", skipna, **kwargs) + + def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): + if value.dtype.kind == "O": + if allow_complex: + value = value.astype("c16") + else: + value = value.astype("f8") + return func(value, **kwargs) + + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)] + ) + def test_nan_funcs(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_all_nan=False, allow_date=False) + + def test_nansum(self, skipna): + self.check_funs( + nanops.nansum, + np.sum, + skipna, + allow_date=False, + check_dtype=False, + empty_targfunc=np.nansum, + ) + + def test_nanmean(self, skipna): + self.check_funs( + nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanmedian(self, skipna): + self.check_funs( + nanops.nanmedian, + np.median, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanvar(self, ddof, skipna): + self.check_funs( + nanops.nanvar, + np.var, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd(self, ddof, skipna): + self.check_funs( + nanops.nanstd, + np.std, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nansem(self, ddof, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nansem, + sp_stats.sem, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)] + ) + def test_nanops_with_warnings(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_obj=False) + + def _argminmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + nans = np.min(value, axis) + nullnan = isna(nans) + if res.ndim: + res[nullnan] = -1 + elif ( + hasattr(nullnan, "all") + and nullnan.all() + or not hasattr(nullnan, "all") + and nullnan + ): + res = -1 + return res + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmax(self, skipna): + func = partial(self._argminmax_wrap, func=np.argmax) + self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmin(self, 
skipna): + func = partial(self._argminmax_wrap, func=np.argmin) + self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False) + + def _skew_kurt_wrap(self, values, axis=None, func=None): + if not isinstance(values.dtype.type, np.floating): + values = values.astype("f8") + result = func(values, axis=axis, bias=False) + # fix for handling cases where all elements in an axis are the same + if isinstance(result, np.ndarray): + result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0 + return result + elif np.max(values) == np.min(values): + return 0.0 + return result + + def test_nanskew(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func = partial(self._skew_kurt_wrap, func=sp_stats.skew) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nanskew, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nankurt(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func1 = partial(sp_stats.kurtosis, fisher=True) + func = partial(self._skew_kurt_wrap, func=func1) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nankurt, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nanprod(self, skipna): + self.check_funs( + nanops.nanprod, + np.prod, + skipna, + allow_date=False, + allow_tdelta=False, + empty_targfunc=np.nanprod, + ) + + def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) + res01 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs) + res11 = checkfun( + self.arr_float_nan_2d, + self.arr_float1_nan_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs) + res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs) + res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs) + res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs) + res24 = checkfun( + self.arr_float_nan_2d, + self.arr_nan_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs) + res01 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs) + res11 = checkfun( + self.arr_float_nan_1d, + self.arr_float1_nan_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs) + res21 = 
checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs) + res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs) + res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs) + res24 = checkfun( + self.arr_float_nan_1d, + self.arr_nan_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def test_nancorr(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1) + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_pearson(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson") + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_kendall(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall") + targ0 = sp_stats.kendalltau(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall") + + def test_nancorr_spearman(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman") + targ0 = sp_stats.spearmanr(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman") + + def test_invalid_method(self): + pytest.importorskip("scipy") + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'" + with pytest.raises(ValueError, match=msg): + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo") + + def test_nancov(self): + targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1) + targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) + + 
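+# Illustrative sketch, not part of the vendored suite: nancorr/nancov drop
+# NaN pairs pairwise and return NaN outright once fewer than `min_periods`
+# valid pairs remain, which is what the res2x assertions above exercise.
+# Assuming only the public nanops.nancorr signature already used in this
+# module:
+#
+#   a = np.array([1.0, 2.0, np.nan, 4.0])
+#   b = np.array([2.0, 4.0, 6.0, 8.0])
+#   nanops.nancorr(a, b)                 # ~1.0; the NaN pair is dropped
+#   nanops.nancorr(a, b, min_periods=4)  # nan; only 3 valid pairs remain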
+@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_complex", False), + ("arr_int", False), + ("arr_bool", False), + ("arr_str", False), + ("arr_utf", False), + ("arr_complex", False), + ("arr_complex_nan", False), + ("arr_nan_nanj", False), + ("arr_nan_infj", True), + ("arr_complex_nan_infj", True), + ], +) +def test_has_infs_non_float(request, arr, correct, disable_bottleneck): + val = request.getfixturevalue(arr) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_float", False), + ("arr_nan", False), + ("arr_float_nan", False), + ("arr_nan_nan", False), + ("arr_float_inf", True), + ("arr_inf", True), + ("arr_nan_inf", True), + ("arr_float_nan_inf", True), + ("arr_nan_nan_inf", True), + ], +) +@pytest.mark.parametrize("astype", [None, "f4", "f2"]) +def test_has_infs_floats(request, arr, correct, astype, disable_bottleneck): + val = request.getfixturevalue(arr) + if astype is not None: + val = val.astype(astype) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "fixture", ["arr_float", "arr_complex", "arr_int", "arr_bool", "arr_str", "arr_utf"] +) +def test_bn_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert nanops._bn_ok_dtype(obj.dtype, "test") + + +@pytest.mark.parametrize( + "fixture", + [ + "arr_date", + "arr_tdelta", + "arr_obj", + ], +) +def test_bn_not_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert not nanops._bn_ok_dtype(obj.dtype, "test") + + +class TestEnsureNumeric: + def test_numeric_values(self): + # Test integer + assert nanops._ensure_numeric(1) == 1 + + # Test float + assert nanops._ensure_numeric(1.1) == 1.1 + + # Test complex + assert nanops._ensure_numeric(1 + 2j) == 1 + 2j + + def test_ndarray(self): + # Test numeric ndarray + values = np.array([1, 2, 3]) + assert np.allclose(nanops._ensure_numeric(values), values) + + # Test object ndarray + o_values = values.astype(object) + assert np.allclose(nanops._ensure_numeric(o_values), values) + + # Test convertible string ndarray + s_values = np.array(["1", "2", "3"], dtype=object) + msg = r"Could not convert \['1' '2' '3'\] to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + # Test non-convertible string ndarray + s_values = np.array(["foo", "bar", "baz"], dtype=object) + msg = r"Could not convert .* to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + def test_convertable_values(self): + with pytest.raises(TypeError, match="Could not convert string '1' to numeric"): + nanops._ensure_numeric("1") + with pytest.raises( + TypeError, match="Could not convert string '1.1' to numeric" + ): + nanops._ensure_numeric("1.1") + with pytest.raises( + TypeError, match=r"Could not convert string '1\+1j' to numeric" + ): + nanops._ensure_numeric("1+1j") + + def test_non_convertable_values(self): + msg = "Could not convert string 'foo' to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric("foo") + + # with the wrong type, python raises TypeError for us 
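+        # (the message matched below is Python's own float()/complex()
+        # conversion error, not a pandas-generated one)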
+ msg = "argument must be a string or a number" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric({}) + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric([]) + + +class TestNanvarFixedValues: + # xref GH10242 + # Samples from a normal distribution. + @pytest.fixture + def variance(self): + return 3.0 + + @pytest.fixture + def samples(self, variance): + return self.prng.normal(scale=variance**0.5, size=100000) + + def test_nanvar_all_finite(self, samples, variance): + actual_variance = nanops.nanvar(samples) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + def test_nanvar_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_variance = nanops.nanvar(samples_test, skipna=True) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + actual_variance = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_variance, np.nan, rtol=1e-2) + + def test_nanstd_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_std = nanops.nanstd(samples_test, skipna=True) + tm.assert_almost_equal(actual_std, variance**0.5, rtol=1e-2) + + actual_std = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2) + + def test_nanvar_axis(self, samples, variance): + # Generate some sample data. + samples_unif = self.prng.uniform(size=samples.shape[0]) + samples = np.vstack([samples, samples_unif]) + + actual_variance = nanops.nanvar(samples, axis=1) + tm.assert_almost_equal( + actual_variance, np.array([variance, 1.0 / 12]), rtol=1e-2 + ) + + def test_nanvar_ddof(self): + n = 5 + samples = self.prng.uniform(size=(10000, n + 1)) + samples[:, -1] = np.nan # Force use of our own algorithm. + + variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean() + variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean() + variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean() + + # The unbiased estimate. + var = 1.0 / 12 + tm.assert_almost_equal(variance_1, var, rtol=1e-2) + + # The underestimated variance. + tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, rtol=1e-2) + + # The overestimated variance. + tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, rtol=1e-2) + + @pytest.mark.parametrize("axis", range(2)) + @pytest.mark.parametrize("ddof", range(3)) + def test_ground_truth(self, axis, ddof): + # Test against values that were precomputed with Numpy. + samples = np.empty((4, 4)) + samples[:3, :3] = np.array( + [ + [0.97303362, 0.21869576, 0.55560287], + [0.72980153, 0.03109364, 0.99155171], + [0.09317602, 0.60078248, 0.15871292], + ] + ) + samples[3] = samples[:, 3] = np.nan + + # Actual variances along axis=0, 1 for ddof=0, 1, 2 + variance = np.array( + [ + [ + [0.13762259, 0.05619224, 0.11568816], + [0.20643388, 0.08428837, 0.17353224], + [0.41286776, 0.16857673, 0.34706449], + ], + [ + [0.09519783, 0.16435395, 0.05082054], + [0.14279674, 0.24653093, 0.07623082], + [0.28559348, 0.49306186, 0.15246163], + ], + ] + ) + + # Test nanvar. + var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(var[:3], variance[axis, ddof]) + assert np.isnan(var[3]) + + # Test nanstd. 
+ std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5) + assert np.isnan(std[3]) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd_roundoff(self, ddof): + # Regression test for GH 10242 (test data taken from GH 10489). Ensure + # that variance is stable. + data = Series(766897346 * np.ones(10)) + result = data.std(ddof=ddof) + assert result == 0.0 + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNanskewFixedValues: + # xref GH 11974 + # Test data + skewness value (computed with scipy.stats.skew) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_skew(self): + return -0.1875895205961754 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + skew = nanops.nanskew(data) + assert skew == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(left_tailed) < 0 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(right_tailed) > 0 + + def test_ground_truth(self, samples, actual_skew): + skew = nanops.nanskew(samples) + tm.assert_almost_equal(skew, actual_skew) + + def test_axis(self, samples, actual_skew): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + skew = nanops.nanskew(samples, axis=1) + tm.assert_almost_equal(skew, np.array([actual_skew, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=False) + assert np.isnan(skew) + + def test_nans_skipna(self, samples, actual_skew): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=True) + tm.assert_almost_equal(skew, actual_skew) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNankurtFixedValues: + # xref GH 11974 + # Test data + kurtosis value (computed with scipy.stats.kurtosis) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_kurt(self): + return -1.2058303433799713 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + kurt = nanops.nankurt(data) + assert kurt == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(left_tailed) < 2 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(right_tailed) < 0 + + def test_ground_truth(self, samples, actual_kurt): + kurt = nanops.nankurt(samples) + tm.assert_almost_equal(kurt, actual_kurt) + + def test_axis(self, samples, actual_kurt): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + kurt = nanops.nankurt(samples, axis=1) + tm.assert_almost_equal(kurt, np.array([actual_kurt, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=False) + assert np.isnan(kurt) + + def test_nans_skipna(self, samples, actual_kurt): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=True) + tm.assert_almost_equal(kurt, actual_kurt) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestDatetime64NaNOps: + @pytest.fixture(params=["s", 
"ms", "us", "ns"]) + def unit(self, request): + return request.param + + # Enabling mean changes the behavior of DataFrame.mean + # See https://github.com/pandas-dev/pandas/issues/24752 + def test_nanmean(self, unit): + dti = pd.date_range("2016-01-01", periods=3).as_unit(unit) + expected = dti[1] + + for obj in [dti, dti._data]: + result = nanops.nanmean(obj) + assert result == expected + + dti2 = dti.insert(1, pd.NaT) + + for obj in [dti2, dti2._data]: + result = nanops.nanmean(obj) + assert result == expected + + @pytest.mark.parametrize("constructor", ["M8", "m8"]) + def test_nanmean_skipna_false(self, constructor, unit): + dtype = f"{constructor}[{unit}]" + arr = np.arange(12).astype(np.int64).view(dtype).reshape(4, 3) + + arr[-1, -1] = "NaT" + + result = nanops.nanmean(arr, skipna=False) + assert np.isnat(result) + assert result.dtype == dtype + + result = nanops.nanmean(arr, axis=0, skipna=False) + expected = np.array([4, 5, "NaT"], dtype=arr.dtype) + tm.assert_numpy_array_equal(result, expected) + + result = nanops.nanmean(arr, axis=1, skipna=False) + expected = np.array([arr[0, 1], arr[1, 1], arr[2, 1], arr[-1, -1]]) + tm.assert_numpy_array_equal(result, expected) + + +def test_use_bottleneck(): + if nanops._BOTTLENECK_INSTALLED: + with pd.option_context("use_bottleneck", True): + assert pd.get_option("use_bottleneck") + + with pd.option_context("use_bottleneck", False): + assert not pd.get_option("use_bottleneck") + + +@pytest.mark.parametrize( + "numpy_op, expected", + [ + (np.sum, 10), + (np.nansum, 10), + (np.mean, 2.5), + (np.nanmean, 2.5), + (np.median, 2.5), + (np.nanmedian, 2.5), + (np.min, 1), + (np.max, 4), + (np.nanmin, 1), + (np.nanmax, 4), + ], +) +def test_numpy_ops(numpy_op, expected): + # GH8383 + result = numpy_op(Series([1, 2, 3, 4])) + assert result == expected + + +@pytest.mark.parametrize( + "operation", + [ + nanops.nanany, + nanops.nanall, + nanops.nansum, + nanops.nanmean, + nanops.nanmedian, + nanops.nanstd, + nanops.nanvar, + nanops.nansem, + nanops.nanargmax, + nanops.nanargmin, + nanops.nanmax, + nanops.nanmin, + nanops.nanskew, + nanops.nankurt, + nanops.nanprod, + ], +) +def test_nanops_independent_of_mask_param(operation): + # GH22764 + ser = Series([1, 2, np.nan, 3, np.nan, 4]) + mask = ser.isna() + median_expected = operation(ser._values) + median_result = operation(ser._values, mask=mask) + assert median_expected == median_result + + +@pytest.mark.parametrize("min_count", [-1, 0]) +def test_check_below_min_count_negative_or_zero_min_count(min_count): + # GH35227 + result = nanops.check_below_min_count((21, 37), None, min_count) + expected_result = False + assert result == expected_result + + +@pytest.mark.parametrize( + "mask", [None, np.array([False, False, True]), np.array([True] + 9 * [False])] +) +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (101, True)]) +def test_check_below_min_count_positive_min_count(mask, min_count, expected_result): + # GH35227 + shape = (10, 10) + result = nanops.check_below_min_count(shape, mask, min_count) + assert result == expected_result + + +@td.skip_if_windows +@td.skip_if_32bit +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (2812191852, True)]) +def test_check_below_min_count_large_shape(min_count, expected_result): + # GH35227 large shape used to show that the issue is fixed + shape = (2244367, 1253) + result = nanops.check_below_min_count(shape, mask=None, min_count=min_count) + assert result == expected_result + + +@pytest.mark.parametrize("func", ["nanmean", 
"nansum"]) +def test_check_bottleneck_disallow(any_real_numpy_dtype, func): + # GH 42878 bottleneck sometimes produces unreliable results for mean and sum + assert not nanops._bn_ok_dtype(np.dtype(any_real_numpy_dtype).type, func) + + +@pytest.mark.parametrize("val", [2**55, -(2**55), 20150515061816532]) +def test_nanmean_overflow(disable_bottleneck, val): + # GH 10155 + # In the previous implementation mean can overflow for int dtypes, it + # is now consistent with numpy + + ser = Series(val, index=range(500), dtype=np.int64) + result = ser.mean() + np_result = ser.values.mean() + assert result == val + assert result == np_result + assert result.dtype == np.float64 + + +@pytest.mark.parametrize( + "dtype", + [ + np.int16, + np.int32, + np.int64, + np.float32, + np.float64, + getattr(np, "float128", None), + ], +) +@pytest.mark.parametrize("method", ["mean", "std", "var", "skew", "kurt", "min", "max"]) +def test_returned_dtype(disable_bottleneck, dtype, method): + if dtype is None: + pytest.skip("np.float128 not available") + + ser = Series(range(10), dtype=dtype) + result = getattr(ser, method)() + if is_integer_dtype(dtype) and method not in ["min", "max"]: + assert result.dtype == np.float64 + else: + assert result.dtype == dtype diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_sorting.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_sorting.py new file mode 100644 index 0000000000000000000000000000000000000000..285f240028152072ed52b3657113d30a7fd63fea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_sorting.py @@ -0,0 +1,487 @@ +from collections import defaultdict +from datetime import datetime +from itertools import product + +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + MultiIndex, + Series, + array, + concat, + merge, +) +import pandas._testing as tm +from pandas.core.algorithms import safe_sort +import pandas.core.common as com +from pandas.core.sorting import ( + _decons_group_index, + get_group_index, + is_int64_overflow_possible, + lexsort_indexer, + nargsort, +) + + +@pytest.fixture +def left_right(): + low, high, n = -1 << 10, 1 << 10, 1 << 20 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)), columns=list("ABCDEFG") + ) + left["left"] = left.sum(axis=1) + + # one-2-one match + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i].copy() + right.columns = right.columns[:-1].tolist() + ["right"] + right.index = np.arange(len(right)) + right["right"] *= -1 + return left, right + + +class TestSorting: + @pytest.mark.slow + def test_int64_overflow(self): + B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500))) + A = np.arange(2500) + df = DataFrame( + { + "A": A, + "B": B, + "C": A, + "D": B, + "E": A, + "F": B, + "G": A, + "H": B, + "values": np.random.default_rng(2).standard_normal(2500), + } + ) + + lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"]) + rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"]) + + left = lg.sum()["values"] + right = rg.sum()["values"] + + exp_index, _ = left.index.sortlevel() + tm.assert_index_equal(left.index, exp_index) + + exp_index, _ = right.index.sortlevel(0) + tm.assert_index_equal(right.index, exp_index) + + tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values)) + tups = com.asarray_tuplesafe(tups) + + expected = df.groupby(tups).sum()["values"] + + for k, v in expected.items(): + assert left[k] == right[k[::-1]] + assert left[k] == v + assert 
len(left) == len(right) + + def test_int64_overflow_groupby_large_range(self): + # GH9096 + values = range(55109) + data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values}) + grouped = data.groupby(["a", "b", "c", "d"]) + assert len(grouped) == len(values) + + @pytest.mark.parametrize("agg", ["mean", "median"]) + def test_int64_overflow_groupby_large_df_shuffled(self, agg): + rs = np.random.default_rng(2) + arr = rs.integers(-1 << 12, 1 << 12, (1 << 15, 5)) + i = rs.choice(len(arr), len(arr) * 4) + arr = np.vstack((arr, arr[i])) # add some duplicate rows + + i = rs.permutation(len(arr)) + arr = arr[i] # shuffle rows + + df = DataFrame(arr, columns=list("abcde")) + df["jim"], df["joe"] = np.zeros((2, len(df))) + gr = df.groupby(list("abcde")) + + # verify this is testing what it is supposed to test! + assert is_int64_overflow_possible(gr._grouper.shape) + + mi = MultiIndex.from_arrays( + [ar.ravel() for ar in np.array_split(np.unique(arr, axis=0), 5, axis=1)], + names=list("abcde"), + ) + + res = DataFrame( + np.zeros((len(mi), 2)), columns=["jim", "joe"], index=mi + ).sort_index() + + tm.assert_frame_equal(getattr(gr, agg)(), res) + + @pytest.mark.parametrize( + "order, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_lexsort_indexer(self, order, na_position, exp): + keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5] + result = lexsort_indexer(keys, orders=order, na_position=na_position) + tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) + + @pytest.mark.parametrize( + "ascending, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_nargsort(self, ascending, na_position, exp): + # list places NaNs last, np.array(..., dtype="O") may not place NaNs first + items = np.array([np.nan] * 5 + list(range(100)) + [np.nan] * 5, dtype="O") + + # mergesort is the most difficult to get right because we want it to be + # stable. 
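+        # (A stable sort keeps equal keys in their original relative order,
+        # which is what pins down where the ten NaNs land in the expected
+        # indexer.)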
+ + # According to numpy/core/tests/test_multiarray, """The number of + # sorted items must be greater than ~50 to check the actual algorithm + # because quick and merge sort fall over to insertion sort for small + # arrays.""" + + result = nargsort( + items, kind="mergesort", ascending=ascending, na_position=na_position + ) + tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) + + +class TestMerge: + def test_int64_overflow_outer_merge(self): + # #2690, combinatorial explosion + df1 = DataFrame( + np.random.default_rng(2).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G1"], + ) + df2 = DataFrame( + np.random.default_rng(3).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G2"], + ) + result = merge(df1, df2, how="outer") + assert len(result) == 2000 + + @pytest.mark.slow + def test_int64_overflow_check_sum_col(self, left_right): + left, right = left_right + + out = merge(left, right, how="outer") + assert len(out) == len(left) + tm.assert_series_equal(out["left"], -out["right"], check_names=False) + result = out.iloc[:, :-2].sum(axis=1) + tm.assert_series_equal(out["left"], result, check_names=False) + assert result.name is None + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + def test_int64_overflow_how_merge(self, left_right, how): + left, right = left_right + + out = merge(left, right, how="outer") + out.sort_values(out.columns.tolist(), inplace=True) + out.index = np.arange(len(out)) + tm.assert_frame_equal(out, merge(left, right, how=how, sort=True)) + + @pytest.mark.slow + def test_int64_overflow_sort_false_order(self, left_right): + left, right = left_right + + # check that left merge w/ sort=False maintains left frame order + out = merge(left, right, how="left", sort=False) + tm.assert_frame_equal(left, out[left.columns.tolist()]) + + out = merge(right, left, how="left", sort=False) + tm.assert_frame_equal(right, out[right.columns.tolist()]) + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + @pytest.mark.parametrize("sort", [True, False]) + def test_int64_overflow_one_to_many_none_match(self, how, sort): + # one-2-many/none match + low, high, n = -1 << 10, 1 << 10, 1 << 11 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # confirm that this is checking what it is supposed to check + shape = left.apply(Series.nunique).values + assert is_int64_overflow_possible(shape) + + # add duplicates to left frame + left = concat([left, left], ignore_index=True) + + right = DataFrame( + np.random.default_rng(3).integers(low, high, (n // 2, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # add duplicates & overlap with left to the right frame + i = np.random.default_rng(4).choice(len(left), n) + right = concat([right, right, left.iloc[i]], ignore_index=True) + + left["left"] = np.random.default_rng(2).standard_normal(len(left)) + right["right"] = np.random.default_rng(2).standard_normal(len(right)) + + # shuffle left & right frames + i = np.random.default_rng(5).permutation(len(left)) + left = left.iloc[i].copy() + left.index = np.arange(len(left)) + + i = np.random.default_rng(6).permutation(len(right)) + right = right.iloc[i].copy() + right.index = np.arange(len(right)) + + # manually compute outer merge + ldict, rdict = defaultdict(list), defaultdict(list) + + for idx, row in left.set_index(list("ABCDEFG")).iterrows(): + ldict[idx].append(row["left"]) + + for idx, row in 
right.set_index(list("ABCDEFG")).iterrows(): + rdict[idx].append(row["right"]) + + vals = [] + for k, lval in ldict.items(): + rval = rdict.get(k, [np.nan]) + for lv, rv in product(lval, rval): + vals.append( + k + + ( + lv, + rv, + ) + ) + + for k, rval in rdict.items(): + if k not in ldict: + vals.extend( + k + + ( + np.nan, + rv, + ) + for rv in rval + ) + + def align(df): + df = df.sort_values(df.columns.tolist()) + df.index = np.arange(len(df)) + return df + + out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"]) + out = align(out) + + jmask = { + "left": out["left"].notna(), + "right": out["right"].notna(), + "inner": out["left"].notna() & out["right"].notna(), + "outer": np.ones(len(out), dtype="bool"), + } + + mask = jmask[how] + frame = align(out[mask].copy()) + assert mask.all() ^ mask.any() or how == "outer" + + res = merge(left, right, how=how, sort=sort) + if sort: + kcols = list("ABCDEFG") + tm.assert_frame_equal( + res[kcols].copy(), res[kcols].sort_values(kcols, kind="mergesort") + ) + + # as in GH9092 dtypes break with outer/right join + # 2021-12-18: dtype does not break anymore + tm.assert_frame_equal(frame, align(res)) + + +@pytest.mark.parametrize( + "codes_list, shape", + [ + [ + [ + np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64), + ], + (4, 5, 6), + ], + [ + [ + np.tile(np.arange(10000, dtype=np.int64), 5), + np.tile(np.arange(10000, dtype=np.int64), 5), + ], + (10000, 10000), + ], + ], +) +def test_decons(codes_list, shape): + group_index = get_group_index(codes_list, shape, sort=True, xnull=True) + codes_list2 = _decons_group_index(group_index, shape) + + for a, b in zip(codes_list, codes_list2): + tm.assert_numpy_array_equal(a, b) + + +class TestSafeSort: + @pytest.mark.parametrize( + "arg, exp", + [ + [[3, 1, 2, 0, 4], [0, 1, 2, 3, 4]], + [ + np.array(list("baaacb"), dtype=object), + np.array(list("aaabbc"), dtype=object), + ], + [[], []], + ], + ) + def test_basic_sort(self, arg, exp): + result = safe_sort(np.array(arg)) + expected = np.array(exp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + @pytest.mark.parametrize( + "codes, exp_codes", + [ + [[0, 1, 1, 2, 3, 0, -1, 4], [3, 1, 1, 2, 0, 3, -1, 4]], + [[], []], + ], + ) + def test_codes(self, verify, codes, exp_codes): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + result, result_codes = safe_sort( + values, codes, use_na_sentinel=True, verify=verify + ) + expected_codes = np.array(exp_codes, dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_codes_out_of_bound(self): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + # out of bound indices + codes = [0, 101, 102, 2, 3, 0, 99, 4] + result, result_codes = safe_sort(values, codes, use_na_sentinel=True) + expected_codes = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_mixed_integer(self): + values = np.array(["b", 1, 0, "a", 0, "b"], dtype=object) + result = safe_sort(values) + expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_mixed_integer_with_codes(self): + values = np.array(["b", 1, 0, "a"], dtype=object) + codes = [0, 1, 2, 3, 
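+            # -1 is the NA sentinel; safe_sort passes it through unchanged in
+            # the returned codes.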
0, -1, 1] + result, result_codes = safe_sort(values, codes) + expected = np.array([0, 1, "a", "b"], dtype=object) + expected_codes = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_unsortable(self): + # GH 13714 + arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) + msg = "'[<>]' not supported between instances of .*" + with pytest.raises(TypeError, match=msg): + safe_sort(arr) + + @pytest.mark.parametrize( + "arg, codes, err, msg", + [ + [1, None, TypeError, "Only np.ndarray, ExtensionArray, and Index"], + [np.array([0, 1, 2]), 1, TypeError, "Only list-like objects or None"], + [np.array([0, 1, 2, 1]), [0, 1], ValueError, "values should be unique"], + ], + ) + def test_exceptions(self, arg, codes, err, msg): + with pytest.raises(err, match=msg): + safe_sort(values=arg, codes=codes) + + @pytest.mark.parametrize( + "arg, exp", [[[1, 3, 2], [1, 2, 3]], [[1, 3, np.nan, 2], [1, 2, 3, np.nan]]] + ) + def test_extension_array(self, arg, exp): + a = array(arg, dtype="Int64") + result = safe_sort(a) + expected = array(exp, dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + def test_extension_array_codes(self, verify): + a = array([1, 3, 2], dtype="Int64") + result, codes = safe_sort(a, [0, 1, -1, 2], use_na_sentinel=True, verify=verify) + expected_values = array([1, 2, 3], dtype="Int64") + expected_codes = np.array([0, 2, -1, 1], dtype=np.intp) + tm.assert_extension_array_equal(result, expected_values) + tm.assert_numpy_array_equal(codes, expected_codes) + + +def test_mixed_str_null(nulls_fixture): + values = np.array(["b", nulls_fixture, "a", "b"], dtype=object) + result = safe_sort(values) + expected = np.array(["a", "b", "b", nulls_fixture], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_safe_sort_multiindex(): + # GH#48412 + arr1 = Series([2, 1, NA, NA], dtype="Int64") + arr2 = [2, 1, 3, 3] + midx = MultiIndex.from_arrays([arr1, arr2]) + result = safe_sort(midx) + expected = MultiIndex.from_arrays( + [Series([1, 2, NA, NA], dtype="Int64"), [1, 2, 3, 3]] + ) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_doctools.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_doctools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30a86f81453efcee3faf8c300c8856ceae1d2584 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_doctools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca664888f352e4f1b8eb84519ee4a9b8329f64db Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_test_decorators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_test_decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b14ffd4cc5d79d705afbffde564f2e59df702d8 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_test_decorators.cpython-310.pyc differ