diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..245a171fea74bc9409a315b64d157a37b3da6eaa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__init__.py @@ -0,0 +1,43 @@ +from pandas.core.arrays.arrow import ArrowExtensionArray +from pandas.core.arrays.base import ( + ExtensionArray, + ExtensionOpsMixin, + ExtensionScalarOpsMixin, +) +from pandas.core.arrays.boolean import BooleanArray +from pandas.core.arrays.categorical import Categorical +from pandas.core.arrays.datetimes import DatetimeArray +from pandas.core.arrays.floating import FloatingArray +from pandas.core.arrays.integer import IntegerArray +from pandas.core.arrays.interval import IntervalArray +from pandas.core.arrays.masked import BaseMaskedArray +from pandas.core.arrays.numpy_ import NumpyExtensionArray +from pandas.core.arrays.period import ( + PeriodArray, + period_array, +) +from pandas.core.arrays.sparse import SparseArray +from pandas.core.arrays.string_ import StringArray +from pandas.core.arrays.string_arrow import ArrowStringArray +from pandas.core.arrays.timedeltas import TimedeltaArray + +__all__ = [ + "ArrowExtensionArray", + "ExtensionArray", + "ExtensionOpsMixin", + "ExtensionScalarOpsMixin", + "ArrowStringArray", + "BaseMaskedArray", + "BooleanArray", + "Categorical", + "DatetimeArray", + "FloatingArray", + "IntegerArray", + "IntervalArray", + "NumpyExtensionArray", + "PeriodArray", + "period_array", + "SparseArray", + "StringArray", + "TimedeltaArray", +] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a146f1892c81cb43ccad51bc0b5d29f5046c791 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03bdb3f8f06cc22bd6cd3200c52a979666d971de Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..605fab8df4e366c8425af4aa4babe9b8bdac928d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1091d226704c5b3e277afc4cfb0730f8b54816f1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..815f2e4e8b4f0e7434ee4fa115ebd64a1d38d7df Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b99b3c2e019c4ead807e8d2e41ea346048d66c1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26cf42405f519705848a699a91524a80793f04e9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a76522e7fcbc5dbd3c4fb679b15cc38591e4ad0c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b6b22831d7b1a8f923441678a4073a539315660 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25663a8729169d22f9a0b065319cb1de3fb764d4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..393d8168c15c542c2a5f1fc257dd4a10b02419ed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f996c0c5564766010d12b1e3454e3e676faab2f Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a5cfeba8192f9db7048814b4d9826d024dcc7b2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9828ee8d313455e4b6f1a31cb876e97e80c136f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34251f5cea57455769ff5d2b229e992e156d6dc3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9070fa683160042a40dbde9682b324f98b294db6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dba4463a2f24259c512d6a858192ade95613aacc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2cd5a91133e00e8df20c3e211e94d3a830467a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90ef3d02c1af26448fb51e340c08a8e40b24edae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d0719d92a4775c16dd970069fed0db61f083c54a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..cc41985843574d4b5d671d730e77fc41109ca9ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from typing import Literal + +import numpy as np + +from pandas.compat import pa_version_under10p1 + +if not pa_version_under10p1: + import pyarrow as pa + import pyarrow.compute as pc + + +class ArrowStringArrayMixin: + _pa_array = None + + def __init__(self, *args, **kwargs) -> None: + raise NotImplementedError + + def _str_pad( + self, + width: int, + side: Literal["left", "right", "both"] = "left", + fillchar: str = " ", + ): + if side == "left": + pa_pad = pc.utf8_lpad + elif side == "right": + pa_pad = pc.utf8_rpad + elif side == "both": + pa_pad = pc.utf8_center + else: + raise ValueError( + f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'" + ) + return type(self)(pa_pad(self._pa_array, width=width, padding=fillchar)) + + def _str_get(self, i: int): + lengths = pc.utf8_length(self._pa_array) + if i >= 0: + out_of_bounds = pc.greater_equal(i, lengths) + start = i + stop = i + 1 + step = 1 + else: + out_of_bounds = pc.greater(-i, lengths) + start = i + stop = i - 1 + step = -1 + not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True)) + selected = pc.utf8_slice_codeunits( + self._pa_array, start=start, stop=stop, step=step + ) + null_value = pa.scalar( + None, type=self._pa_array.type # type: ignore[attr-defined] + ) + result = pc.if_else(not_out_of_bounds, selected, null_value) + return type(self)(result) + + def _str_slice_replace( + self, start: int | None = None, stop: int | None = None, repl: str | None = None + ): + if repl is None: + repl = "" + if start is None: + start = 0 + if stop is None: + stop = np.iinfo(np.int64).max + return type(self)(pc.utf8_replace_slice(self._pa_array, start, stop, repl)) + + def _str_capitalize(self): + return type(self)(pc.utf8_capitalize(self._pa_array)) + + def _str_title(self): + return type(self)(pc.utf8_title(self._pa_array)) + + def _str_swapcase(self): + return type(self)(pc.utf8_swapcase(self._pa_array)) + + def _str_removesuffix(self, suffix: str): + ends_with = pc.ends_with(self._pa_array, pattern=suffix) + removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix)) + result = pc.if_else(ends_with, removed, self._pa_array) + return type(self)(result) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..0da121c36644ac8b8fb6509acd62f90887db2ad0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py @@ -0,0 +1,547 @@ +from __future__ import annotations + +from functools import wraps +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, + overload, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import is_supported_dtype +from pandas._typing import ( + 
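The ArrowStringArrayMixin above routes string operations to pyarrow.compute kernels (utf8_lpad/utf8_rpad/utf8_center, utf8_slice_codeunits, ends_with, and so on). A minimal sketch of what that looks like through the public API, assuming pyarrow is installed and using the Arrow-backed string dtype; the commented values are the expected results, not output captured from this build:

import pandas as pd

# Arrow-backed strings dispatch .str accessor calls to pyarrow.compute kernels
# like the ones used in the mixin above.
s = pd.Series(["spam", "egg", None], dtype="string[pyarrow]")

print(s.str.pad(6, side="left", fillchar="."))   # "..spam", "...egg", <NA>
print(s.str.get(-1))                             # "m", "g", <NA>
print(s.str.removesuffix("g"))                   # "spam", "eg", <NA>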
ArrayLike, + AxisInt, + Dtype, + F, + FillnaOptions, + PositionalIndexer2D, + PositionalIndexerTuple, + ScalarIndexer, + Self, + SequenceIndexer, + Shape, + TakeIndexer, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc +from pandas.util._validators import ( + validate_bool_kwarg, + validate_fillna_kwargs, + validate_insert_loc, +) + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import array_equivalent + +from pandas.core import missing +from pandas.core.algorithms import ( + take, + unique, + value_counts_internal as value_counts, +) +from pandas.core.array_algos.quantile import quantile_with_mask +from pandas.core.array_algos.transforms import shift +from pandas.core.arrays.base import ExtensionArray +from pandas.core.construction import extract_array +from pandas.core.indexers import check_array_indexer +from pandas.core.sorting import nargminmax + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + NumpySorter, + NumpyValueArrayLike, + ) + + from pandas import Series + + +def ravel_compat(meth: F) -> F: + """ + Decorator to ravel a 2D array before passing it to a cython operation, + then reshape the result to our own shape. + """ + + @wraps(meth) + def method(self, *args, **kwargs): + if self.ndim == 1: + return meth(self, *args, **kwargs) + + flags = self._ndarray.flags + flat = self.ravel("K") + result = meth(flat, *args, **kwargs) + order = "F" if flags.f_contiguous else "C" + return result.reshape(self.shape, order=order) + + return cast(F, method) + + +class NDArrayBackedExtensionArray(NDArrayBacked, ExtensionArray): + """ + ExtensionArray that is backed by a single NumPy ndarray. + """ + + _ndarray: np.ndarray + + # scalar used to denote NA value inside our self._ndarray, e.g. -1 + # for Categorical, iNaT for Period. Outside of object dtype, + # self.isna() should be exactly locations in self._ndarray with + # _internal_fill_value. + _internal_fill_value: Any + + def _box_func(self, x): + """ + Wrap numpy type in our dtype.type if necessary. + """ + return x + + def _validate_scalar(self, value): + # used by NDArrayBackedExtensionIndex.insert + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + + def view(self, dtype: Dtype | None = None) -> ArrayLike: + # We handle datetime64, datetime64tz, timedelta64, and period + # dtypes here. Everything else we pass through to the underlying + # ndarray. 
+ if dtype is None or dtype is self.dtype: + return self._from_backing_data(self._ndarray) + + if isinstance(dtype, type): + # we sometimes pass non-dtype objects, e.g np.ndarray; + # pass those through to the underlying ndarray + return self._ndarray.view(dtype) + + dtype = pandas_dtype(dtype) + arr = self._ndarray + + if isinstance(dtype, PeriodDtype): + cls = dtype.construct_array_type() + return cls(arr.view("i8"), dtype=dtype) + elif isinstance(dtype, DatetimeTZDtype): + dt_cls = dtype.construct_array_type() + dt64_values = arr.view(f"M8[{dtype.unit}]") + return dt_cls._simple_new(dt64_values, dtype=dtype) + elif lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype): + from pandas.core.arrays import DatetimeArray + + dt64_values = arr.view(dtype) + return DatetimeArray._simple_new(dt64_values, dtype=dtype) + + elif lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype): + from pandas.core.arrays import TimedeltaArray + + td64_values = arr.view(dtype) + return TimedeltaArray._simple_new(td64_values, dtype=dtype) + + # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible + # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None, + # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + return arr.view(dtype=dtype) # type: ignore[arg-type] + + def take( + self, + indices: TakeIndexer, + *, + allow_fill: bool = False, + fill_value: Any = None, + axis: AxisInt = 0, + ) -> Self: + if allow_fill: + fill_value = self._validate_scalar(fill_value) + + new_data = take( + self._ndarray, + indices, + allow_fill=allow_fill, + fill_value=fill_value, + axis=axis, + ) + return self._from_backing_data(new_data) + + # ------------------------------------------------------------------------ + + def equals(self, other) -> bool: + if type(self) is not type(other): + return False + if self.dtype != other.dtype: + return False + return bool(array_equivalent(self._ndarray, other._ndarray, dtype_equal=True)) + + @classmethod + def _from_factorized(cls, values, original): + assert values.dtype == original._ndarray.dtype + return original._from_backing_data(values) + + def _values_for_argsort(self) -> np.ndarray: + return self._ndarray + + def _values_for_factorize(self): + return self._ndarray, self._internal_fill_value + + def _hash_pandas_object( + self, *, encoding: str, hash_key: str, categorize: bool + ) -> npt.NDArray[np.uint64]: + from pandas.core.util.hashing import hash_array + + values = self._ndarray + return hash_array( + values, encoding=encoding, hash_key=hash_key, categorize=categorize + ) + + # Signature of "argmin" incompatible with supertype "ExtensionArray" + def argmin(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override] + # override base class by adding axis keyword + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmin", axis=axis) + + # Signature of "argmax" incompatible with supertype "ExtensionArray" + def argmax(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override] + # override base class by adding axis keyword + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmax", axis=axis) + + def unique(self) -> Self: + new_data = unique(self._ndarray) + return self._from_backing_data(new_data) + + @classmethod + @doc(ExtensionArray._concat_same_type) + def 
_concat_same_type( + cls, + to_concat: Sequence[Self], + axis: AxisInt = 0, + ) -> Self: + if not lib.dtypes_all_equal([x.dtype for x in to_concat]): + dtypes = {str(x.dtype) for x in to_concat} + raise ValueError("to_concat must have the same dtype", dtypes) + + return super()._concat_same_type(to_concat, axis=axis) + + @doc(ExtensionArray.searchsorted) + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + npvalue = self._validate_setitem_value(value) + return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter) + + @doc(ExtensionArray.shift) + def shift(self, periods: int = 1, fill_value=None): + # NB: shift is always along axis=0 + axis = 0 + fill_value = self._validate_scalar(fill_value) + new_values = shift(self._ndarray, periods, axis, fill_value) + + return self._from_backing_data(new_values) + + def __setitem__(self, key, value) -> None: + key = check_array_indexer(self, key) + value = self._validate_setitem_value(value) + self._ndarray[key] = value + + def _validate_setitem_value(self, value): + return value + + @overload + def __getitem__(self, key: ScalarIndexer) -> Any: + ... + + @overload + def __getitem__( + self, + key: SequenceIndexer | PositionalIndexerTuple, + ) -> Self: + ... + + def __getitem__( + self, + key: PositionalIndexer2D, + ) -> Self | Any: + if lib.is_integer(key): + # fast-path + result = self._ndarray[key] + if self.ndim == 1: + return self._box_func(result) + return self._from_backing_data(result) + + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Union[int, slice, ndarray]") + key = extract_array(key, extract_numpy=True) # type: ignore[assignment] + key = check_array_indexer(self, key) + result = self._ndarray[key] + if lib.is_scalar(result): + return self._box_func(result) + + result = self._from_backing_data(result) + return result + + def _fill_mask_inplace( + self, method: str, limit: int | None, mask: npt.NDArray[np.bool_] + ) -> None: + # (for now) when self.ndim == 2, we assume axis=0 + func = missing.get_fill_func(method, ndim=self.ndim) + func(self._ndarray.T, limit=limit, mask=mask.T) + + def _pad_or_backfill( + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + mask = self.isna() + if mask.any(): + # (for now) when self.ndim == 2, we assume axis=0 + func = missing.get_fill_func(method, ndim=self.ndim) + + npvalues = self._ndarray.T + if copy: + npvalues = npvalues.copy() + func(npvalues, limit=limit, limit_area=limit_area, mask=mask.T) + npvalues = npvalues.T + + if copy: + new_values = self._from_backing_data(npvalues) + else: + new_values = self + + else: + if copy: + new_values = self.copy() + else: + new_values = self + return new_values + + @doc(ExtensionArray.fillna) + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + value, method = validate_fillna_kwargs( + value, method, validate_scalar_dict_value=False + ) + + mask = self.isna() + # error: Argument 2 to "check_value_size" has incompatible type + # "ExtensionArray"; expected "ndarray" + value = missing.check_value_size( + value, mask, len(self) # type: ignore[arg-type] + ) + + if mask.any(): + if method is not None: + # (for now) when self.ndim == 2, we assume axis=0 + func = missing.get_fill_func(method, ndim=self.ndim) + 
npvalues = self._ndarray.T + if copy: + npvalues = npvalues.copy() + func(npvalues, limit=limit, mask=mask.T) + npvalues = npvalues.T + + # TODO: NumpyExtensionArray didn't used to copy, need tests + # for this + new_values = self._from_backing_data(npvalues) + else: + # fill with value + if copy: + new_values = self.copy() + else: + new_values = self[:] + new_values[mask] = value + else: + # We validate the fill_value even if there is nothing to fill + if value is not None: + self._validate_setitem_value(value) + + if not copy: + new_values = self[:] + else: + new_values = self.copy() + return new_values + + # ------------------------------------------------------------------------ + # Reductions + + def _wrap_reduction_result(self, axis: AxisInt | None, result): + if axis is None or self.ndim == 1: + return self._box_func(result) + return self._from_backing_data(result) + + # ------------------------------------------------------------------------ + # __array_function__ methods + + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + """ + Analogue to np.putmask(self, mask, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + + Raises + ------ + TypeError + If value cannot be cast to self.dtype. + """ + value = self._validate_setitem_value(value) + + np.putmask(self._ndarray, mask, value) + + def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self: + """ + Analogue to np.where(mask, self, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + + Raises + ------ + TypeError + If value cannot be cast to self.dtype. + """ + value = self._validate_setitem_value(value) + + res_values = np.where(mask, self._ndarray, value) + if res_values.dtype != self._ndarray.dtype: + raise AssertionError( + # GH#56410 + "Something has gone wrong, please report a bug at " + "github.com/pandas-dev/pandas/" + ) + return self._from_backing_data(res_values) + + # ------------------------------------------------------------------------ + # Index compat methods + + def insert(self, loc: int, item) -> Self: + """ + Make new ExtensionArray inserting new item at location. Follows + Python list.append semantics for negative values. + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + type(self) + """ + loc = validate_insert_loc(loc, len(self)) + + code = self._validate_scalar(item) + + new_vals = np.concatenate( + ( + self._ndarray[:loc], + np.asarray([code], dtype=self._ndarray.dtype), + self._ndarray[loc:], + ) + ) + return self._from_backing_data(new_vals) + + # ------------------------------------------------------------------------ + # Additional array methods + # These are not part of the EA API, but we implement them because + # pandas assumes they're there. + + def value_counts(self, dropna: bool = True) -> Series: + """ + Return a Series containing counts of unique values. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NA values. 
+ + Returns + ------- + Series + """ + if self.ndim != 1: + raise NotImplementedError + + from pandas import ( + Index, + Series, + ) + + if dropna: + # error: Unsupported operand type for ~ ("ExtensionArray") + values = self[~self.isna()]._ndarray # type: ignore[operator] + else: + values = self._ndarray + + result = value_counts(values, sort=False, dropna=dropna) + + index_arr = self._from_backing_data(np.asarray(result.index._data)) + index = Index(index_arr, name=result.index.name) + return Series(result._values, index=index, name=result.name, copy=False) + + def _quantile( + self, + qs: npt.NDArray[np.float64], + interpolation: str, + ) -> Self: + # TODO: disable for Categorical if not ordered? + + mask = np.asarray(self.isna()) + arr = self._ndarray + fill_value = self._internal_fill_value + + res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation) + + res_values = self._cast_quantile_result(res_values) + return self._from_backing_data(res_values) + + # TODO: see if we can share this with other dispatch-wrapping methods + def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray: + """ + Cast the result of quantile_with_mask to an appropriate dtype + to pass to _from_backing_data in _quantile. + """ + return res_values + + # ------------------------------------------------------------------------ + # numpy-like methods + + @classmethod + def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self: + """ + Analogous to np.empty(shape, dtype=dtype) + + Parameters + ---------- + shape : tuple[int] + dtype : ExtensionDtype + """ + # The base implementation uses a naive approach to find the dtype + # for the backing ndarray + arr = cls._from_sequence([], dtype=dtype) + backing = np.empty(shape, dtype=arr._ndarray.dtype) + return arr._from_backing_data(backing) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py new file mode 100644 index 0000000000000000000000000000000000000000..3e89391324ad4a90235da230250758662822678f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py @@ -0,0 +1,207 @@ +""" +Helper functions to generate range-like data for DatetimeArray +(and possibly TimedeltaArray/PeriodArray) +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.lib import i8max +from pandas._libs.tslibs import ( + BaseOffset, + OutOfBoundsDatetime, + Timedelta, + Timestamp, + iNaT, +) + +if TYPE_CHECKING: + from pandas._typing import npt + + +def generate_regular_range( + start: Timestamp | Timedelta | None, + end: Timestamp | Timedelta | None, + periods: int | None, + freq: BaseOffset, + unit: str = "ns", +) -> npt.NDArray[np.intp]: + """ + Generate a range of dates or timestamps with the spans between dates + described by the given `freq` DateOffset. + + Parameters + ---------- + start : Timedelta, Timestamp or None + First point of produced date range. + end : Timedelta, Timestamp or None + Last point of produced date range. + periods : int or None + Number of periods in produced date range. + freq : Tick + Describes space between dates in produced date range. + unit : str, default "ns" + The resolution the output is meant to represent. + + Returns + ------- + ndarray[np.int64] + Representing the given resolution. 
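As a rough illustration of the stride arithmetic in generate_regular_range for the start-plus-periods case, the same i8 values can be reproduced by hand and checked against pd.date_range; this is a sketch using public APIs, with variable names that loosely follow the function body:

import numpy as np
import pandas as pd

start = pd.Timestamp("2024-01-01")
freq = pd.tseries.frequencies.to_offset("h")
periods = 5

stride = pd.Timedelta(freq).value      # nanoseconds per step of the fixed freq
b = start.value                        # start as an i8 nanosecond timestamp
e = b + periods * stride               # exclusive endpoint for np.arange
values = np.arange(b, e, stride, dtype=np.int64)

# The public API yields the same i8 values for this simple in-bounds case.
expected = pd.date_range(start, periods=periods, freq="h").asi8
assert (values == expected).all()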
+ """ + istart = start._value if start is not None else None + iend = end._value if end is not None else None + freq.nanos # raises if non-fixed frequency + td = Timedelta(freq) + b: int + e: int + try: + td = td.as_unit(unit, round_ok=False) + except ValueError as err: + raise ValueError( + f"freq={freq} is incompatible with unit={unit}. " + "Use a lower freq or a higher unit instead." + ) from err + stride = int(td._value) + + if periods is None and istart is not None and iend is not None: + b = istart + # cannot just use e = Timestamp(end) + 1 because arange breaks when + # stride is too large, see GH10887 + e = b + (iend - b) // stride * stride + stride // 2 + 1 + elif istart is not None and periods is not None: + b = istart + e = _generate_range_overflow_safe(b, periods, stride, side="start") + elif iend is not None and periods is not None: + e = iend + stride + b = _generate_range_overflow_safe(e, periods, stride, side="end") + else: + raise ValueError( + "at least 'start' or 'end' should be specified if a 'period' is given." + ) + + with np.errstate(over="raise"): + # If the range is sufficiently large, np.arange may overflow + # and incorrectly return an empty array if not caught. + try: + values = np.arange(b, e, stride, dtype=np.int64) + except FloatingPointError: + xdr = [b] + while xdr[-1] != e: + xdr.append(xdr[-1] + stride) + values = np.array(xdr[:-1], dtype=np.int64) + return values + + +def _generate_range_overflow_safe( + endpoint: int, periods: int, stride: int, side: str = "start" +) -> int: + """ + Calculate the second endpoint for passing to np.arange, checking + to avoid an integer overflow. Catch OverflowError and re-raise + as OutOfBoundsDatetime. + + Parameters + ---------- + endpoint : int + nanosecond timestamp of the known endpoint of the desired range + periods : int + number of periods in the desired range + stride : int + nanoseconds between periods in the desired range + side : {'start', 'end'} + which end of the range `endpoint` refers to + + Returns + ------- + other_end : int + + Raises + ------ + OutOfBoundsDatetime + """ + # GH#14187 raise instead of incorrectly wrapping around + assert side in ["start", "end"] + + i64max = np.uint64(i8max) + msg = f"Cannot generate range with {side}={endpoint} and periods={periods}" + + with np.errstate(over="raise"): + # if periods * strides cannot be multiplied within the *uint64* bounds, + # we cannot salvage the operation by recursing, so raise + try: + addend = np.uint64(periods) * np.uint64(np.abs(stride)) + except FloatingPointError as err: + raise OutOfBoundsDatetime(msg) from err + + if np.abs(addend) <= i64max: + # relatively easy case without casting concerns + return _generate_range_overflow_safe_signed(endpoint, periods, stride, side) + + elif (endpoint > 0 and side == "start" and stride > 0) or ( + endpoint < 0 < stride and side == "end" + ): + # no chance of not-overflowing + raise OutOfBoundsDatetime(msg) + + elif side == "end" and endpoint - stride <= i64max < endpoint: + # in _generate_regular_range we added `stride` thereby overflowing + # the bounds. Adjust to fix this. 
+ return _generate_range_overflow_safe( + endpoint - stride, periods - 1, stride, side + ) + + # split into smaller pieces + mid_periods = periods // 2 + remaining = periods - mid_periods + assert 0 < remaining < periods, (remaining, periods, endpoint, stride) + + midpoint = int(_generate_range_overflow_safe(endpoint, mid_periods, stride, side)) + return _generate_range_overflow_safe(midpoint, remaining, stride, side) + + +def _generate_range_overflow_safe_signed( + endpoint: int, periods: int, stride: int, side: str +) -> int: + """ + A special case for _generate_range_overflow_safe where `periods * stride` + can be calculated without overflowing int64 bounds. + """ + assert side in ["start", "end"] + if side == "end": + stride *= -1 + + with np.errstate(over="raise"): + addend = np.int64(periods) * np.int64(stride) + try: + # easy case with no overflows + result = np.int64(endpoint) + addend + if result == iNaT: + # Putting this into a DatetimeArray/TimedeltaArray + # would incorrectly be interpreted as NaT + raise OverflowError + return int(result) + except (FloatingPointError, OverflowError): + # with endpoint negative and addend positive we risk + # FloatingPointError; with reversed signed we risk OverflowError + pass + + # if stride and endpoint had opposite signs, then endpoint + addend + # should never overflow. so they must have the same signs + assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0) + + if stride > 0: + # watch out for very special case in which we just slightly + # exceed implementation bounds, but when passing the result to + # np.arange will get a result slightly within the bounds + + uresult = np.uint64(endpoint) + np.uint64(addend) + i64max = np.uint64(i8max) + assert uresult > i64max + if uresult <= i64max + np.uint64(stride): + return int(uresult) + + raise OutOfBoundsDatetime( + f"Cannot generate range with {side}={endpoint} and periods={periods}" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_utils.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6b46396d5efdfa4301a5362c8a5a71678345479b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/_utils.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs import lib +from pandas.errors import LossySetitemError + +from pandas.core.dtypes.cast import np_can_hold_element +from pandas.core.dtypes.common import is_numeric_dtype + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + npt, + ) + + +def to_numpy_dtype_inference( + arr: ArrayLike, dtype: npt.DTypeLike | None, na_value, hasna: bool +) -> tuple[npt.DTypeLike, Any]: + if dtype is None and is_numeric_dtype(arr.dtype): + dtype_given = False + if hasna: + if arr.dtype.kind == "b": + dtype = np.dtype(np.object_) + else: + if arr.dtype.kind in "iu": + dtype = np.dtype(np.float64) + else: + dtype = arr.dtype.numpy_dtype # type: ignore[union-attr] + if na_value is lib.no_default: + na_value = np.nan + else: + dtype = arr.dtype.numpy_dtype # type: ignore[union-attr] + elif dtype is not None: + dtype = np.dtype(dtype) + dtype_given = True + else: + dtype_given = True + + if na_value is lib.no_default: + if dtype is None or not hasna: + na_value = arr.dtype.na_value + elif dtype.kind == "f": # type: ignore[union-attr] + na_value = np.nan + elif dtype.kind == "M": # type: ignore[union-attr] + 
na_value = np.datetime64("nat") + elif dtype.kind == "m": # type: ignore[union-attr] + na_value = np.timedelta64("nat") + else: + na_value = arr.dtype.na_value + + if not dtype_given and hasna: + try: + np_can_hold_element(dtype, na_value) # type: ignore[arg-type] + except LossySetitemError: + dtype = np.dtype(np.object_) + return dtype, na_value diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ade4ceb22ea8af0ddc592912fdfd42e66c784aa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..564c9b1aa2251724ee44cd842ea7273dbe44010f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c960aa1890f164fabaef4eb7156c3f37606a2318 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..563c87843765d1db04d8d77d4dad3756bbf535ed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b9a3e23866b41d1ea92952a4cc707d4a8146c2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/base.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/base.py new file mode 100644 index 0000000000000000000000000000000000000000..abfe2369b0d0dba2f3ef34e48a490158ec948e90 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/base.py @@ -0,0 +1,2588 @@ +""" +An interface for extending pandas with custom arrays. + +.. warning:: + + This is an experimental API and subject to breaking changes + without warning. 
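For reference, the dtype inference that to_numpy_dtype_inference (defined above in _utils.py) implements is visible through the public nullable-array API; a hedged sketch of the expected behaviour with this logic in place:

import numpy as np
import pandas as pd

ints = pd.array([1, 2, None], dtype="Int64")
res = ints.to_numpy()             # no dtype given and NA present -> float64 with NaN
print(res.dtype, res)

bools = pd.array([True, None], dtype="boolean")
print(bools.to_numpy().dtype)     # bool cannot hold NA, so the dtype falls back to object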
+""" +from __future__ import annotations + +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + algos as libalgos, + lib, +) +from pandas.compat import set_function_name +from pandas.compat.numpy import function as nv +from pandas.errors import AbstractMethodError +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import ( + validate_bool_kwarg, + validate_fillna_kwargs, + validate_insert_loc, +) + +from pandas.core.dtypes.cast import maybe_cast_pointwise_result +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + arraylike, + missing, + roperator, +) +from pandas.core.algorithms import ( + duplicated, + factorize_array, + isin, + map_array, + mode, + rank, + unique, +) +from pandas.core.array_algos.quantile import quantile_with_mask +from pandas.core.missing import _fill_limit_area_1d +from pandas.core.sorting import ( + nargminmax, + nargsort, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + AstypeArg, + AxisInt, + Dtype, + DtypeObj, + FillnaOptions, + InterpolateOptions, + NumpySorter, + NumpyValueArrayLike, + PositionalIndexer, + ScalarIndexer, + Self, + SequenceIndexer, + Shape, + SortKind, + TakeIndexer, + npt, + ) + + from pandas import Index + +_extension_array_shared_docs: dict[str, str] = {} + + +class ExtensionArray: + """ + Abstract base class for custom 1-D array types. + + pandas will recognize instances of this class as proper arrays + with a custom type and will not attempt to coerce them to objects. They + may be stored directly inside a :class:`DataFrame` or :class:`Series`. + + Attributes + ---------- + dtype + nbytes + ndim + shape + + Methods + ------- + argsort + astype + copy + dropna + duplicated + factorize + fillna + equals + insert + interpolate + isin + isna + ravel + repeat + searchsorted + shift + take + tolist + unique + view + _accumulate + _concat_same_type + _explode + _formatter + _from_factorized + _from_sequence + _from_sequence_of_strings + _hash_pandas_object + _pad_or_backfill + _reduce + _values_for_argsort + _values_for_factorize + + Notes + ----- + The interface includes the following abstract methods that must be + implemented by subclasses: + + * _from_sequence + * _from_factorized + * __getitem__ + * __len__ + * __eq__ + * dtype + * nbytes + * isna + * take + * copy + * _concat_same_type + * interpolate + + A default repr displaying the type, (truncated) data, length, + and dtype is provided. It can be customized or replaced by + by overriding: + + * __repr__ : A default repr for the ExtensionArray. + * _formatter : Print scalars inside a Series or DataFrame. + + Some methods require casting the ExtensionArray to an ndarray of Python + objects with ``self.astype(object)``, which may be expensive. 
When + performance is a concern, we highly recommend overriding the following + methods: + + * fillna + * _pad_or_backfill + * dropna + * unique + * factorize / _values_for_factorize + * argsort, argmax, argmin / _values_for_argsort + * searchsorted + * map + + The remaining methods implemented on this class should be performant, + as they only compose abstract methods. Still, a more efficient + implementation may be available, and these methods can be overridden. + + One can implement methods to handle array accumulations or reductions. + + * _accumulate + * _reduce + + One can implement methods to handle parsing from strings that will be used + in methods such as ``pandas.io.parsers.read_csv``. + + * _from_sequence_of_strings + + This class does not inherit from 'abc.ABCMeta' for performance reasons. + Methods and properties required by the interface raise + ``pandas.errors.AbstractMethodError`` and no ``register`` method is + provided for registering virtual subclasses. + + ExtensionArrays are limited to 1 dimension. + + They may be backed by none, one, or many NumPy arrays. For example, + ``pandas.Categorical`` is an extension array backed by two arrays, + one for codes and one for categories. An array of IPv6 address may + be backed by a NumPy structured array with two fields, one for the + lower 64 bits and one for the upper 64 bits. Or they may be backed + by some other storage type, like Python lists. Pandas makes no + assumptions on how the data are stored, just that it can be converted + to a NumPy array. + The ExtensionArray interface does not impose any rules on how this data + is stored. However, currently, the backing data cannot be stored in + attributes called ``.values`` or ``._values`` to ensure full compatibility + with pandas internals. But other names as ``.data``, ``._data``, + ``._items``, ... can be freely used. + + If implementing NumPy's ``__array_ufunc__`` interface, pandas expects + that + + 1. You defer by returning ``NotImplemented`` when any Series are present + in `inputs`. Pandas will extract the arrays and call the ufunc again. + 2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class. + Pandas inspect this to determine whether the ufunc is valid for the + types present. + + See :ref:`extending.extension.ufunc` for more. + + By default, ExtensionArrays are not hashable. Immutable subclasses may + override this behavior. + + Examples + -------- + Please see the following: + + https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/list/array.py + """ + + # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray. + # Don't override this. + _typ = "extension" + + # similar to __array_priority__, positions ExtensionArray after Index, + # Series, and DataFrame. EA subclasses may override to choose which EA + # subclass takes priority. If overriding, the value should always be + # strictly less than 2000 to be below Index.__pandas_priority__. + __pandas_priority__ = 1000 + + # ------------------------------------------------------------------------ + # Constructors + # ------------------------------------------------------------------------ + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + """ + Construct a new ExtensionArray from a sequence of scalars. + + Parameters + ---------- + scalars : Sequence + Each element will be an instance of the scalar type for this + array, ``cls.dtype.type`` or be converted into this type in this method. 
+ dtype : dtype, optional + Construct for this particular dtype. This should be a Dtype + compatible with the ExtensionArray. + copy : bool, default False + If True, copy the underlying data. + + Returns + ------- + ExtensionArray + + Examples + -------- + >>> pd.arrays.IntegerArray._from_sequence([4, 5]) + + [4, 5] + Length: 2, dtype: Int64 + """ + raise AbstractMethodError(cls) + + @classmethod + def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: + """ + Strict analogue to _from_sequence, allowing only sequences of scalars + that should be specifically inferred to the given dtype. + + Parameters + ---------- + scalars : sequence + dtype : ExtensionDtype + + Raises + ------ + TypeError or ValueError + + Notes + ----- + This is called in a try/except block when casting the result of a + pointwise operation. + """ + try: + return cls._from_sequence(scalars, dtype=dtype, copy=False) + except (ValueError, TypeError): + raise + except Exception: + warnings.warn( + "_from_scalars should only raise ValueError or TypeError. " + "Consider overriding _from_scalars where appropriate.", + stacklevel=find_stack_level(), + ) + raise + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ): + """ + Construct a new ExtensionArray from a sequence of strings. + + Parameters + ---------- + strings : Sequence + Each element will be an instance of the scalar type for this + array, ``cls.dtype.type``. + dtype : dtype, optional + Construct for this particular dtype. This should be a Dtype + compatible with the ExtensionArray. + copy : bool, default False + If True, copy the underlying data. + + Returns + ------- + ExtensionArray + + Examples + -------- + >>> pd.arrays.IntegerArray._from_sequence_of_strings(["1", "2", "3"]) + + [1, 2, 3] + Length: 3, dtype: Int64 + """ + raise AbstractMethodError(cls) + + @classmethod + def _from_factorized(cls, values, original): + """ + Reconstruct an ExtensionArray after factorization. + + Parameters + ---------- + values : ndarray + An integer ndarray with the factorized values. + original : ExtensionArray + The original ExtensionArray that factorize was called on. + + See Also + -------- + factorize : Top-level factorize method that dispatches here. + ExtensionArray.factorize : Encode the extension array as an enumerated type. + + Examples + -------- + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), + ... pd.Interval(1, 5), pd.Interval(1, 5)]) + >>> codes, uniques = pd.factorize(interv_arr) + >>> pd.arrays.IntervalArray._from_factorized(uniques, interv_arr) + + [(0, 1], (1, 5]] + Length: 2, dtype: interval[int64, right] + """ + raise AbstractMethodError(cls) + + # ------------------------------------------------------------------------ + # Must be a Sequence + # ------------------------------------------------------------------------ + @overload + def __getitem__(self, item: ScalarIndexer) -> Any: + ... + + @overload + def __getitem__(self, item: SequenceIndexer) -> Self: + ... + + def __getitem__(self, item: PositionalIndexer) -> Self | Any: + """ + Select a subset of self. + + Parameters + ---------- + item : int, slice, or ndarray + * int: The position in 'self' to get. 
+ + * slice: A slice object, where 'start', 'stop', and 'step' are + integers or None + + * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' + + * list[int]: A list of int + + Returns + ------- + item : scalar or ExtensionArray + + Notes + ----- + For scalar ``item``, return a scalar value suitable for the array's + type. This should be an instance of ``self.dtype.type``. + + For slice ``key``, return an instance of ``ExtensionArray``, even + if the slice is length 0 or 1. + + For a boolean mask, return an instance of ``ExtensionArray``, filtered + to the values where ``item`` is True. + """ + raise AbstractMethodError(self) + + def __setitem__(self, key, value) -> None: + """ + Set one or more values inplace. + + This method is not required to satisfy the pandas extension array + interface. + + Parameters + ---------- + key : int, ndarray, or slice + When called from, e.g. ``Series.__setitem__``, ``key`` will be + one of + + * scalar int + * ndarray of integers. + * boolean ndarray + * slice object + + value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object + value or values to be set of ``key``. + + Returns + ------- + None + """ + # Some notes to the ExtensionArray implementer who may have ended up + # here. While this method is not required for the interface, if you + # *do* choose to implement __setitem__, then some semantics should be + # observed: + # + # * Setting multiple values : ExtensionArrays should support setting + # multiple values at once, 'key' will be a sequence of integers and + # 'value' will be a same-length sequence. + # + # * Broadcasting : For a sequence 'key' and a scalar 'value', + # each position in 'key' should be set to 'value'. + # + # * Coercion : Most users will expect basic coercion to work. For + # example, a string like '2018-01-01' is coerced to a datetime + # when setting on a datetime64ns array. In general, if the + # __init__ method coerces that value, then so should __setitem__ + # Note, also, that Series/DataFrame.where internally use __setitem__ + # on a copy of the data. + raise NotImplementedError(f"{type(self)} does not implement __setitem__.") + + def __len__(self) -> int: + """ + Length of this array + + Returns + ------- + length : int + """ + raise AbstractMethodError(self) + + def __iter__(self) -> Iterator[Any]: + """ + Iterate over elements of the array. + """ + # This needs to be implemented so that pandas recognizes extension + # arrays as list-like. The default implementation makes successive + # calls to ``__getitem__``, which may be slower than necessary. + for i in range(len(self)): + yield self[i] + + def __contains__(self, item: object) -> bool | np.bool_: + """ + Return for `item in self`. + """ + # GH37867 + # comparisons of any item to pd.NA always return pd.NA, so e.g. "a" in [pd.NA] + # would raise a TypeError. The implementation below works around that. + if is_scalar(item) and isna(item): + if not self._can_hold_na: + return False + elif item is self.dtype.na_value or isinstance(item, self.dtype.type): + return self._hasna + else: + return False + else: + # error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no + # attribute "any" + return (item == self).any() # type: ignore[union-attr] + + # error: Signature of "__eq__" incompatible with supertype "object" + def __eq__(self, other: object) -> ArrayLike: # type: ignore[override] + """ + Return for `self == other` (element-wise equality). 
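The class docstring above lists the abstract methods a subclass has to provide (_from_sequence, _from_factorized, __getitem__, __len__, __eq__, dtype, nbytes, isna, take, copy, _concat_same_type). Below is a deliberately tiny, non-production sketch of that minimum surface; AngleDtype and AngleArray are invented names used for illustration only:

import numpy as np
import pandas as pd
from pandas.api.extensions import (
    ExtensionArray,
    ExtensionDtype,
    register_extension_dtype,
    take,
)


@register_extension_dtype
class AngleDtype(ExtensionDtype):
    # Invented dtype: angles stored as float64 degrees.
    name = "angle"
    type = float
    na_value = np.nan

    @classmethod
    def construct_array_type(cls):
        return AngleArray


class AngleArray(ExtensionArray):
    def __init__(self, values) -> None:
        self._data = np.asarray(values, dtype="float64")

    # Constructors required by the interface.
    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, copy=False):
        return cls(scalars)

    @classmethod
    def _from_factorized(cls, values, original):
        return cls(values)

    # Required dunders and attributes.
    def __getitem__(self, item):
        result = self._data[item]
        return result if np.ndim(result) == 0 else type(self)(result)

    def __len__(self) -> int:
        return len(self._data)

    def __eq__(self, other):
        return self._data == np.asarray(other)

    @property
    def dtype(self) -> AngleDtype:
        return AngleDtype()

    @property
    def nbytes(self) -> int:
        return self._data.nbytes

    def isna(self):
        return np.isnan(self._data)

    def take(self, indices, *, allow_fill=False, fill_value=None):
        if allow_fill and fill_value is None:
            fill_value = self.dtype.na_value
        return type(self)(
            take(self._data, indices, allow_fill=allow_fill, fill_value=fill_value)
        )

    def copy(self):
        return type(self)(self._data.copy())

    @classmethod
    def _concat_same_type(cls, to_concat):
        return cls(np.concatenate([x._data for x in to_concat]))


# Usage sketch: the array participates in Series construction and missing-value
# handling through the interface methods defined above.
s = pd.Series(AngleArray([0.0, 90.0, np.nan]))
print(s.isna().tolist())    # [False, False, True]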
+ """ + # Implementer note: this should return a boolean numpy ndarray or + # a boolean ExtensionArray. + # When `other` is one of Series, Index, or DataFrame, this method should + # return NotImplemented (to ensure that those objects are responsible for + # first unpacking the arrays, and then dispatch the operation to the + # underlying arrays) + raise AbstractMethodError(self) + + # error: Signature of "__ne__" incompatible with supertype "object" + def __ne__(self, other: object) -> ArrayLike: # type: ignore[override] + """ + Return for `self != other` (element-wise in-equality). + """ + # error: Unsupported operand type for ~ ("ExtensionArray") + return ~(self == other) # type: ignore[operator] + + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Convert to a NumPy ndarray. + + This is similar to :meth:`numpy.asarray`, but may provide additional control + over how the conversion is done. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to pass to :meth:`numpy.asarray`. + copy : bool, default False + Whether to ensure that the returned value is a not a view on + another array. Note that ``copy=False`` does not *ensure* that + ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that + a copy is made, even if not strictly necessary. + na_value : Any, optional + The value to use for missing values. The default value depends + on `dtype` and the type of the array. + + Returns + ------- + numpy.ndarray + """ + result = np.asarray(self, dtype=dtype) + if copy or na_value is not lib.no_default: + result = result.copy() + if na_value is not lib.no_default: + result[self.isna()] = na_value + return result + + # ------------------------------------------------------------------------ + # Required attributes + # ------------------------------------------------------------------------ + + @property + def dtype(self) -> ExtensionDtype: + """ + An instance of ExtensionDtype. + + Examples + -------- + >>> pd.array([1, 2, 3]).dtype + Int64Dtype() + """ + raise AbstractMethodError(self) + + @property + def shape(self) -> Shape: + """ + Return a tuple of the array dimensions. + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.shape + (3,) + """ + return (len(self),) + + @property + def size(self) -> int: + """ + The number of elements in the array. + """ + # error: Incompatible return value type (got "signedinteger[_64Bit]", + # expected "int") [return-value] + return np.prod(self.shape) # type: ignore[return-value] + + @property + def ndim(self) -> int: + """ + Extension Arrays are only allowed to be 1-dimensional. + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.ndim + 1 + """ + return 1 + + @property + def nbytes(self) -> int: + """ + The number of bytes needed to store this object in memory. + + Examples + -------- + >>> pd.array([1, 2, 3]).nbytes + 27 + """ + # If this is expensive to compute, return an approximate lower bound + # on the number of bytes needed. + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + # Additional Methods + # ------------------------------------------------------------------------ + + @overload + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: + ... + + @overload + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: + ... + + @overload + def astype(self, dtype: AstypeArg, copy: bool = ...) 
-> ArrayLike: + ... + + def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: + """ + Cast to a NumPy array or ExtensionArray with 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + np.ndarray or pandas.api.extensions.ExtensionArray + An ``ExtensionArray`` if ``dtype`` is ``ExtensionDtype``, + otherwise a Numpy ndarray with ``dtype`` for its dtype. + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr + + [1, 2, 3] + Length: 3, dtype: Int64 + + Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``: + + >>> arr1 = arr.astype('Float64') + >>> arr1 + + [1.0, 2.0, 3.0] + Length: 3, dtype: Float64 + >>> arr1.dtype + Float64Dtype() + + Otherwise, we will get a Numpy ndarray: + + >>> arr2 = arr.astype('float64') + >>> arr2 + array([1., 2., 3.]) + >>> arr2.dtype + dtype('float64') + """ + dtype = pandas_dtype(dtype) + if dtype == self.dtype: + if not copy: + return self + else: + return self.copy() + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + return cls._from_sequence(self, dtype=dtype, copy=copy) + + elif lib.is_np_dtype(dtype, "M"): + from pandas.core.arrays import DatetimeArray + + return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy) + + elif lib.is_np_dtype(dtype, "m"): + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy) + + if not copy: + return np.asarray(self, dtype=dtype) + else: + return np.array(self, dtype=dtype, copy=copy) + + def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll: + """ + A 1-D array indicating if each value is missing. + + Returns + ------- + numpy.ndarray or pandas.api.extensions.ExtensionArray + In most cases, this should return a NumPy ndarray. For + exceptional cases like ``SparseArray``, where returning + an ndarray would be expensive, an ExtensionArray may be + returned. + + Notes + ----- + If returning an ExtensionArray, then + + * ``na_values._is_boolean`` should be True + * `na_values` should implement :func:`ExtensionArray._reduce` + * ``na_values.any`` and ``na_values.all`` should be implemented + + Examples + -------- + >>> arr = pd.array([1, 2, np.nan, np.nan]) + >>> arr.isna() + array([False, False, True, True]) + """ + raise AbstractMethodError(self) + + @property + def _hasna(self) -> bool: + # GH#22680 + """ + Equivalent to `self.isna().any()`. + + Some ExtensionArray subclasses may be able to optimize this check. + """ + return bool(self.isna().any()) + + def _values_for_argsort(self) -> np.ndarray: + """ + Return values for sorting. + + Returns + ------- + ndarray + The transformed values should maintain the ordering between values + within the array. + + See Also + -------- + ExtensionArray.argsort : Return the indices that would sort this array. + + Notes + ----- + The caller is responsible for *not* modifying these values in-place, so + it is safe for implementers to give views on ``self``. + + Functions that use this (e.g. ``ExtensionArray.argsort``) should ignore + entries with missing values in the original array (according to + ``self.isna()``). This means that the corresponding entries in the returned + array don't need to be modified to sort correctly. 
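Editor's note: before the default example that follows, here is a hedged sketch of how a third-party subclass might override ``_values_for_argsort`` when its storage is not directly comparable. ``DecimalArray`` and its ``_data`` attribute are hypothetical names used only for illustration; the rest of the interface is elided.

import numpy as np
from pandas.api.extensions import ExtensionArray

class DecimalArray(ExtensionArray):
    """Hypothetical array of decimal.Decimal objects; interface methods omitted."""

    _data: np.ndarray   # assumed object-dtype storage of Decimal values

    def _values_for_argsort(self) -> np.ndarray:
        # Map the stored Decimals to float64 so the default sorting path can use
        # fast numpy comparisons; the returned view only needs to preserve order.
        return np.asarray(self._data, dtype="float64")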
+ + Examples + -------- + In most cases, this is the underlying Numpy array of the ``ExtensionArray``: + + >>> arr = pd.array([1, 2, 3]) + >>> arr._values_for_argsort() + array([1, 2, 3]) + """ + # Note: this is used in `ExtensionArray.argsort/argmin/argmax`. + return np.array(self) + + def argsort( + self, + *, + ascending: bool = True, + kind: SortKind = "quicksort", + na_position: str = "last", + **kwargs, + ) -> np.ndarray: + """ + Return the indices that would sort this array. + + Parameters + ---------- + ascending : bool, default True + Whether the indices should result in an ascending + or descending sort. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. + na_position : {'first', 'last'}, default 'last' + If ``'first'``, put ``NaN`` values at the beginning. + If ``'last'``, put ``NaN`` values at the end. + *args, **kwargs: + Passed through to :func:`numpy.argsort`. + + Returns + ------- + np.ndarray[np.intp] + Array of indices that sort ``self``. If NaN values are contained, + NaN values are placed at the end. + + See Also + -------- + numpy.argsort : Sorting implementation used internally. + + Examples + -------- + >>> arr = pd.array([3, 1, 2, 5, 4]) + >>> arr.argsort() + array([1, 2, 0, 4, 3]) + """ + # Implementer note: You have two places to override the behavior of + # argsort. + # 1. _values_for_argsort : construct the values passed to np.argsort + # 2. argsort : total control over sorting. In case of overriding this, + # it is recommended to also override argmax/argmin + ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs) + + values = self._values_for_argsort() + return nargsort( + values, + kind=kind, + ascending=ascending, + na_position=na_position, + mask=np.asarray(self.isna()), + ) + + def argmin(self, skipna: bool = True) -> int: + """ + Return the index of minimum value. + + In case of multiple occurrences of the minimum value, the index + corresponding to the first occurrence is returned. + + Parameters + ---------- + skipna : bool, default True + + Returns + ------- + int + + See Also + -------- + ExtensionArray.argmax : Return the index of the maximum value. + + Examples + -------- + >>> arr = pd.array([3, 1, 2, 5, 4]) + >>> arr.argmin() + 1 + """ + # Implementer note: You have two places to override the behavior of + # argmin. + # 1. _values_for_argsort : construct the values used in nargminmax + # 2. argmin itself : total control over sorting. + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmin") + + def argmax(self, skipna: bool = True) -> int: + """ + Return the index of maximum value. + + In case of multiple occurrences of the maximum value, the index + corresponding to the first occurrence is returned. + + Parameters + ---------- + skipna : bool, default True + + Returns + ------- + int + + See Also + -------- + ExtensionArray.argmin : Return the index of the minimum value. + + Examples + -------- + >>> arr = pd.array([3, 1, 2, 5, 4]) + >>> arr.argmax() + 3 + """ + # Implementer note: You have two places to override the behavior of + # argmax. + # 1. _values_for_argsort : construct the values used in nargminmax + # 2. argmax itself : total control over sorting. 
+ validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmax") + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index: Index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> Self: + """ + See DataFrame.interpolate.__doc__. + + Examples + -------- + >>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3])) + >>> arr.interpolate(method="linear", + ... limit=3, + ... limit_direction="forward", + ... index=pd.Index([1, 2, 3, 4]), + ... fill_value=1, + ... copy=False, + ... axis=0, + ... limit_area="inside" + ... ) + + [0.0, 1.0, 2.0, 3.0] + Length: 4, dtype: float64 + """ + # NB: we return type(self) even if copy=False + raise NotImplementedError( + f"{type(self).__name__} does not implement interpolate" + ) + + def _pad_or_backfill( + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + """ + Pad or backfill values, used by Series/DataFrame ffill and bfill. + + Parameters + ---------- + method : {'backfill', 'bfill', 'pad', 'ffill'} + Method to use for filling holes in reindexed Series: + + * pad / ffill: propagate last valid observation forward to next valid. + * backfill / bfill: use NEXT valid observation to fill gap. + + limit : int, default None + This is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. + + copy : bool, default True + Whether to make a copy of the data before filling. If False, then + the original should be modified and no new memory should be allocated. + For ExtensionArray subclasses that cannot do this, it is at the + author's discretion whether to ignore "copy=False" or to raise. + The base class implementation ignores the keyword if any NAs are + present. + + Returns + ------- + Same type as self + + Examples + -------- + >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan]) + >>> arr._pad_or_backfill(method="backfill", limit=1) + + [, 2, 2, 3, , ] + Length: 6, dtype: Int64 + """ + + # If a 3rd-party EA has implemented this functionality in fillna, + # we warn that they need to implement _pad_or_backfill instead. + if ( + type(self).fillna is not ExtensionArray.fillna + and type(self)._pad_or_backfill is ExtensionArray._pad_or_backfill + ): + # Check for _pad_or_backfill here allows us to call + # super()._pad_or_backfill without getting this warning + warnings.warn( + "ExtensionArray.fillna 'method' keyword is deprecated. " + "In a future version. arr._pad_or_backfill will be called " + "instead. 3rd-party ExtensionArray authors need to implement " + "_pad_or_backfill.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + if limit_area is not None: + raise NotImplementedError( + f"{type(self).__name__} does not implement limit_area " + "(added in pandas 2.2). 3rd-party ExtnsionArray authors " + "need to add this argument to _pad_or_backfill." 
+ ) + return self.fillna(method=method, limit=limit) + + mask = self.isna() + + if mask.any(): + # NB: the base class does not respect the "copy" keyword + meth = missing.clean_fill_method(method) + + npmask = np.asarray(mask) + if limit_area is not None and not npmask.all(): + _fill_limit_area_1d(npmask, limit_area) + if meth == "pad": + indexer = libalgos.get_fill_indexer(npmask, limit=limit) + return self.take(indexer, allow_fill=True) + else: + # i.e. meth == "backfill" + indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1] + return self[::-1].take(indexer, allow_fill=True) + + else: + if not copy: + return self + new_values = self.copy() + return new_values + + def fillna( + self, + value: object | ArrayLike | None = None, + method: FillnaOptions | None = None, + limit: int | None = None, + copy: bool = True, + ) -> Self: + """ + Fill NA/NaN values using the specified method. + + Parameters + ---------- + value : scalar, array-like + If a scalar value is passed it is used to fill all missing values. + Alternatively, an array-like "value" can be given. It's expected + that the array-like have the same length as 'self'. + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series: + + * pad / ffill: propagate last valid observation forward to next valid. + * backfill / bfill: use NEXT valid observation to fill gap. + + .. deprecated:: 2.1.0 + + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. + + .. deprecated:: 2.1.0 + + copy : bool, default True + Whether to make a copy of the data before filling. If False, then + the original should be modified and no new memory should be allocated. + For ExtensionArray subclasses that cannot do this, it is at the + author's discretion whether to ignore "copy=False" or to raise. + The base class implementation ignores the keyword in pad/backfill + cases. + + Returns + ------- + ExtensionArray + With NA/NaN filled. + + Examples + -------- + >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan]) + >>> arr.fillna(0) + + [0, 0, 2, 3, 0, 0] + Length: 6, dtype: Int64 + """ + if method is not None: + warnings.warn( + f"The 'method' keyword in {type(self).__name__}.fillna is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + value, method = validate_fillna_kwargs(value, method) + + mask = self.isna() + # error: Argument 2 to "check_value_size" has incompatible type + # "ExtensionArray"; expected "ndarray" + value = missing.check_value_size( + value, mask, len(self) # type: ignore[arg-type] + ) + + if mask.any(): + if method is not None: + meth = missing.clean_fill_method(method) + + npmask = np.asarray(mask) + if meth == "pad": + indexer = libalgos.get_fill_indexer(npmask, limit=limit) + return self.take(indexer, allow_fill=True) + else: + # i.e. 
meth == "backfill" + indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1] + return self[::-1].take(indexer, allow_fill=True) + else: + # fill with value + if not copy: + new_values = self[:] + else: + new_values = self.copy() + new_values[mask] = value + else: + if not copy: + new_values = self[:] + else: + new_values = self.copy() + return new_values + + def dropna(self) -> Self: + """ + Return ExtensionArray without NA values. + + Returns + ------- + + Examples + -------- + >>> pd.array([1, 2, np.nan]).dropna() + + [1, 2] + Length: 2, dtype: Int64 + """ + # error: Unsupported operand type for ~ ("ExtensionArray") + return self[~self.isna()] # type: ignore[operator] + + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + """ + Return boolean ndarray denoting duplicate values. + + Parameters + ---------- + keep : {'first', 'last', False}, default 'first' + - ``first`` : Mark duplicates as ``True`` except for the first occurrence. + - ``last`` : Mark duplicates as ``True`` except for the last occurrence. + - False : Mark all duplicates as ``True``. + + Returns + ------- + ndarray[bool] + + Examples + -------- + >>> pd.array([1, 1, 2, 3, 3], dtype="Int64").duplicated() + array([False, True, False, False, True]) + """ + mask = self.isna().astype(np.bool_, copy=False) + return duplicated(values=self, keep=keep, mask=mask) + + def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray: + """ + Shift values by desired number. + + Newly introduced missing values are filled with + ``self.dtype.na_value``. + + Parameters + ---------- + periods : int, default 1 + The number of periods to shift. Negative values are allowed + for shifting backwards. + + fill_value : object, optional + The scalar value to use for newly introduced missing values. + The default is ``self.dtype.na_value``. + + Returns + ------- + ExtensionArray + Shifted. + + Notes + ----- + If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is + returned. + + If ``periods > len(self)``, then an array of size + len(self) is returned, with all values filled with + ``self.dtype.na_value``. + + For 2-dimensional ExtensionArrays, we are always shifting along axis=0. + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.shift(2) + + [, , 1] + Length: 3, dtype: Int64 + """ + # Note: this implementation assumes that `self.dtype.na_value` can be + # stored in an instance of your ExtensionArray with `self.dtype`. + if not len(self) or periods == 0: + return self.copy() + + if isna(fill_value): + fill_value = self.dtype.na_value + + empty = self._from_sequence( + [fill_value] * min(abs(periods), len(self)), dtype=self.dtype + ) + if periods > 0: + a = empty + b = self[:-periods] + else: + a = self[abs(periods) :] + b = empty + return self._concat_same_type([a, b]) + + def unique(self) -> Self: + """ + Compute the ExtensionArray of unique values. + + Returns + ------- + pandas.api.extensions.ExtensionArray + + Examples + -------- + >>> arr = pd.array([1, 2, 3, 1, 2, 3]) + >>> arr.unique() + + [1, 2, 3] + Length: 3, dtype: Int64 + """ + uniques = unique(self.astype(object)) + return self._from_sequence(uniques, dtype=self.dtype) + + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + """ + Find indices where elements should be inserted to maintain order. 
+ + Find the indices into a sorted array `self` (a) such that, if the + corresponding elements in `value` were inserted before the indices, + the order of `self` would be preserved. + + Assuming that `self` is sorted: + + ====== ================================ + `side` returned index `i` satisfies + ====== ================================ + left ``self[i-1] < value <= self[i]`` + right ``self[i-1] <= value < self[i]`` + ====== ================================ + + Parameters + ---------- + value : array-like, list or scalar + Value(s) to insert into `self`. + side : {'left', 'right'}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `self`). + sorter : 1-D array-like, optional + Optional array of integer indices that sort array a into ascending + order. They are typically the result of argsort. + + Returns + ------- + array of ints or int + If value is array-like, array of insertion points. + If value is scalar, a single integer. + + See Also + -------- + numpy.searchsorted : Similar method from NumPy. + + Examples + -------- + >>> arr = pd.array([1, 2, 3, 5]) + >>> arr.searchsorted([4]) + array([3]) + """ + # Note: the base tests provided by pandas only test the basics. + # We do not test + # 1. Values outside the range of the `data_for_sorting` fixture + # 2. Values between the values in the `data_for_sorting` fixture + # 3. Missing values. + arr = self.astype(object) + if isinstance(value, ExtensionArray): + value = value.astype(object) + return arr.searchsorted(value, side=side, sorter=sorter) + + def equals(self, other: object) -> bool: + """ + Return if another array is equivalent to this array. + + Equivalent means that both arrays have the same shape and dtype, and + all values compare equal. Missing values in the same location are + considered equal (in contrast with normal equality). + + Parameters + ---------- + other : ExtensionArray + Array to compare to this Array. + + Returns + ------- + boolean + Whether the arrays are equivalent. + + Examples + -------- + >>> arr1 = pd.array([1, 2, np.nan]) + >>> arr2 = pd.array([1, 2, np.nan]) + >>> arr1.equals(arr2) + True + """ + if type(self) != type(other): + return False + other = cast(ExtensionArray, other) + if self.dtype != other.dtype: + return False + elif len(self) != len(other): + return False + else: + equal_values = self == other + if isinstance(equal_values, ExtensionArray): + # boolean array with NA -> fill with False + equal_values = equal_values.fillna(False) + # error: Unsupported left operand type for & ("ExtensionArray") + equal_na = self.isna() & other.isna() # type: ignore[operator] + return bool((equal_values | equal_na).all()) + + def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: + """ + Pointwise comparison for set containment in the given values. + + Roughly equivalent to `np.array([x in values for x in self])` + + Parameters + ---------- + values : np.ndarray or ExtensionArray + + Returns + ------- + np.ndarray[bool] + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.isin([1]) + + [True, False, False] + Length: 3, dtype: boolean + """ + return isin(np.asarray(self), values) + + def _values_for_factorize(self) -> tuple[np.ndarray, Any]: + """ + Return an array and missing value suitable for factorization. + + Returns + ------- + values : ndarray + An array suitable for factorization. 
This should maintain order + and be a supported dtype (Float64, Int64, UInt64, String, Object). + By default, the extension array is cast to object dtype. + na_value : object + The value in `values` to consider missing. This will be treated + as NA in the factorization routines, so it will be coded as + `-1` and not included in `uniques`. By default, + ``np.nan`` is used. + + Notes + ----- + The values returned by this method are also used in + :func:`pandas.util.hash_pandas_object`. If needed, this can be + overridden in the ``self._hash_pandas_object()`` method. + + Examples + -------- + >>> pd.array([1, 2, 3])._values_for_factorize() + (array([1, 2, 3], dtype=object), nan) + """ + return self.astype(object), np.nan + + def factorize( + self, + use_na_sentinel: bool = True, + ) -> tuple[np.ndarray, ExtensionArray]: + """ + Encode the extension array as an enumerated type. + + Parameters + ---------- + use_na_sentinel : bool, default True + If True, the sentinel -1 will be used for NaN values. If False, + NaN values will be encoded as non-negative integers and will not drop the + NaN from the uniques of the values. + + .. versionadded:: 1.5.0 + + Returns + ------- + codes : ndarray + An integer NumPy array that's an indexer into the original + ExtensionArray. + uniques : ExtensionArray + An ExtensionArray containing the unique values of `self`. + + .. note:: + + uniques will *not* contain an entry for the NA value of + the ExtensionArray if there are any missing values present + in `self`. + + See Also + -------- + factorize : Top-level factorize method that dispatches here. + + Notes + ----- + :meth:`pandas.factorize` offers a `sort` keyword as well. + + Examples + -------- + >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02", + ... "2014-03", "2014-03"], freq="M") + >>> arr, idx = idx1.factorize() + >>> arr + array([0, 0, 1, 1, 2, 2]) + >>> idx + PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]') + """ + # Implementer note: There are two ways to override the behavior of + # pandas.factorize + # 1. _values_for_factorize and _from_factorize. + # Specify the values passed to pandas' internal factorization + # routines, and how to convert from those values back to the + # original ExtensionArray. + # 2. ExtensionArray.factorize. + # Complete control over factorization. + arr, na_value = self._values_for_factorize() + + codes, uniques = factorize_array( + arr, use_na_sentinel=use_na_sentinel, na_value=na_value + ) + + uniques_ea = self._from_factorized(uniques, self) + return codes, uniques_ea + + _extension_array_shared_docs[ + "repeat" + ] = """ + Repeat elements of a %(klass)s. + + Returns a new %(klass)s where each element of the current %(klass)s + is repeated consecutively a given number of times. + + Parameters + ---------- + repeats : int or array of ints + The number of repetitions for each element. This should be a + non-negative integer. Repeating 0 times will return an empty + %(klass)s. + axis : None + Must be ``None``. Has no effect but is accepted for compatibility + with numpy. + + Returns + ------- + %(klass)s + Newly created %(klass)s with repeated elements. + + See Also + -------- + Series.repeat : Equivalent function for Series. + Index.repeat : Equivalent function for Index. + numpy.repeat : Similar method for :class:`numpy.ndarray`. + ExtensionArray.take : Take arbitrary positions. 
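Editor's note: as a usage-level illustration of the ``factorize``/``_values_for_factorize`` contract documented above (not part of the diffed source):

import pandas as pd

arr = pd.array(["a", "b", pd.NA, "a"], dtype="string")
codes, uniques = arr.factorize()     # use_na_sentinel=True by default

print(codes)       # [ 0  1 -1  0]   missing value coded as -1
print(uniques)     # ['a', 'b'], dtype: string   (NA not included)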
+ + Examples + -------- + >>> cat = pd.Categorical(['a', 'b', 'c']) + >>> cat + ['a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + >>> cat.repeat(2) + ['a', 'a', 'b', 'b', 'c', 'c'] + Categories (3, object): ['a', 'b', 'c'] + >>> cat.repeat([1, 2, 3]) + ['a', 'b', 'b', 'c', 'c', 'c'] + Categories (3, object): ['a', 'b', 'c'] + """ + + @Substitution(klass="ExtensionArray") + @Appender(_extension_array_shared_docs["repeat"]) + def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None = None) -> Self: + nv.validate_repeat((), {"axis": axis}) + ind = np.arange(len(self)).repeat(repeats) + return self.take(ind) + + # ------------------------------------------------------------------------ + # Indexing methods + # ------------------------------------------------------------------------ + + def take( + self, + indices: TakeIndexer, + *, + allow_fill: bool = False, + fill_value: Any = None, + ) -> Self: + """ + Take elements from an array. + + Parameters + ---------- + indices : sequence of int or one-dimensional np.ndarray of int + Indices to be taken. + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to + :func:`numpy.take`. + + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + other negative values raise a ``ValueError``. + + fill_value : any, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type, ``self.dtype.na_value``, is used. + + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + Returns + ------- + ExtensionArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. + ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. + + See Also + -------- + numpy.take : Take elements from an array along an axis. + api.extensions.take : Take elements from an array. + + Notes + ----- + ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, + ``iloc``, when `indices` is a sequence of values. Additionally, + it's called by :meth:`Series.reindex`, or any other method + that causes realignment, with a `fill_value`. + + Examples + -------- + Here's an example implementation, which relies on casting the + extension array to object dtype. This uses the helper method + :func:`pandas.api.extensions.take`. + + .. code-block:: python + + def take(self, indices, allow_fill=False, fill_value=None): + from pandas.core.algorithms import take + + # If the ExtensionArray is backed by an ndarray, then + # just pass that here instead of coercing to object. + data = self.astype(object) + + if allow_fill and fill_value is None: + fill_value = self.dtype.na_value + + # fill value should always be translated from the scalar + # type for the array, to the physical storage type for + # the data, before passing to take. 
+ + result = take(data, indices, fill_value=fill_value, + allow_fill=allow_fill) + return self._from_sequence(result, dtype=self.dtype) + """ + # Implementer note: The `fill_value` parameter should be a user-facing + # value, an instance of self.dtype.type. When passed `fill_value=None`, + # the default of `self.dtype.na_value` should be used. + # This may differ from the physical storage type your ExtensionArray + # uses. In this case, your implementation is responsible for casting + # the user-facing type to the storage type, before using + # pandas.api.extensions.take + raise AbstractMethodError(self) + + def copy(self) -> Self: + """ + Return a copy of the array. + + Returns + ------- + ExtensionArray + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr2 = arr.copy() + >>> arr[0] = 2 + >>> arr2 + + [1, 2, 3] + Length: 3, dtype: Int64 + """ + raise AbstractMethodError(self) + + def view(self, dtype: Dtype | None = None) -> ArrayLike: + """ + Return a view on the array. + + Parameters + ---------- + dtype : str, np.dtype, or ExtensionDtype, optional + Default None. + + Returns + ------- + ExtensionArray or np.ndarray + A view on the :class:`ExtensionArray`'s data. + + Examples + -------- + This gives view on the underlying data of an ``ExtensionArray`` and is not a + copy. Modifications on either the view or the original ``ExtensionArray`` + will be reflectd on the underlying data: + + >>> arr = pd.array([1, 2, 3]) + >>> arr2 = arr.view() + >>> arr[0] = 2 + >>> arr2 + + [2, 2, 3] + Length: 3, dtype: Int64 + """ + # NB: + # - This must return a *new* object referencing the same data, not self. + # - The only case that *must* be implemented is with dtype=None, + # giving a view with the same dtype as self. + if dtype is not None: + raise NotImplementedError(dtype) + return self[:] + + # ------------------------------------------------------------------------ + # Printing + # ------------------------------------------------------------------------ + + def __repr__(self) -> str: + if self.ndim > 1: + return self._repr_2d() + + from pandas.io.formats.printing import format_object_summary + + # the short repr has no trailing newline, while the truncated + # repr does. So we include a newline in our template, and strip + # any trailing newlines from format_object_summary + data = format_object_summary( + self, self._formatter(), indent_for_name=False + ).rstrip(", \n") + class_name = f"<{type(self).__name__}>\n" + footer = self._get_repr_footer() + return f"{class_name}{data}\n{footer}" + + def _get_repr_footer(self) -> str: + # GH#24278 + if self.ndim > 1: + return f"Shape: {self.shape}, dtype: {self.dtype}" + return f"Length: {len(self)}, dtype: {self.dtype}" + + def _repr_2d(self) -> str: + from pandas.io.formats.printing import format_object_summary + + # the short repr has no trailing newline, while the truncated + # repr does. So we include a newline in our template, and strip + # any trailing newlines from format_object_summary + lines = [ + format_object_summary(x, self._formatter(), indent_for_name=False).rstrip( + ", \n" + ) + for x in self + ] + data = ",\n".join(lines) + class_name = f"<{type(self).__name__}>" + footer = self._get_repr_footer() + return f"{class_name}\n[\n{data}\n]\n{footer}" + + def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]: + """ + Formatting function for scalar values. + + This is used in the default '__repr__'. The returned formatting + function receives instances of your scalar type. 
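Editor's note: the helper used in the ``take`` example above is public as ``pandas.api.extensions.take``; a brief illustration of its two fill modes:

import numpy as np
from pandas.api.extensions import take

data = np.array([10, 20, 30])

print(take(data, [0, -1], allow_fill=False))                     # [10 30]
print(take(data, [0, -1], allow_fill=True, fill_value=np.nan))   # [10. nan]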
+ + Parameters + ---------- + boxed : bool, default False + An indicated for whether or not your array is being printed + within a Series, DataFrame, or Index (True), or just by + itself (False). This may be useful if you want scalar values + to appear differently within a Series versus on its own (e.g. + quoted or not). + + Returns + ------- + Callable[[Any], str] + A callable that gets instances of the scalar type and + returns a string. By default, :func:`repr` is used + when ``boxed=False`` and :func:`str` is used when + ``boxed=True``. + + Examples + -------- + >>> class MyExtensionArray(pd.arrays.NumpyExtensionArray): + ... def _formatter(self, boxed=False): + ... return lambda x: '*' + str(x) + '*' if boxed else repr(x) + '*' + >>> MyExtensionArray(np.array([1, 2, 3, 4])) + + [1*, 2*, 3*, 4*] + Length: 4, dtype: int64 + """ + if boxed: + return str + return repr + + # ------------------------------------------------------------------------ + # Reshaping + # ------------------------------------------------------------------------ + + def transpose(self, *axes: int) -> ExtensionArray: + """ + Return a transposed view on this array. + + Because ExtensionArrays are always 1D, this is a no-op. It is included + for compatibility with np.ndarray. + + Returns + ------- + ExtensionArray + + Examples + -------- + >>> pd.array([1, 2, 3]).transpose() + + [1, 2, 3] + Length: 3, dtype: Int64 + """ + return self[:] + + @property + def T(self) -> ExtensionArray: + return self.transpose() + + def ravel(self, order: Literal["C", "F", "A", "K"] | None = "C") -> ExtensionArray: + """ + Return a flattened view on this array. + + Parameters + ---------- + order : {None, 'C', 'F', 'A', 'K'}, default 'C' + + Returns + ------- + ExtensionArray + + Notes + ----- + - Because ExtensionArrays are 1D-only, this is a no-op. + - The "order" argument is ignored, is for compatibility with NumPy. + + Examples + -------- + >>> pd.array([1, 2, 3]).ravel() + + [1, 2, 3] + Length: 3, dtype: Int64 + """ + return self + + @classmethod + def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self: + """ + Concatenate multiple array of this dtype. + + Parameters + ---------- + to_concat : sequence of this type + + Returns + ------- + ExtensionArray + + Examples + -------- + >>> arr1 = pd.array([1, 2, 3]) + >>> arr2 = pd.array([4, 5, 6]) + >>> pd.arrays.IntegerArray._concat_same_type([arr1, arr2]) + + [1, 2, 3, 4, 5, 6] + Length: 6, dtype: Int64 + """ + # Implementer note: this method will only be called with a sequence of + # ExtensionArrays of this class and with the same dtype as self. This + # should allow "easy" concatenation (no upcasting needed), and result + # in a new ExtensionArray of the same dtype. + # Note: this strict behaviour is only guaranteed starting with pandas 1.1 + raise AbstractMethodError(cls) + + # The _can_hold_na attribute is set to True so that pandas internals + # will use the ExtensionDtype.na_value as the NA value in operations + # such as take(), reindex(), shift(), etc. In addition, those results + # will then be of the ExtensionArray subclass rather than an array + # of objects + @cache_readonly + def _can_hold_na(self) -> bool: + return self.dtype._can_hold_na + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> ExtensionArray: + """ + Return an ExtensionArray performing an accumulation operation. + + The underlying data type might change. 
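Editor's note: accumulations on the built-in masked arrays show the behaviour the ``_accumulate`` hook is meant to provide (usage sketch, not part of the diffed source):

import pandas as pd

s = pd.Series(pd.array([1, None, 3], dtype="Int64"))

print(s.cumsum())                 # [1, <NA>, 4]    (skipna=True by default)
print(s.cumsum(skipna=False))     # [1, <NA>, <NA>]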
+ + Parameters + ---------- + name : str + Name of the function, supported values are: + - cummin + - cummax + - cumsum + - cumprod + skipna : bool, default True + If True, skip NA values. + **kwargs + Additional keyword arguments passed to the accumulation function. + Currently, there is no supported kwarg. + + Returns + ------- + array + + Raises + ------ + NotImplementedError : subclass does not define accumulations + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr._accumulate(name='cumsum') + + [1, 3, 6] + Length: 3, dtype: Int64 + """ + raise NotImplementedError(f"cannot perform {name} with type {self.dtype}") + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + """ + Return a scalar result of performing the reduction operation. + + Parameters + ---------- + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. + skipna : bool, default True + If True, skip NaN values. + keepdims : bool, default False + If False, a scalar is returned. + If True, the result has dimension with size one along the reduced axis. + + .. versionadded:: 2.1 + + This parameter is not required in the _reduce signature to keep backward + compatibility, but will become required in the future. If the parameter + is not found in the method signature, a FutureWarning will be emitted. + **kwargs + Additional keyword arguments passed to the reduction function. + Currently, `ddof` is the only supported kwarg. + + Returns + ------- + scalar + + Raises + ------ + TypeError : subclass does not define reductions + + Examples + -------- + >>> pd.array([1, 2, 3])._reduce("min") + 1 + """ + meth = getattr(self, name, None) + if meth is None: + raise TypeError( + f"'{type(self).__name__}' with dtype {self.dtype} " + f"does not support reduction '{name}'" + ) + result = meth(skipna=skipna, **kwargs) + if keepdims: + result = np.array([result]) + + return result + + # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 + # Incompatible types in assignment (expression has type "None", base class + # "object" defined the type as "Callable[[object], int]") + __hash__: ClassVar[None] # type: ignore[assignment] + + # ------------------------------------------------------------------------ + # Non-Optimized Default Methods; in the case of the private methods here, + # these are not guaranteed to be stable across pandas versions. + + def _values_for_json(self) -> np.ndarray: + """ + Specify how to render our entries in to_json. + + Notes + ----- + The dtype on the returned ndarray is not restricted, but for non-native + types that are not specifically handled in objToJSON.c, to_json is + liable to raise. In these cases, it may be safer to return an ndarray + of strings. + """ + return np.asarray(self) + + def _hash_pandas_object( + self, *, encoding: str, hash_key: str, categorize: bool + ) -> npt.NDArray[np.uint64]: + """ + Hook for hash_pandas_object. + + Default is to use the values returned by _values_for_factorize. + + Parameters + ---------- + encoding : str + Encoding for data & key when strings. + hash_key : str + Hash_key for string key to encode. + categorize : bool + Whether to first categorize object arrays before hashing. This is more + efficient when the array contains duplicate values. + + Returns + ------- + np.ndarray[uint64] + + Examples + -------- + >>> pd.array([1, 2])._hash_pandas_object(encoding='utf-8', + ... hash_key="1000000000000000", + ... 
categorize=False + ... ) + array([ 6238072747940578789, 15839785061582574730], dtype=uint64) + """ + from pandas.core.util.hashing import hash_array + + values, _ = self._values_for_factorize() + return hash_array( + values, encoding=encoding, hash_key=hash_key, categorize=categorize + ) + + def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]: + """ + Transform each element of list-like to a row. + + For arrays that do not contain list-like elements the default + implementation of this method just returns a copy and an array + of ones (unchanged index). + + Returns + ------- + ExtensionArray + Array with the exploded values. + np.ndarray[uint64] + The original lengths of each list-like for determining the + resulting index. + + See Also + -------- + Series.explode : The method on the ``Series`` object that this + extension array method is meant to support. + + Examples + -------- + >>> import pyarrow as pa + >>> a = pd.array([[1, 2, 3], [4], [5, 6]], + ... dtype=pd.ArrowDtype(pa.list_(pa.int64()))) + >>> a._explode() + ( + [1, 2, 3, 4, 5, 6] + Length: 6, dtype: int64[pyarrow], array([3, 1, 2], dtype=int32)) + """ + values = self.copy() + counts = np.ones(shape=(len(self),), dtype=np.uint64) + return values, counts + + def tolist(self) -> list: + """ + Return a list of the values. + + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + list + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.tolist() + [1, 2, 3] + """ + if self.ndim > 1: + return [x.tolist() for x in self] + return list(self) + + def delete(self, loc: PositionalIndexer) -> Self: + indexer = np.delete(np.arange(len(self)), loc) + return self.take(indexer) + + def insert(self, loc: int, item) -> Self: + """ + Insert an item at the given position. + + Parameters + ---------- + loc : int + item : scalar-like + + Returns + ------- + same type as self + + Notes + ----- + This method should be both type and dtype-preserving. If the item + cannot be held in an array of this type/dtype, either ValueError or + TypeError should be raised. + + The default implementation relies on _from_sequence to raise on invalid + items. + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.insert(2, -1) + + [1, 2, -1, 3] + Length: 4, dtype: Int64 + """ + loc = validate_insert_loc(loc, len(self)) + + item_arr = type(self)._from_sequence([item], dtype=self.dtype) + + return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]]) + + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + """ + Analogue to np.putmask(self, mask, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + If listlike, must be arraylike with same length as self. + + Returns + ------- + None + + Notes + ----- + Unlike np.putmask, we do not repeat listlike values with mismatched length. + 'value' should either be a scalar or an arraylike with the same length + as self. 
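Editor's note: ``delete`` above has no doctest of its own; a short illustration of the indexer-plus-``take`` behaviour it and ``insert`` implement:

import pandas as pd

arr = pd.array([1, 2, 3, 4], dtype="Int64")

print(arr.delete(1))         # [1, 3, 4]
print(arr.delete([0, 3]))    # [2, 3]
print(arr.insert(2, -1))     # [1, 2, -1, 3, 4]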
+ """ + if is_list_like(value): + val = value[mask] + else: + val = value + + self[mask] = val + + def _where(self, mask: npt.NDArray[np.bool_], value) -> Self: + """ + Analogue to np.where(mask, self, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + + Returns + ------- + same type as self + """ + result = self.copy() + + if is_list_like(value): + val = value[~mask] + else: + val = value + + result[~mask] = val + return result + + # TODO(3.0): this can be removed once GH#33302 deprecation is enforced + def _fill_mask_inplace( + self, method: str, limit: int | None, mask: npt.NDArray[np.bool_] + ) -> None: + """ + Replace values in locations specified by 'mask' using pad or backfill. + + See also + -------- + ExtensionArray.fillna + """ + func = missing.get_fill_func(method) + npvalues = self.astype(object) + # NB: if we don't copy mask here, it may be altered inplace, which + # would mess up the `self[mask] = ...` below. + func(npvalues, limit=limit, mask=mask.copy()) + new_values = self._from_sequence(npvalues, dtype=self.dtype) + self[mask] = new_values[mask] + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + if axis != 0: + raise NotImplementedError + + return rank( + self._values_for_argsort(), + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + + @classmethod + def _empty(cls, shape: Shape, dtype: ExtensionDtype): + """ + Create an ExtensionArray with the given shape and dtype. + + See also + -------- + ExtensionDtype.empty + ExtensionDtype.empty is the 'official' public version of this API. + """ + # Implementer note: while ExtensionDtype.empty is the public way to + # call this method, it is still required to implement this `_empty` + # method as well (it is called internally in pandas) + obj = cls._from_sequence([], dtype=dtype) + + taker = np.broadcast_to(np.intp(-1), shape) + result = obj.take(taker, allow_fill=True) + if not isinstance(result, cls) or dtype != result.dtype: + raise NotImplementedError( + f"Default 'empty' implementation is invalid for dtype='{dtype}'" + ) + return result + + def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: + """ + Compute the quantiles of self for each quantile in `qs`. + + Parameters + ---------- + qs : np.ndarray[float64] + interpolation: str + + Returns + ------- + same type as self + """ + mask = np.asarray(self.isna()) + arr = np.asarray(self) + fill_value = np.nan + + res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation) + return type(self)._from_sequence(res_values) + + def _mode(self, dropna: bool = True) -> Self: + """ + Returns the mode(s) of the ExtensionArray. + + Always returns `ExtensionArray` even if only one value. + + Parameters + ---------- + dropna : bool, default True + Don't consider counts of NA values. + + Returns + ------- + same type as self + Sorted, if possible. 
+ """ + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "Self") + return mode(self, dropna=dropna) # type: ignore[return-value] + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + if any( + isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs + ): + return NotImplemented + + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + return arraylike.default_array_ufunc(self, ufunc, method, *inputs, **kwargs) + + def map(self, mapper, na_action=None): + """ + Map values using an input mapping or function. + + Parameters + ---------- + mapper : function, dict, or Series + Mapping correspondence. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NA values, without passing them to the + mapping correspondence. If 'ignore' is not supported, a + ``NotImplementedError`` should be raised. + + Returns + ------- + Union[ndarray, Index, ExtensionArray] + The output of the mapping function applied to the array. + If the function returns a tuple with more than one element + a MultiIndex will be returned. + """ + return map_array(self, mapper, na_action=na_action) + + # ------------------------------------------------------------------------ + # GroupBy Methods + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ) -> ArrayLike: + """ + Dispatch GroupBy reduction or transformation operation. + + This is an *experimental* API to allow ExtensionArray authors to implement + reductions and transformations. The API is subject to change. + + Parameters + ---------- + how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median', + 'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc', + 'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'} + has_dropped_na : bool + min_count : int + ngroups : int + ids : np.ndarray[np.intp] + ids[i] gives the integer label for the group that self[i] belongs to. + **kwargs : operation-specific + 'any', 'all' -> ['skipna'] + 'var', 'std', 'sem' -> ['ddof'] + 'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna'] + 'rank' -> ['ties_method', 'ascending', 'na_option', 'pct'] + + Returns + ------- + np.ndarray or ExtensionArray + """ + from pandas.core.arrays.string_ import StringDtype + from pandas.core.groupby.ops import WrappedCythonOp + + kind = WrappedCythonOp.get_kind_from_how(how) + op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) + + # GH#43682 + if isinstance(self.dtype, StringDtype): + # StringArray + if op.how not in ["any", "all"]: + # Fail early to avoid conversion to object + op._get_cython_function(op.kind, op.how, np.dtype(object), False) + npvalues = self.to_numpy(object, na_value=np.nan) + else: + raise NotImplementedError( + f"function is not implemented for this dtype: {self.dtype}" + ) + + res_values = op._cython_op_ndim_compat( + npvalues, + min_count=min_count, + ngroups=ngroups, + comp_ids=ids, + mask=None, + **kwargs, + ) + + if op.how in op.cast_blocklist: + # i.e. 
how in ["rank"], since other cast_blocklist methods don't go + # through cython_operation + return res_values + + if isinstance(self.dtype, StringDtype): + dtype = self.dtype + string_array_cls = dtype.construct_array_type() + return string_array_cls._from_sequence(res_values, dtype=dtype) + + else: + raise NotImplementedError + + +class ExtensionArraySupportsAnyAll(ExtensionArray): + def any(self, *, skipna: bool = True) -> bool: + raise AbstractMethodError(self) + + def all(self, *, skipna: bool = True) -> bool: + raise AbstractMethodError(self) + + +class ExtensionOpsMixin: + """ + A base class for linking the operators to their dunder names. + + .. note:: + + You may want to set ``__array_priority__`` if you want your + implementation to be called when involved in binary operations + with NumPy arrays. + """ + + @classmethod + def _create_arithmetic_method(cls, op): + raise AbstractMethodError(cls) + + @classmethod + def _add_arithmetic_ops(cls) -> None: + setattr(cls, "__add__", cls._create_arithmetic_method(operator.add)) + setattr(cls, "__radd__", cls._create_arithmetic_method(roperator.radd)) + setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub)) + setattr(cls, "__rsub__", cls._create_arithmetic_method(roperator.rsub)) + setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul)) + setattr(cls, "__rmul__", cls._create_arithmetic_method(roperator.rmul)) + setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow)) + setattr(cls, "__rpow__", cls._create_arithmetic_method(roperator.rpow)) + setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod)) + setattr(cls, "__rmod__", cls._create_arithmetic_method(roperator.rmod)) + setattr(cls, "__floordiv__", cls._create_arithmetic_method(operator.floordiv)) + setattr( + cls, "__rfloordiv__", cls._create_arithmetic_method(roperator.rfloordiv) + ) + setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv)) + setattr(cls, "__rtruediv__", cls._create_arithmetic_method(roperator.rtruediv)) + setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod)) + setattr(cls, "__rdivmod__", cls._create_arithmetic_method(roperator.rdivmod)) + + @classmethod + def _create_comparison_method(cls, op): + raise AbstractMethodError(cls) + + @classmethod + def _add_comparison_ops(cls) -> None: + setattr(cls, "__eq__", cls._create_comparison_method(operator.eq)) + setattr(cls, "__ne__", cls._create_comparison_method(operator.ne)) + setattr(cls, "__lt__", cls._create_comparison_method(operator.lt)) + setattr(cls, "__gt__", cls._create_comparison_method(operator.gt)) + setattr(cls, "__le__", cls._create_comparison_method(operator.le)) + setattr(cls, "__ge__", cls._create_comparison_method(operator.ge)) + + @classmethod + def _create_logical_method(cls, op): + raise AbstractMethodError(cls) + + @classmethod + def _add_logical_ops(cls) -> None: + setattr(cls, "__and__", cls._create_logical_method(operator.and_)) + setattr(cls, "__rand__", cls._create_logical_method(roperator.rand_)) + setattr(cls, "__or__", cls._create_logical_method(operator.or_)) + setattr(cls, "__ror__", cls._create_logical_method(roperator.ror_)) + setattr(cls, "__xor__", cls._create_logical_method(operator.xor)) + setattr(cls, "__rxor__", cls._create_logical_method(roperator.rxor)) + + +class ExtensionScalarOpsMixin(ExtensionOpsMixin): + """ + A mixin for defining ops on an ExtensionArray. + + It is assumed that the underlying scalar objects have the operators + already defined. 
+ + Notes + ----- + If you have defined a subclass MyExtensionArray(ExtensionArray), then + use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to + get the arithmetic operators. After the definition of MyExtensionArray, + insert the lines + + MyExtensionArray._add_arithmetic_ops() + MyExtensionArray._add_comparison_ops() + + to link the operators to your class. + + .. note:: + + You may want to set ``__array_priority__`` if you want your + implementation to be called when involved in binary operations + with NumPy arrays. + """ + + @classmethod + def _create_method(cls, op, coerce_to_dtype: bool = True, result_dtype=None): + """ + A class method that returns a method that will correspond to an + operator for an ExtensionArray subclass, by dispatching to the + relevant operator defined on the individual elements of the + ExtensionArray. + + Parameters + ---------- + op : function + An operator that takes arguments op(a, b) + coerce_to_dtype : bool, default True + boolean indicating whether to attempt to convert + the result to the underlying ExtensionArray dtype. + If it's not possible to create a new ExtensionArray with the + values, an ndarray is returned instead. + + Returns + ------- + Callable[[Any, Any], Union[ndarray, ExtensionArray]] + A method that can be bound to a class. When used, the method + receives the two arguments, one of which is the instance of + this class, and should return an ExtensionArray or an ndarray. + + Returning an ndarray may be necessary when the result of the + `op` cannot be stored in the ExtensionArray. The dtype of the + ndarray uses NumPy's normal inference rules. + + Examples + -------- + Given an ExtensionArray subclass called MyExtensionArray, use + + __add__ = cls._create_method(operator.add) + + in the class definition of MyExtensionArray to create the operator + for addition, that will be based on the operator implementation + of the underlying elements of the ExtensionArray + """ + + def _binop(self, other): + def convert_values(param): + if isinstance(param, ExtensionArray) or is_list_like(param): + ovalues = param + else: # Assume its an object + ovalues = [param] * len(self) + return ovalues + + if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)): + # rely on pandas to unbox and dispatch to us + return NotImplemented + + lvalues = self + rvalues = convert_values(other) + + # If the operator is not defined for the underlying objects, + # a TypeError should be raised + res = [op(a, b) for (a, b) in zip(lvalues, rvalues)] + + def _maybe_convert(arr): + if coerce_to_dtype: + # https://github.com/pandas-dev/pandas/issues/22850 + # We catch all regular exceptions here, and fall back + # to an ndarray. 
+ res = maybe_cast_pointwise_result(arr, self.dtype, same_dtype=False) + if not isinstance(res, type(self)): + # exception raised in _from_sequence; ensure we have ndarray + res = np.asarray(arr) + else: + res = np.asarray(arr, dtype=result_dtype) + return res + + if op.__name__ in {"divmod", "rdivmod"}: + a, b = zip(*res) + return _maybe_convert(a), _maybe_convert(b) + + return _maybe_convert(res) + + op_name = f"__{op.__name__}__" + return set_function_name(_binop, op_name, cls) + + @classmethod + def _create_arithmetic_method(cls, op): + return cls._create_method(op) + + @classmethod + def _create_comparison_method(cls, op): + return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/boolean.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/boolean.py new file mode 100644 index 0000000000000000000000000000000000000000..04e6f0a0bcdde9a11550fcec8274e09fe8429430 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/boolean.py @@ -0,0 +1,407 @@ +from __future__ import annotations + +import numbers +from typing import ( + TYPE_CHECKING, + ClassVar, + cast, +) + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.dtypes import register_extension_dtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ops +from pandas.core.array_algos import masked_accumulations +from pandas.core.arrays.masked import ( + BaseMaskedArray, + BaseMaskedDtype, +) + +if TYPE_CHECKING: + import pyarrow + + from pandas._typing import ( + Dtype, + DtypeObj, + Self, + npt, + type_t, + ) + + +@register_extension_dtype +class BooleanDtype(BaseMaskedDtype): + """ + Extension dtype for boolean data. + + .. warning:: + + BooleanDtype is considered experimental. The implementation and + parts of the API may change without warning. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.BooleanDtype() + BooleanDtype + """ + + name: ClassVar[str] = "boolean" + + # https://github.com/python/mypy/issues/4125 + # error: Signature of "type" incompatible with supertype "BaseMaskedDtype" + @property + def type(self) -> type: # type: ignore[override] + return np.bool_ + + @property + def kind(self) -> str: + return "b" + + @property + def numpy_dtype(self) -> np.dtype: + return np.dtype("bool") + + @classmethod + def construct_array_type(cls) -> type_t[BooleanArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return BooleanArray + + def __repr__(self) -> str: + return "BooleanDtype" + + @property + def _is_boolean(self) -> bool: + return True + + @property + def _is_numeric(self) -> bool: + return True + + def __from_arrow__( + self, array: pyarrow.Array | pyarrow.ChunkedArray + ) -> BooleanArray: + """ + Construct BooleanArray from pyarrow Array/ChunkedArray. 
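Editor's note: assuming ``pyarrow`` is installed, the conversion documented here can be exercised directly (usage sketch, not part of the diffed source):

import pyarrow as pa
import pandas as pd

chunked = pa.chunked_array([[True, None, False]])
result = pd.BooleanDtype().__from_arrow__(chunked)
print(result)          # [True, <NA>, False], dtype: boolean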
+ """ + import pyarrow + + if array.type != pyarrow.bool_() and not pyarrow.types.is_null(array.type): + raise TypeError(f"Expected array of boolean type, got {array.type} instead") + + if isinstance(array, pyarrow.Array): + chunks = [array] + length = len(array) + else: + # pyarrow.ChunkedArray + chunks = array.chunks + length = array.length() + + if pyarrow.types.is_null(array.type): + mask = np.ones(length, dtype=bool) + # No need to init data, since all null + data = np.empty(length, dtype=bool) + return BooleanArray(data, mask) + + results = [] + for arr in chunks: + buflist = arr.buffers() + data = pyarrow.BooleanArray.from_buffers( + arr.type, len(arr), [None, buflist[1]], offset=arr.offset + ).to_numpy(zero_copy_only=False) + if arr.null_count != 0: + mask = pyarrow.BooleanArray.from_buffers( + arr.type, len(arr), [None, buflist[0]], offset=arr.offset + ).to_numpy(zero_copy_only=False) + mask = ~mask + else: + mask = np.zeros(len(arr), dtype=bool) + + bool_arr = BooleanArray(data, mask) + results.append(bool_arr) + + if not results: + return BooleanArray( + np.array([], dtype=np.bool_), np.array([], dtype=np.bool_) + ) + else: + return BooleanArray._concat_same_type(results) + + +def coerce_to_array( + values, mask=None, copy: bool = False +) -> tuple[np.ndarray, np.ndarray]: + """ + Coerce the input values array to numpy arrays with a mask. + + Parameters + ---------- + values : 1D list-like + mask : bool 1D array, optional + copy : bool, default False + if True, copy the input + + Returns + ------- + tuple of (values, mask) + """ + if isinstance(values, BooleanArray): + if mask is not None: + raise ValueError("cannot pass mask for BooleanArray input") + values, mask = values._data, values._mask + if copy: + values = values.copy() + mask = mask.copy() + return values, mask + + mask_values = None + if isinstance(values, np.ndarray) and values.dtype == np.bool_: + if copy: + values = values.copy() + elif isinstance(values, np.ndarray) and values.dtype.kind in "iufcb": + mask_values = isna(values) + + values_bool = np.zeros(len(values), dtype=bool) + values_bool[~mask_values] = values[~mask_values].astype(bool) + + if not np.all( + values_bool[~mask_values].astype(values.dtype) == values[~mask_values] + ): + raise TypeError("Need to pass bool-like values") + + values = values_bool + else: + values_object = np.asarray(values, dtype=object) + + inferred_dtype = lib.infer_dtype(values_object, skipna=True) + integer_like = ("floating", "integer", "mixed-integer-float") + if inferred_dtype not in ("boolean", "empty") + integer_like: + raise TypeError("Need to pass bool-like values") + + # mypy does not narrow the type of mask_values to npt.NDArray[np.bool_] + # within this branch, it assumes it can also be None + mask_values = cast("npt.NDArray[np.bool_]", isna(values_object)) + values = np.zeros(len(values), dtype=bool) + values[~mask_values] = values_object[~mask_values].astype(bool) + + # if the values were integer-like, validate it were actually 0/1's + if (inferred_dtype in integer_like) and not ( + np.all( + values[~mask_values].astype(float) + == values_object[~mask_values].astype(float) + ) + ): + raise TypeError("Need to pass bool-like values") + + if mask is None and mask_values is None: + mask = np.zeros(values.shape, dtype=bool) + elif mask is None: + mask = mask_values + else: + if isinstance(mask, np.ndarray) and mask.dtype == np.bool_: + if mask_values is not None: + mask = mask | mask_values + else: + if copy: + mask = mask.copy() + else: + mask = np.array(mask, 
dtype=bool) + if mask_values is not None: + mask = mask | mask_values + + if values.shape != mask.shape: + raise ValueError("values.shape and mask.shape must match") + + return values, mask + + +class BooleanArray(BaseMaskedArray): + """ + Array of boolean (True/False) data with missing values. + + This is a pandas Extension array for boolean data, under the hood + represented by 2 numpy arrays: a boolean array with the data and + a boolean array with the mask (True indicating missing). + + BooleanArray implements Kleene logic (sometimes called three-value + logic) for logical operations. See :ref:`boolean.kleene` for more. + + To construct an BooleanArray from generic array-like input, use + :func:`pandas.array` specifying ``dtype="boolean"`` (see examples + below). + + .. warning:: + + BooleanArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : numpy.ndarray + A 1-d boolean-dtype array with the data. + mask : numpy.ndarray + A 1-d boolean-dtype array indicating missing values (True + indicates missing). + copy : bool, default False + Whether to copy the `values` and `mask` arrays. + + Attributes + ---------- + None + + Methods + ------- + None + + Returns + ------- + BooleanArray + + Examples + -------- + Create an BooleanArray with :func:`pandas.array`: + + >>> pd.array([True, False, None], dtype="boolean") + + [True, False, ] + Length: 3, dtype: boolean + """ + + # The value used to fill '_data' to avoid upcasting + _internal_fill_value = False + # Fill values used for any/all + # Incompatible types in assignment (expression has type "bool", base class + # "BaseMaskedArray" defined the type as "") + _truthy_value = True # type: ignore[assignment] + _falsey_value = False # type: ignore[assignment] + _TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"} + _FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"} + + @classmethod + def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self: + result = super()._simple_new(values, mask) + result._dtype = BooleanDtype() + return result + + def __init__( + self, values: np.ndarray, mask: np.ndarray, copy: bool = False + ) -> None: + if not (isinstance(values, np.ndarray) and values.dtype == np.bool_): + raise TypeError( + "values should be boolean numpy array. 
Use " + "the 'pd.array' function instead" + ) + self._dtype = BooleanDtype() + super().__init__(values, mask, copy=copy) + + @property + def dtype(self) -> BooleanDtype: + return self._dtype + + @classmethod + def _from_sequence_of_strings( + cls, + strings: list[str], + *, + dtype: Dtype | None = None, + copy: bool = False, + true_values: list[str] | None = None, + false_values: list[str] | None = None, + ) -> BooleanArray: + true_values_union = cls._TRUE_VALUES.union(true_values or []) + false_values_union = cls._FALSE_VALUES.union(false_values or []) + + def map_string(s) -> bool: + if s in true_values_union: + return True + elif s in false_values_union: + return False + else: + raise ValueError(f"{s} cannot be cast to bool") + + scalars = np.array(strings, dtype=object) + mask = isna(scalars) + scalars[~mask] = list(map(map_string, scalars[~mask])) + return cls._from_sequence(scalars, dtype=dtype, copy=copy) + + _HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_) + + @classmethod + def _coerce_to_array( + cls, value, *, dtype: DtypeObj, copy: bool = False + ) -> tuple[np.ndarray, np.ndarray]: + if dtype: + assert dtype == "boolean" + return coerce_to_array(value, copy=copy) + + def _logical_method(self, other, op): + assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"} + other_is_scalar = lib.is_scalar(other) + mask = None + + if isinstance(other, BooleanArray): + other, mask = other._data, other._mask + elif is_list_like(other): + other = np.asarray(other, dtype="bool") + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + other, mask = coerce_to_array(other, copy=False) + elif isinstance(other, np.bool_): + other = other.item() + + if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other): + raise TypeError( + "'other' should be pandas.NA or a bool. " + f"Got {type(other).__name__} instead." + ) + + if not other_is_scalar and len(self) != len(other): + raise ValueError("Lengths must match") + + if op.__name__ in {"or_", "ror_"}: + result, mask = ops.kleene_or(self._data, other, self._mask, mask) + elif op.__name__ in {"and_", "rand_"}: + result, mask = ops.kleene_and(self._data, other, self._mask, mask) + else: + # i.e. xor, rxor + result, mask = ops.kleene_xor(self._data, other, self._mask, mask) + + # i.e. 
BooleanArray + return self._maybe_mask_result(result, mask) + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> BaseMaskedArray: + data = self._data + mask = self._mask + if name in ("cummin", "cummax"): + op = getattr(masked_accumulations, name) + data, mask = op(data, mask, skipna=skipna, **kwargs) + return self._simple_new(data, mask) + else: + from pandas.core.arrays import IntegerArray + + return IntegerArray(data.astype(int), mask)._accumulate( + name, skipna=skipna, **kwargs + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/categorical.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..f191f7277743fe1e9273558f87b3f26008cddda0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/categorical.py @@ -0,0 +1,3070 @@ +from __future__ import annotations + +from csv import QUOTE_NONNUMERIC +from functools import partial +import operator +from shutil import get_terminal_size +from typing import ( + TYPE_CHECKING, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import ( + NaT, + algos as libalgos, + lib, +) +from pandas._libs.arrays import NDArrayBacked +from pandas.compat.numpy import function as nv +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.cast import ( + coerce_indexer_dtype, + find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_int64, + ensure_platform_int, + is_any_real_numeric_dtype, + is_bool_dtype, + is_dict_like, + is_hashable, + is_integer_dtype, + is_list_like, + is_scalar, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, + CategoricalDtypeType, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, +) + +from pandas.core import ( + algorithms, + arraylike, + ops, +) +from pandas.core.accessor import ( + PandasDelegate, + delegate_names, +) +from pandas.core.algorithms import ( + factorize, + take_nd, +) +from pandas.core.arrays._mixins import ( + NDArrayBackedExtensionArray, + ravel_compat, +) +from pandas.core.base import ( + ExtensionArray, + NoNewAttributesMixin, + PandasObject, +) +import pandas.core.common as com +from pandas.core.construction import ( + extract_array, + sanitize_array, +) +from pandas.core.ops.common import unpack_zerodim_and_defer +from pandas.core.sorting import nargsort +from pandas.core.strings.object_array import ObjectStringArrayMixin + +from pandas.io.formats import console + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + AstypeArg, + AxisInt, + Dtype, + DtypeObj, + NpDtype, + Ordered, + Self, + Shape, + SortKind, + npt, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +def _cat_compare_op(op): + opname = f"__{op.__name__}__" + fill_value = op is operator.ne + + @unpack_zerodim_and_defer(opname) + def func(self, other): + hashable = is_hashable(other) + if is_list_like(other) and len(other) != len(self) and not hashable: + # in hashable case we may have a tuple that is itself a category + raise ValueError("Lengths must match.") + + if not self.ordered: + if opname in ["__lt__", "__gt__", 
"__le__", "__ge__"]: + raise TypeError( + "Unordered Categoricals can only compare equality or not" + ) + if isinstance(other, Categorical): + # Two Categoricals can only be compared if the categories are + # the same (maybe up to ordering, depending on ordered) + + msg = "Categoricals can only be compared if 'categories' are the same." + if not self._categories_match_up_to_permutation(other): + raise TypeError(msg) + + if not self.ordered and not self.categories.equals(other.categories): + # both unordered and different order + other_codes = recode_for_categories( + other.codes, other.categories, self.categories, copy=False + ) + else: + other_codes = other._codes + + ret = op(self._codes, other_codes) + mask = (self._codes == -1) | (other_codes == -1) + if mask.any(): + ret[mask] = fill_value + return ret + + if hashable: + if other in self.categories: + i = self._unbox_scalar(other) + ret = op(self._codes, i) + + if opname not in {"__eq__", "__ge__", "__gt__"}: + # GH#29820 performance trick; get_loc will always give i>=0, + # so in the cases (__ne__, __le__, __lt__) the setting + # here is a no-op, so can be skipped. + mask = self._codes == -1 + ret[mask] = fill_value + return ret + else: + return ops.invalid_comparison(self, other, op) + else: + # allow categorical vs object dtype array comparisons for equality + # these are only positional comparisons + if opname not in ["__eq__", "__ne__"]: + raise TypeError( + f"Cannot compare a Categorical for op {opname} with " + f"type {type(other)}.\nIf you want to compare values, " + "use 'np.asarray(cat) other'." + ) + + if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype): + # We would return NotImplemented here, but that messes up + # ExtensionIndex's wrapped methods + return op(other, self) + return getattr(np.array(self), opname)(np.array(other)) + + func.__name__ = opname + + return func + + +def contains(cat, key, container) -> bool: + """ + Helper for membership check for ``key`` in ``cat``. + + This is a helper method for :method:`__contains__` + and :class:`CategoricalIndex.__contains__`. + + Returns True if ``key`` is in ``cat.categories`` and the + location of ``key`` in ``categories`` is in ``container``. + + Parameters + ---------- + cat : :class:`Categorical`or :class:`categoricalIndex` + key : a hashable object + The key to check membership for. + container : Container (e.g. list-like or mapping) + The container to check for membership in. + + Returns + ------- + is_in : bool + True if ``key`` is in ``self.categories`` and location of + ``key`` in ``categories`` is in ``container``, else False. + + Notes + ----- + This method does not check for NaN values. Do that separately + before calling this method. + """ + hash(key) + + # get location of key in categories. + # If a KeyError, the key isn't in categories, so logically + # can't be in container either. + try: + loc = cat.categories.get_loc(key) + except (KeyError, TypeError): + return False + + # loc is the location of key in categories, but also the *value* + # for key in container. So, `key` may be in categories, + # but still not in `container`. Example ('b' in categories, + # but not in values): + # 'b' in Categorical(['a'], categories=['a', 'b']) # False + if is_scalar(loc): + return loc in container + else: + # if categories is an IntervalIndex, loc is an array. 
+ return any(loc_ in container for loc_ in loc) + + +class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMixin): + """ + Represent a categorical variable in classic R / S-plus fashion. + + `Categoricals` can only take on a limited, and usually fixed, number + of possible values (`categories`). In contrast to statistical categorical + variables, a `Categorical` might have an order, but numerical operations + (additions, divisions, ...) are not possible. + + All values of the `Categorical` are either in `categories` or `np.nan`. + Assigning values outside of `categories` will raise a `ValueError`. Order + is defined by the order of the `categories`, not lexical order of the + values. + + Parameters + ---------- + values : list-like + The values of the categorical. If categories are given, values not in + categories will be replaced with NaN. + categories : Index-like (unique), optional + The unique categories for this categorical. If not given, the + categories are assumed to be the unique values of `values` (sorted, if + possible, otherwise in the order in which they appear). + ordered : bool, default False + Whether or not this categorical is treated as a ordered categorical. + If True, the resulting categorical will be ordered. + An ordered categorical respects, when sorted, the order of its + `categories` attribute (which in turn is the `categories` argument, if + provided). + dtype : CategoricalDtype + An instance of ``CategoricalDtype`` to use for this categorical. + + Attributes + ---------- + categories : Index + The categories of this categorical. + codes : ndarray + The codes (integer positions, which point to the categories) of this + categorical, read only. + ordered : bool + Whether or not this Categorical is ordered. + dtype : CategoricalDtype + The instance of ``CategoricalDtype`` storing the ``categories`` + and ``ordered``. + + Methods + ------- + from_codes + __array__ + + Raises + ------ + ValueError + If the categories do not validate. + TypeError + If an explicit ``ordered=True`` is given but no `categories` and the + `values` are not sortable. + + See Also + -------- + CategoricalDtype : Type for categorical data. + CategoricalIndex : An Index with an underlying ``Categorical``. + + Notes + ----- + See the `user guide + `__ + for more. + + Examples + -------- + >>> pd.Categorical([1, 2, 3, 1, 2, 3]) + [1, 2, 3, 1, 2, 3] + Categories (3, int64): [1, 2, 3] + + >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']) + ['a', 'b', 'c', 'a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + Missing values are not included as a category. + + >>> c = pd.Categorical([1, 2, 3, 1, 2, 3, np.nan]) + >>> c + [1, 2, 3, 1, 2, 3, NaN] + Categories (3, int64): [1, 2, 3] + + However, their presence is indicated in the `codes` attribute + by code `-1`. + + >>> c.codes + array([ 0, 1, 2, 0, 1, 2, -1], dtype=int8) + + Ordered `Categoricals` can be sorted according to the custom order + of the categories and can have a min and max value. + + >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True, + ... 
categories=['c', 'b', 'a']) + >>> c + ['a', 'b', 'c', 'a', 'b', 'c'] + Categories (3, object): ['c' < 'b' < 'a'] + >>> c.min() + 'c' + """ + + # For comparisons, so that numpy uses our implementation if the compare + # ops, which raise + __array_priority__ = 1000 + # tolist is not actually deprecated, just suppressed in the __dir__ + _hidden_attrs = PandasObject._hidden_attrs | frozenset(["tolist"]) + _typ = "categorical" + + _dtype: CategoricalDtype + + @classmethod + # error: Argument 2 of "_simple_new" is incompatible with supertype + # "NDArrayBacked"; supertype defines the argument type as + # "Union[dtype[Any], ExtensionDtype]" + def _simple_new( # type: ignore[override] + cls, codes: np.ndarray, dtype: CategoricalDtype + ) -> Self: + # NB: This is not _quite_ as simple as the "usual" _simple_new + codes = coerce_indexer_dtype(codes, dtype.categories) + dtype = CategoricalDtype(ordered=False).update_dtype(dtype) + return super()._simple_new(codes, dtype) + + def __init__( + self, + values, + categories=None, + ordered=None, + dtype: Dtype | None = None, + fastpath: bool | lib.NoDefault = lib.no_default, + copy: bool = True, + ) -> None: + if fastpath is not lib.no_default: + # GH#20110 + warnings.warn( + "The 'fastpath' keyword in Categorical is deprecated and will " + "be removed in a future version. Use Categorical.from_codes instead", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + else: + fastpath = False + + dtype = CategoricalDtype._from_values_or_dtype( + values, categories, ordered, dtype + ) + # At this point, dtype is always a CategoricalDtype, but + # we may have dtype.categories be None, and we need to + # infer categories in a factorization step further below + + if fastpath: + codes = coerce_indexer_dtype(values, dtype.categories) + dtype = CategoricalDtype(ordered=False).update_dtype(dtype) + super().__init__(codes, dtype) + return + + if not is_list_like(values): + # GH#38433 + raise TypeError("Categorical input must be list-like") + + # null_mask indicates missing values we want to exclude from inference. + # This means: only missing values in list-likes (not arrays/ndframes). + null_mask = np.array(False) + + # sanitize input + vdtype = getattr(values, "dtype", None) + if isinstance(vdtype, CategoricalDtype): + if dtype.categories is None: + dtype = CategoricalDtype(values.categories, dtype.ordered) + elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)): + values = com.convert_to_list_like(values) + if isinstance(values, list) and len(values) == 0: + # By convention, empty lists result in object dtype: + values = np.array([], dtype=object) + elif isinstance(values, np.ndarray): + if values.ndim > 1: + # preempt sanitize_array from raising ValueError + raise NotImplementedError( + "> 1 ndim Categorical are not supported at this time" + ) + values = sanitize_array(values, None) + else: + # i.e. 
must be a list + arr = sanitize_array(values, None) + null_mask = isna(arr) + if null_mask.any(): + # We remove null values here, then below will re-insert + # them, grep "full_codes" + arr_list = [values[idx] for idx in np.where(~null_mask)[0]] + + # GH#44900 Do not cast to float if we have only missing values + if arr_list or arr.dtype == "object": + sanitize_dtype = None + else: + sanitize_dtype = arr.dtype + + arr = sanitize_array(arr_list, None, dtype=sanitize_dtype) + values = arr + + if dtype.categories is None: + if isinstance(values.dtype, ArrowDtype) and issubclass( + values.dtype.type, CategoricalDtypeType + ): + arr = values._pa_array.combine_chunks() + categories = arr.dictionary.to_pandas(types_mapper=ArrowDtype) + codes = arr.indices.to_numpy() + dtype = CategoricalDtype(categories, values.dtype.pyarrow_dtype.ordered) + else: + if not isinstance(values, ABCIndex): + # in particular RangeIndex xref test_index_equal_range_categories + values = sanitize_array(values, None) + try: + codes, categories = factorize(values, sort=True) + except TypeError as err: + codes, categories = factorize(values, sort=False) + if dtype.ordered: + # raise, as we don't have a sortable data structure and so + # the user should give us one by specifying categories + raise TypeError( + "'values' is not ordered, please " + "explicitly specify the categories order " + "by passing in a categories argument." + ) from err + + # we're inferring from values + dtype = CategoricalDtype(categories, dtype.ordered) + + elif isinstance(values.dtype, CategoricalDtype): + old_codes = extract_array(values)._codes + codes = recode_for_categories( + old_codes, values.dtype.categories, dtype.categories, copy=copy + ) + + else: + codes = _get_codes_for_values(values, dtype.categories) + + if null_mask.any(): + # Reinsert -1 placeholders for previously removed missing values + full_codes = -np.ones(null_mask.shape, dtype=codes.dtype) + full_codes[~null_mask] = codes + codes = full_codes + + dtype = CategoricalDtype(ordered=False).update_dtype(dtype) + arr = coerce_indexer_dtype(codes, dtype.categories) + super().__init__(arr, dtype) + + @property + def dtype(self) -> CategoricalDtype: + """ + The :class:`~pandas.api.types.CategoricalDtype` for this instance. + + Examples + -------- + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat + ['a', 'b'] + Categories (2, object): ['a' < 'b'] + >>> cat.dtype + CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object) + """ + return self._dtype + + @property + def _internal_fill_value(self) -> int: + # using the specific numpy integer instead of python int to get + # the correct dtype back from _quantile in the all-NA case + dtype = self._ndarray.dtype + return dtype.type(-1) + + @classmethod + def _from_sequence( + cls, scalars, *, dtype: Dtype | None = None, copy: bool = False + ) -> Self: + return cls(scalars, dtype=dtype, copy=copy) + + @classmethod + def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: + if dtype is None: + # The _from_scalars strictness doesn't make much sense in this case. + raise NotImplementedError + + res = cls._from_sequence(scalars, dtype=dtype) + + # if there are any non-category elements in scalars, these will be + # converted to NAs in res. + mask = isna(scalars) + if not (mask == res.isna()).all(): + # Some non-category element in scalars got converted to NA in res. + raise ValueError + return res + + @overload + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: + ... 
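# A brief, hedged sketch of the constructor behaviour implemented above
# (doctest-style, assuming pandas as pd and numpy as np; illustrative only,
# not upstream code): categories are inferred with factorize(sort=True) and
# missing values are re-inserted as the -1 sentinel.
# >>> c = pd.Categorical(['b', 'a', np.nan, 'b'])
# >>> c.categories
# Index(['a', 'b'], dtype='object')
# >>> c.codes
# array([ 1,  0, -1,  1], dtype=int8)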
+ + @overload + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: + ... + + @overload + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: + ... + + def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: + """ + Coerce this type to another dtype + + Parameters + ---------- + dtype : numpy dtype or pandas type + copy : bool, default True + By default, astype always returns a newly allocated object. + If copy is set to False and dtype is categorical, the original + object is returned. + """ + dtype = pandas_dtype(dtype) + if self.dtype is dtype: + result = self.copy() if copy else self + + elif isinstance(dtype, CategoricalDtype): + # GH 10696/18593/18630 + dtype = self.dtype.update_dtype(dtype) + self = self.copy() if copy else self + result = self._set_dtype(dtype) + + elif isinstance(dtype, ExtensionDtype): + return super().astype(dtype, copy=copy) + + elif dtype.kind in "iu" and self.isna().any(): + raise ValueError("Cannot convert float NaN to integer") + + elif len(self.codes) == 0 or len(self.categories) == 0: + result = np.array( + self, + dtype=dtype, + copy=copy, + ) + + else: + # GH8628 (PERF): astype category codes instead of astyping array + new_cats = self.categories._values + + try: + new_cats = new_cats.astype(dtype=dtype, copy=copy) + fill_value = self.categories._na_value + if not is_valid_na_for_dtype(fill_value, dtype): + fill_value = lib.item_from_zerodim( + np.array(self.categories._na_value).astype(dtype) + ) + except ( + TypeError, # downstream error msg for CategoricalIndex is misleading + ValueError, + ): + msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}" + raise ValueError(msg) + + result = take_nd( + new_cats, ensure_platform_int(self._codes), fill_value=fill_value + ) + + return result + + def to_list(self): + """ + Alias for tolist. + """ + # GH#51254 + warnings.warn( + "Categorical.to_list is deprecated and will be removed in a future " + "version. Use obj.tolist() instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.tolist() + + @classmethod + def _from_inferred_categories( + cls, inferred_categories, inferred_codes, dtype, true_values=None + ) -> Self: + """ + Construct a Categorical from inferred values. + + For inferred categories (`dtype` is None) the categories are sorted. + For explicit `dtype`, the `inferred_categories` are cast to the + appropriate type. + + Parameters + ---------- + inferred_categories : Index + inferred_codes : Index + dtype : CategoricalDtype or 'category' + true_values : list, optional + If none are provided, the default ones are + "True", "TRUE", and "true." + + Returns + ------- + Categorical + """ + from pandas import ( + Index, + to_datetime, + to_numeric, + to_timedelta, + ) + + cats = Index(inferred_categories) + known_categories = ( + isinstance(dtype, CategoricalDtype) and dtype.categories is not None + ) + + if known_categories: + # Convert to a specialized type with `dtype` if specified. 
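# Illustrative sketch of this coercion step (assuming pandas as pd; not
# upstream code): when the target CategoricalDtype has numeric categories,
# parser-inferred string categories are coerced before the codes are recoded
# against dtype.categories, roughly like:
# >>> pd.to_numeric(pd.Index(['1', '2']), errors='coerce')
# Index([1, 2], dtype='int64')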
+ if is_any_real_numeric_dtype(dtype.categories.dtype): + cats = to_numeric(inferred_categories, errors="coerce") + elif lib.is_np_dtype(dtype.categories.dtype, "M"): + cats = to_datetime(inferred_categories, errors="coerce") + elif lib.is_np_dtype(dtype.categories.dtype, "m"): + cats = to_timedelta(inferred_categories, errors="coerce") + elif is_bool_dtype(dtype.categories.dtype): + if true_values is None: + true_values = ["True", "TRUE", "true"] + + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + cats = cats.isin(true_values) # type: ignore[assignment] + + if known_categories: + # Recode from observation order to dtype.categories order. + categories = dtype.categories + codes = recode_for_categories(inferred_codes, cats, categories) + elif not cats.is_monotonic_increasing: + # Sort categories and recode for unknown categories. + unsorted = cats.copy() + categories = cats.sort_values() + + codes = recode_for_categories(inferred_codes, unsorted, categories) + dtype = CategoricalDtype(categories, ordered=False) + else: + dtype = CategoricalDtype(cats, ordered=False) + codes = inferred_codes + + return cls._simple_new(codes, dtype=dtype) + + @classmethod + def from_codes( + cls, + codes, + categories=None, + ordered=None, + dtype: Dtype | None = None, + validate: bool = True, + ) -> Self: + """ + Make a Categorical type from codes and categories or dtype. + + This constructor is useful if you already have codes and + categories/dtype and so do not need the (computation intensive) + factorization step, which is usually done on the constructor. + + If your data does not follow this convention, please use the normal + constructor. + + Parameters + ---------- + codes : array-like of int + An integer array, where each integer points to a category in + categories or dtype.categories, or else is -1 for NaN. + categories : index-like, optional + The categories for the categorical. Items need to be unique. + If the categories are not given here, then they must be provided + in `dtype`. + ordered : bool, optional + Whether or not this categorical is treated as an ordered + categorical. If not given here or in `dtype`, the resulting + categorical will be unordered. + dtype : CategoricalDtype or "category", optional + If :class:`CategoricalDtype`, cannot be used together with + `categories` or `ordered`. + validate : bool, default True + If True, validate that the codes are valid for the dtype. + If False, don't validate that the codes are valid. Be careful about skipping + validation, as invalid codes can lead to severe problems, such as segfaults. + + .. versionadded:: 2.1.0 + + Returns + ------- + Categorical + + Examples + -------- + >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True) + >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype) + ['a', 'b', 'a', 'b'] + Categories (2, object): ['a' < 'b'] + """ + dtype = CategoricalDtype._from_values_or_dtype( + categories=categories, ordered=ordered, dtype=dtype + ) + if dtype.categories is None: + msg = ( + "The categories must be provided in 'categories' or " + "'dtype'. Both were None." + ) + raise ValueError(msg) + + if validate: + # beware: non-valid codes may segfault + codes = cls._validate_codes_for_dtype(codes, dtype=dtype) + + return cls._simple_new(codes, dtype=dtype) + + # ------------------------------------------------------------------ + # Categories/Codes/Ordered + + @property + def categories(self) -> Index: + """ + The categories of this categorical. 
+ + Setting assigns new values to each category (effectively a rename of + each individual category). + + The assigned value has to be a list-like object. All items must be + unique and the number of items in the new categories must be the same + as the number of items in the old categories. + + Raises + ------ + ValueError + If the new categories do not validate as categories or if the + number of new categories is unequal the number of old categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. + + Examples + -------- + For :class:`pandas.Series`: + + >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') + >>> ser.cat.categories + Index(['a', 'b', 'c'], dtype='object') + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], categories=['b', 'c', 'd']) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.categories + Index(['b', 'c', 'd'], dtype='object') + + For :class:`pandas.Categorical`: + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat.categories + Index(['a', 'b'], dtype='object') + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'c', 'b', 'a', 'c', 'b']) + >>> ci.categories + Index(['a', 'b', 'c'], dtype='object') + + >>> ci = pd.CategoricalIndex(['a', 'c'], categories=['c', 'b', 'a']) + >>> ci.categories + Index(['c', 'b', 'a'], dtype='object') + """ + return self.dtype.categories + + @property + def ordered(self) -> Ordered: + """ + Whether the categories have an ordered relationship. + + Examples + -------- + For :class:`pandas.Series`: + + >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') + >>> ser.cat.ordered + False + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.ordered + True + + For :class:`pandas.Categorical`: + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat.ordered + True + + >>> cat = pd.Categorical(['a', 'b'], ordered=False) + >>> cat.ordered + False + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b'], ordered=True) + >>> ci.ordered + True + + >>> ci = pd.CategoricalIndex(['a', 'b'], ordered=False) + >>> ci.ordered + False + """ + return self.dtype.ordered + + @property + def codes(self) -> np.ndarray: + """ + The category codes of this categorical index. + + Codes are an array of integers which are the positions of the actual + values in the categories array. + + There is no setter, use the other categorical methods and the normal item + setter to change values in the categorical. + + Returns + ------- + ndarray[int] + A non-writable view of the ``codes`` array. 
+ + Examples + -------- + For :class:`pandas.Categorical`: + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat.codes + array([0, 1], dtype=int8) + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c']) + >>> ci.codes + array([0, 1, 2, 0, 1, 2], dtype=int8) + + >>> ci = pd.CategoricalIndex(['a', 'c'], categories=['c', 'b', 'a']) + >>> ci.codes + array([2, 0], dtype=int8) + """ + v = self._codes.view() + v.flags.writeable = False + return v + + def _set_categories(self, categories, fastpath: bool = False) -> None: + """ + Sets new categories inplace + + Parameters + ---------- + fastpath : bool, default False + Don't perform validation of the categories for uniqueness or nulls + + Examples + -------- + >>> c = pd.Categorical(['a', 'b']) + >>> c + ['a', 'b'] + Categories (2, object): ['a', 'b'] + + >>> c._set_categories(pd.Index(['a', 'c'])) + >>> c + ['a', 'c'] + Categories (2, object): ['a', 'c'] + """ + if fastpath: + new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) + else: + new_dtype = CategoricalDtype(categories, ordered=self.ordered) + if ( + not fastpath + and self.dtype.categories is not None + and len(new_dtype.categories) != len(self.dtype.categories) + ): + raise ValueError( + "new categories need to have the same number of " + "items as the old categories!" + ) + + super().__init__(self._ndarray, new_dtype) + + def _set_dtype(self, dtype: CategoricalDtype) -> Self: + """ + Internal method for directly updating the CategoricalDtype + + Parameters + ---------- + dtype : CategoricalDtype + + Notes + ----- + We don't do any validation here. It's assumed that the dtype is + a (valid) instance of `CategoricalDtype`. + """ + codes = recode_for_categories(self.codes, self.categories, dtype.categories) + return type(self)._simple_new(codes, dtype=dtype) + + def set_ordered(self, value: bool) -> Self: + """ + Set the ordered attribute to the boolean value. + + Parameters + ---------- + value : bool + Set whether this categorical is ordered (True) or not (False). + """ + new_dtype = CategoricalDtype(self.categories, ordered=value) + cat = self.copy() + NDArrayBacked.__init__(cat, cat._ndarray, new_dtype) + return cat + + def as_ordered(self) -> Self: + """ + Set the Categorical to be ordered. + + Returns + ------- + Categorical + Ordered Categorical. + + Examples + -------- + For :class:`pandas.Series`: + + >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') + >>> ser.cat.ordered + False + >>> ser = ser.cat.as_ordered() + >>> ser.cat.ordered + True + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a']) + >>> ci.ordered + False + >>> ci = ci.as_ordered() + >>> ci.ordered + True + """ + return self.set_ordered(True) + + def as_unordered(self) -> Self: + """ + Set the Categorical to be unordered. + + Returns + ------- + Categorical + Unordered Categorical. + + Examples + -------- + For :class:`pandas.Series`: + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.ordered + True + >>> ser = ser.cat.as_unordered() + >>> ser.cat.ordered + False + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'], ordered=True) + >>> ci.ordered + True + >>> ci = ci.as_unordered() + >>> ci.ordered + False + """ + return self.set_ordered(False) + + def set_categories(self, new_categories, ordered=None, rename: bool = False): + """ + Set the categories to the specified new categories. 
+ + ``new_categories`` can include new categories (which will result in + unused categories) or remove old categories (which results in values + set to ``NaN``). If ``rename=True``, the categories will simply be renamed + (less or more items than in old categories will result in values set to + ``NaN`` or in unused categories respectively). + + This method can be used to perform more than one action of adding, + removing, and reordering simultaneously and is therefore faster than + performing the individual steps via the more specialised methods. + + On the other hand this methods does not do checks (e.g., whether the + old categories are included in the new categories on a reorder), which + can result in surprising changes, for example when using special string + dtypes, which does not considers a S1 string equal to a single char + python string. + + Parameters + ---------- + new_categories : Index-like + The categories in new order. + ordered : bool, default False + Whether or not the categorical is treated as a ordered categorical. + If not given, do not change the ordered information. + rename : bool, default False + Whether or not the new_categories should be considered as a rename + of the old categories or as reordered categories. + + Returns + ------- + Categorical with reordered categories. + + Raises + ------ + ValueError + If new_categories does not validate as categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + + Examples + -------- + For :class:`pandas.Series`: + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'A'], + ... categories=['a', 'b', 'c'], ordered=True) + >>> ser = pd.Series(raw_cat) + >>> ser + 0 a + 1 b + 2 c + 3 NaN + dtype: category + Categories (3, object): ['a' < 'b' < 'c'] + + >>> ser.cat.set_categories(['A', 'B', 'C'], rename=True) + 0 A + 1 B + 2 C + 3 NaN + dtype: category + Categories (3, object): ['A' < 'B' < 'C'] + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'A'], + ... categories=['a', 'b', 'c'], ordered=True) + >>> ci + CategoricalIndex(['a', 'b', 'c', nan], categories=['a', 'b', 'c'], + ordered=True, dtype='category') + + >>> ci.set_categories(['A', 'b', 'c']) + CategoricalIndex([nan, 'b', 'c', nan], categories=['A', 'b', 'c'], + ordered=True, dtype='category') + >>> ci.set_categories(['A', 'b', 'c'], rename=True) + CategoricalIndex(['A', 'b', 'c', nan], categories=['A', 'b', 'c'], + ordered=True, dtype='category') + """ + + if ordered is None: + ordered = self.dtype.ordered + new_dtype = CategoricalDtype(new_categories, ordered=ordered) + + cat = self.copy() + if rename: + if cat.dtype.categories is not None and len(new_dtype.categories) < len( + cat.dtype.categories + ): + # remove all _codes which are larger and set to -1/NaN + cat._codes[cat._codes >= len(new_dtype.categories)] = -1 + codes = cat._codes + else: + codes = recode_for_categories( + cat.codes, cat.categories, new_dtype.categories + ) + NDArrayBacked.__init__(cat, codes, new_dtype) + return cat + + def rename_categories(self, new_categories) -> Self: + """ + Rename categories. + + Parameters + ---------- + new_categories : list-like, dict-like or callable + + New categories which will replace old categories. 
+ + * list-like: all items must be unique and the number of items in + the new categories must match the existing number of categories. + + * dict-like: specifies a mapping from + old categories to new. Categories not contained in the mapping + are passed through and extra categories in the mapping are + ignored. + + * callable : a callable that is called on all items in the old + categories and whose return values comprise the new categories. + + Returns + ------- + Categorical + Categorical with renamed categories. + + Raises + ------ + ValueError + If new categories are list-like and do not have the same number of + items than the current categories or do not validate as categories + + See Also + -------- + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. + + Examples + -------- + >>> c = pd.Categorical(['a', 'a', 'b']) + >>> c.rename_categories([0, 1]) + [0, 0, 1] + Categories (2, int64): [0, 1] + + For dict-like ``new_categories``, extra keys are ignored and + categories not in the dictionary are passed through + + >>> c.rename_categories({'a': 'A', 'c': 'C'}) + ['A', 'A', 'b'] + Categories (2, object): ['A', 'b'] + + You may also provide a callable to create the new categories + + >>> c.rename_categories(lambda x: x.upper()) + ['A', 'A', 'B'] + Categories (2, object): ['A', 'B'] + """ + + if is_dict_like(new_categories): + new_categories = [ + new_categories.get(item, item) for item in self.categories + ] + elif callable(new_categories): + new_categories = [new_categories(item) for item in self.categories] + + cat = self.copy() + cat._set_categories(new_categories) + return cat + + def reorder_categories(self, new_categories, ordered=None) -> Self: + """ + Reorder categories as specified in new_categories. + + ``new_categories`` need to include all old categories and no new category + items. + + Parameters + ---------- + new_categories : Index-like + The categories in new order. + ordered : bool, optional + Whether or not the categorical is treated as a ordered categorical. + If not given, do not change the ordered information. + + Returns + ------- + Categorical + Categorical with reordered categories. + + Raises + ------ + ValueError + If the new categories do not contain all old category items or any + new ones + + See Also + -------- + rename_categories : Rename categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. 
+ + Examples + -------- + For :class:`pandas.Series`: + + >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') + >>> ser = ser.cat.reorder_categories(['c', 'b', 'a'], ordered=True) + >>> ser + 0 a + 1 b + 2 c + 3 a + dtype: category + Categories (3, object): ['c' < 'b' < 'a'] + + >>> ser.sort_values() + 2 c + 1 b + 0 a + 3 a + dtype: category + Categories (3, object): ['c' < 'b' < 'a'] + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a']) + >>> ci + CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'], + ordered=False, dtype='category') + >>> ci.reorder_categories(['c', 'b', 'a'], ordered=True) + CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'], + ordered=True, dtype='category') + """ + if ( + len(self.categories) != len(new_categories) + or not self.categories.difference(new_categories).empty + ): + raise ValueError( + "items in new_categories are not the same as in old categories" + ) + return self.set_categories(new_categories, ordered=ordered) + + def add_categories(self, new_categories) -> Self: + """ + Add new categories. + + `new_categories` will be included at the last/highest place in the + categories and will be unused directly after this call. + + Parameters + ---------- + new_categories : category or list-like of category + The new categories to be included. + + Returns + ------- + Categorical + Categorical with new categories added. + + Raises + ------ + ValueError + If the new categories include old categories or do not validate as + categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. + + Examples + -------- + >>> c = pd.Categorical(['c', 'b', 'c']) + >>> c + ['c', 'b', 'c'] + Categories (2, object): ['b', 'c'] + + >>> c.add_categories(['d', 'a']) + ['c', 'b', 'c'] + Categories (4, object): ['b', 'c', 'd', 'a'] + """ + + if not is_list_like(new_categories): + new_categories = [new_categories] + already_included = set(new_categories) & set(self.dtype.categories) + if len(already_included) != 0: + raise ValueError( + f"new categories must not include old categories: {already_included}" + ) + + if hasattr(new_categories, "dtype"): + from pandas import Series + + dtype = find_common_type( + [self.dtype.categories.dtype, new_categories.dtype] + ) + new_categories = Series( + list(self.dtype.categories) + list(new_categories), dtype=dtype + ) + else: + new_categories = list(self.dtype.categories) + list(new_categories) + + new_dtype = CategoricalDtype(new_categories, self.ordered) + cat = self.copy() + codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories) + NDArrayBacked.__init__(cat, codes, new_dtype) + return cat + + def remove_categories(self, removals) -> Self: + """ + Remove the specified categories. + + `removals` must be included in the old categories. Values which were in + the removed categories will be set to NaN + + Parameters + ---------- + removals : category or list of categories + The categories which should be removed. + + Returns + ------- + Categorical + Categorical with removed categories. + + Raises + ------ + ValueError + If the removals are not contained in the categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. 
+ remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. + + Examples + -------- + >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) + >>> c + ['a', 'c', 'b', 'c', 'd'] + Categories (4, object): ['a', 'b', 'c', 'd'] + + >>> c.remove_categories(['d', 'a']) + [NaN, 'c', 'b', 'c', NaN] + Categories (2, object): ['b', 'c'] + """ + from pandas import Index + + if not is_list_like(removals): + removals = [removals] + + removals = Index(removals).unique().dropna() + new_categories = ( + self.dtype.categories.difference(removals, sort=False) + if self.dtype.ordered is True + else self.dtype.categories.difference(removals) + ) + not_included = removals.difference(self.dtype.categories) + + if len(not_included) != 0: + not_included = set(not_included) + raise ValueError(f"removals must all be in old categories: {not_included}") + + return self.set_categories(new_categories, ordered=self.ordered, rename=False) + + def remove_unused_categories(self) -> Self: + """ + Remove categories which are not used. + + Returns + ------- + Categorical + Categorical with unused categories dropped. + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + set_categories : Set the categories to the specified ones. + + Examples + -------- + >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) + >>> c + ['a', 'c', 'b', 'c', 'd'] + Categories (4, object): ['a', 'b', 'c', 'd'] + + >>> c[2] = 'a' + >>> c[4] = 'c' + >>> c + ['a', 'c', 'a', 'c', 'c'] + Categories (4, object): ['a', 'b', 'c', 'd'] + + >>> c.remove_unused_categories() + ['a', 'c', 'a', 'c', 'c'] + Categories (2, object): ['a', 'c'] + """ + idx, inv = np.unique(self._codes, return_inverse=True) + + if idx.size != 0 and idx[0] == -1: # na sentinel + idx, inv = idx[1:], inv - 1 + + new_categories = self.dtype.categories.take(idx) + new_dtype = CategoricalDtype._from_fastpath( + new_categories, ordered=self.ordered + ) + new_codes = coerce_indexer_dtype(inv, new_dtype.categories) + + cat = self.copy() + NDArrayBacked.__init__(cat, new_codes, new_dtype) + return cat + + # ------------------------------------------------------------------ + + def map( + self, + mapper, + na_action: Literal["ignore"] | None | lib.NoDefault = lib.no_default, + ): + """ + Map categories using an input mapping or function. + + Maps the categories to new categories. If the mapping correspondence is + one-to-one the result is a :class:`~pandas.Categorical` which has the + same order property as the original, otherwise a :class:`~pandas.Index` + is returned. NaN values are unaffected. + + If a `dict` or :class:`~pandas.Series` is used any unmapped category is + mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` + will be returned. + + Parameters + ---------- + mapper : function, dict, or Series + Mapping correspondence. + na_action : {None, 'ignore'}, default 'ignore' + If 'ignore', propagate NaN values, without passing them to the + mapping correspondence. + + .. deprecated:: 2.1.0 + + The default value of 'ignore' has been deprecated and will be changed to + None in the future. + + Returns + ------- + pandas.Categorical or pandas.Index + Mapped categorical. + + See Also + -------- + CategoricalIndex.map : Apply a mapping correspondence on a + :class:`~pandas.CategoricalIndex`. + Index.map : Apply a mapping correspondence on an + :class:`~pandas.Index`. 
+ Series.map : Apply a mapping correspondence on a + :class:`~pandas.Series`. + Series.apply : Apply more complex functions on a + :class:`~pandas.Series`. + + Examples + -------- + >>> cat = pd.Categorical(['a', 'b', 'c']) + >>> cat + ['a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + >>> cat.map(lambda x: x.upper(), na_action=None) + ['A', 'B', 'C'] + Categories (3, object): ['A', 'B', 'C'] + >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}, na_action=None) + ['first', 'second', 'third'] + Categories (3, object): ['first', 'second', 'third'] + + If the mapping is one-to-one the ordering of the categories is + preserved: + + >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) + >>> cat + ['a', 'b', 'c'] + Categories (3, object): ['a' < 'b' < 'c'] + >>> cat.map({'a': 3, 'b': 2, 'c': 1}, na_action=None) + [3, 2, 1] + Categories (3, int64): [3 < 2 < 1] + + If the mapping is not one-to-one an :class:`~pandas.Index` is returned: + + >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}, na_action=None) + Index(['first', 'second', 'first'], dtype='object') + + If a `dict` is used, all unmapped categories are mapped to `NaN` and + the result is an :class:`~pandas.Index`: + + >>> cat.map({'a': 'first', 'b': 'second'}, na_action=None) + Index(['first', 'second', nan], dtype='object') + """ + if na_action is lib.no_default: + warnings.warn( + "The default value of 'ignore' for the `na_action` parameter in " + "pandas.Categorical.map is deprecated and will be " + "changed to 'None' in a future version. Please set na_action to the " + "desired value to avoid seeing this warning", + FutureWarning, + stacklevel=find_stack_level(), + ) + na_action = "ignore" + + assert callable(mapper) or is_dict_like(mapper) + + new_categories = self.categories.map(mapper) + + has_nans = np.any(self._codes == -1) + + na_val = np.nan + if na_action is None and has_nans: + na_val = mapper(np.nan) if callable(mapper) else mapper.get(np.nan, np.nan) + + if new_categories.is_unique and not new_categories.hasnans and na_val is np.nan: + new_dtype = CategoricalDtype(new_categories, ordered=self.ordered) + return self.from_codes(self._codes.copy(), dtype=new_dtype, validate=False) + + if has_nans: + new_categories = new_categories.insert(len(new_categories), na_val) + + return np.take(new_categories, self._codes) + + __eq__ = _cat_compare_op(operator.eq) + __ne__ = _cat_compare_op(operator.ne) + __lt__ = _cat_compare_op(operator.lt) + __gt__ = _cat_compare_op(operator.gt) + __le__ = _cat_compare_op(operator.le) + __ge__ = _cat_compare_op(operator.ge) + + # ------------------------------------------------------------- + # Validators; ideally these can be de-duplicated + + def _validate_setitem_value(self, value): + if not is_hashable(value): + # wrap scalars and hashable-listlikes in list + return self._validate_listlike(value) + else: + return self._validate_scalar(value) + + def _validate_scalar(self, fill_value): + """ + Convert a user-facing fill_value to a representation to use with our + underlying ndarray, raising TypeError if this is not possible. 
+ + Parameters + ---------- + fill_value : object + + Returns + ------- + fill_value : int + + Raises + ------ + TypeError + """ + + if is_valid_na_for_dtype(fill_value, self.categories.dtype): + fill_value = -1 + elif fill_value in self.categories: + fill_value = self._unbox_scalar(fill_value) + else: + raise TypeError( + "Cannot setitem on a Categorical with a new " + f"category ({fill_value}), set the categories first" + ) from None + return fill_value + + @classmethod + def _validate_codes_for_dtype(cls, codes, *, dtype: CategoricalDtype) -> np.ndarray: + if isinstance(codes, ExtensionArray) and is_integer_dtype(codes.dtype): + # Avoid the implicit conversion of Int to object + if isna(codes).any(): + raise ValueError("codes cannot contain NA values") + codes = codes.to_numpy(dtype=np.int64) + else: + codes = np.asarray(codes) + if len(codes) and codes.dtype.kind not in "iu": + raise ValueError("codes need to be array-like integers") + + if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1): + raise ValueError("codes need to be between -1 and len(categories)-1") + return codes + + # ------------------------------------------------------------- + + @ravel_compat + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + """ + The numpy array interface. + + Returns + ------- + numpy.array + A numpy array of either the specified dtype or, + if dtype==None (default), the same dtype as + categorical.categories.dtype. + + Examples + -------- + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + + The following calls ``cat.__array__`` + + >>> np.asarray(cat) + array(['a', 'b'], dtype=object) + """ + ret = take_nd(self.categories._values, self._codes) + if dtype and np.dtype(dtype) != self.categories.dtype: + return np.asarray(ret, dtype) + # When we're a Categorical[ExtensionArray], like Interval, + # we need to ensure __array__ gets all the way to an + # ndarray. + return np.asarray(ret) + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + # for binary ops, use our custom dunder methods + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. test_numpy_ufuncs_out + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + # e.g. 
TestCategoricalAnalytics::test_min_max_ordered + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + # for all other cases, raise for now (similarly as what happens in + # Series.__array_prepare__) + raise TypeError( + f"Object with dtype {self.dtype} cannot perform " + f"the numpy op {ufunc.__name__}" + ) + + def __setstate__(self, state) -> None: + """Necessary for making this object picklable""" + if not isinstance(state, dict): + return super().__setstate__(state) + + if "_dtype" not in state: + state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"]) + + if "_codes" in state and "_ndarray" not in state: + # backward compat, changed what is property vs attribute + state["_ndarray"] = state.pop("_codes") + + super().__setstate__(state) + + @property + def nbytes(self) -> int: + return self._codes.nbytes + self.dtype.categories.values.nbytes + + def memory_usage(self, deep: bool = False) -> int: + """ + Memory usage of my values + + Parameters + ---------- + deep : bool + Introspect the data deeply, interrogate + `object` dtypes for system-level memory consumption + + Returns + ------- + bytes used + + Notes + ----- + Memory usage does not include memory consumed by elements that + are not components of the array if deep=False + + See Also + -------- + numpy.ndarray.nbytes + """ + return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep) + + def isna(self) -> npt.NDArray[np.bool_]: + """ + Detect missing values + + Missing values (-1 in .codes) are detected. + + Returns + ------- + np.ndarray[bool] of whether my values are null + + See Also + -------- + isna : Top-level isna. + isnull : Alias of isna. + Categorical.notna : Boolean inverse of Categorical.isna. + + """ + return self._codes == -1 + + isnull = isna + + def notna(self) -> npt.NDArray[np.bool_]: + """ + Inverse of isna + + Both missing values (-1 in .codes) and NA as a category are detected as + null. + + Returns + ------- + np.ndarray[bool] of whether my values are not null + + See Also + -------- + notna : Top-level notna. + notnull : Alias of notna. + Categorical.isna : Boolean inverse of Categorical.notna. + + """ + return ~self.isna() + + notnull = notna + + def value_counts(self, dropna: bool = True) -> Series: + """ + Return a Series containing counts of each category. + + Every category will have an entry, even those with a count of 0. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NaN. 
+ + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + from pandas import ( + CategoricalIndex, + Series, + ) + + code, cat = self._codes, self.categories + ncat, mask = (len(cat), code >= 0) + ix, clean = np.arange(ncat), mask.all() + + if dropna or clean: + obs = code if clean else code[mask] + count = np.bincount(obs, minlength=ncat or 0) + else: + count = np.bincount(np.where(mask, code, ncat)) + ix = np.append(ix, -1) + + ix = coerce_indexer_dtype(ix, self.dtype.categories) + ix = self._from_backing_data(ix) + + return Series( + count, index=CategoricalIndex(ix), dtype="int64", name="count", copy=False + ) + + # error: Argument 2 of "_empty" is incompatible with supertype + # "NDArrayBackedExtensionArray"; supertype defines the argument type as + # "ExtensionDtype" + @classmethod + def _empty( # type: ignore[override] + cls, shape: Shape, dtype: CategoricalDtype + ) -> Self: + """ + Analogous to np.empty(shape, dtype=dtype) + + Parameters + ---------- + shape : tuple[int] + dtype : CategoricalDtype + """ + arr = cls._from_sequence([], dtype=dtype) + + # We have to use np.zeros instead of np.empty otherwise the resulting + # ndarray may contain codes not supported by this dtype, in which + # case repr(result) could segfault. + backing = np.zeros(shape, dtype=arr._ndarray.dtype) + + return arr._from_backing_data(backing) + + def _internal_get_values(self) -> ArrayLike: + """ + Return the values. + + For internal compatibility with pandas formatting. + + Returns + ------- + np.ndarray or ExtensionArray + A numpy array or ExtensionArray of the same dtype as + categorical.categories.dtype. + """ + # if we are a datetime and period index, return Index to keep metadata + if needs_i8_conversion(self.categories.dtype): + return self.categories.take(self._codes, fill_value=NaT)._values + elif is_integer_dtype(self.categories.dtype) and -1 in self._codes: + return ( + self.categories.astype("object") + .take(self._codes, fill_value=np.nan) + ._values + ) + return np.array(self) + + def check_for_ordered(self, op) -> None: + """assert that we are ordered""" + if not self.ordered: + raise TypeError( + f"Categorical is not ordered for operation {op}\n" + "you can use .as_ordered() to change the " + "Categorical to an ordered one\n" + ) + + def argsort( + self, *, ascending: bool = True, kind: SortKind = "quicksort", **kwargs + ): + """ + Return the indices that would sort the Categorical. + + Missing values are sorted at the end. + + Parameters + ---------- + ascending : bool, default True + Whether the indices should result in an ascending + or descending sort. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. + **kwargs: + passed through to :func:`numpy.argsort`. + + Returns + ------- + np.ndarray[np.intp] + + See Also + -------- + numpy.ndarray.argsort + + Notes + ----- + While an ordering is applied to the category values, arg-sorting + in this context refers more to organizing and grouping together + based on matching category values. Thus, this function can be + called on an unordered Categorical instance unlike the functions + 'Categorical.min' and 'Categorical.max'. + + Examples + -------- + >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort() + array([2, 0, 1, 3]) + + >>> cat = pd.Categorical(['b', 'b', 'a', 'c'], + ... categories=['c', 'b', 'a'], + ... 
ordered=True) + >>> cat.argsort() + array([3, 0, 1, 2]) + + Missing values are placed at the end + + >>> cat = pd.Categorical([2, None, 1]) + >>> cat.argsort() + array([2, 0, 1]) + """ + return super().argsort(ascending=ascending, kind=kind, **kwargs) + + @overload + def sort_values( + self, + *, + inplace: Literal[False] = ..., + ascending: bool = ..., + na_position: str = ..., + ) -> Self: + ... + + @overload + def sort_values( + self, *, inplace: Literal[True], ascending: bool = ..., na_position: str = ... + ) -> None: + ... + + def sort_values( + self, + *, + inplace: bool = False, + ascending: bool = True, + na_position: str = "last", + ) -> Self | None: + """ + Sort the Categorical by category value returning a new + Categorical by default. + + While an ordering is applied to the category values, sorting in this + context refers more to organizing and grouping together based on + matching category values. Thus, this function can be called on an + unordered Categorical instance unlike the functions 'Categorical.min' + and 'Categorical.max'. + + Parameters + ---------- + inplace : bool, default False + Do operation in place. + ascending : bool, default True + Order ascending. Passing False orders descending. The + ordering parameter provides the method by which the + category values are organized. + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end + + Returns + ------- + Categorical or None + + See Also + -------- + Categorical.sort + Series.sort_values + + Examples + -------- + >>> c = pd.Categorical([1, 2, 2, 1, 5]) + >>> c + [1, 2, 2, 1, 5] + Categories (3, int64): [1, 2, 5] + >>> c.sort_values() + [1, 1, 2, 2, 5] + Categories (3, int64): [1, 2, 5] + >>> c.sort_values(ascending=False) + [5, 2, 2, 1, 1] + Categories (3, int64): [1, 2, 5] + + >>> c = pd.Categorical([1, 2, 2, 1, 5]) + + 'sort_values' behaviour with NaNs. Note that 'na_position' + is independent of the 'ascending' parameter: + + >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) + >>> c + [NaN, 2, 2, NaN, 5] + Categories (2, int64): [2, 5] + >>> c.sort_values() + [2, 2, 5, NaN, NaN] + Categories (2, int64): [2, 5] + >>> c.sort_values(ascending=False) + [5, 2, 2, NaN, NaN] + Categories (2, int64): [2, 5] + >>> c.sort_values(na_position='first') + [NaN, NaN, 2, 2, 5] + Categories (2, int64): [2, 5] + >>> c.sort_values(ascending=False, na_position='first') + [NaN, NaN, 5, 2, 2] + Categories (2, int64): [2, 5] + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if na_position not in ["last", "first"]: + raise ValueError(f"invalid na_position: {repr(na_position)}") + + sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) + + if not inplace: + codes = self._codes[sorted_idx] + return self._from_backing_data(codes) + self._codes[:] = self._codes[sorted_idx] + return None + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + if axis != 0: + raise NotImplementedError + vff = self._values_for_rank() + return algorithms.rank( + vff, + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + + def _values_for_rank(self) -> np.ndarray: + """ + For correctly ranking ordered categorical data. See GH#15420 + + Ordered categorical data should be ranked on the basis of + codes with -1 translated to NaN. 
+ + Returns + ------- + numpy.array + + """ + from pandas import Series + + if self.ordered: + values = self.codes + mask = values == -1 + if mask.any(): + values = values.astype("float64") + values[mask] = np.nan + elif is_any_real_numeric_dtype(self.categories.dtype): + values = np.array(self) + else: + # reorder the categories (so rank can use the float codes) + # instead of passing an object array to rank + values = np.array( + self.rename_categories( + Series(self.categories, copy=False).rank().values + ) + ) + return values + + def _hash_pandas_object( + self, *, encoding: str, hash_key: str, categorize: bool + ) -> npt.NDArray[np.uint64]: + """ + Hash a Categorical by hashing its categories, and then mapping the codes + to the hashes. + + Parameters + ---------- + encoding : str + hash_key : str + categorize : bool + Ignored for Categorical. + + Returns + ------- + np.ndarray[uint64] + """ + # Note we ignore categorize, as we are already Categorical. + from pandas.core.util.hashing import hash_array + + # Convert ExtensionArrays to ndarrays + values = np.asarray(self.categories._values) + hashed = hash_array(values, encoding, hash_key, categorize=False) + + # we have uint64, as we don't directly support missing values + # we don't want to use take_nd which will coerce to float + # instead, directly construct the result with a + # max(np.uint64) as the missing value indicator + # + # TODO: GH#15362 + + mask = self.isna() + if len(hashed): + result = hashed.take(self._codes) + else: + result = np.zeros(len(mask), dtype="uint64") + + if mask.any(): + result[mask] = lib.u8max + + return result + + # ------------------------------------------------------------------ + # NDArrayBackedExtensionArray compat + + @property + def _codes(self) -> np.ndarray: + return self._ndarray + + def _box_func(self, i: int): + if i == -1: + return np.nan + return self.categories[i] + + def _unbox_scalar(self, key) -> int: + # searchsorted is very performance sensitive. By converting codes + # to same dtype as self.codes, we get much faster performance. + code = self.categories.get_loc(key) + code = self._ndarray.dtype.type(code) + return code + + # ------------------------------------------------------------------ + + def __iter__(self) -> Iterator: + """ + Returns an Iterator over the values of this Categorical. + """ + if self.ndim == 1: + return iter(self._internal_get_values().tolist()) + else: + return (self[n] for n in range(len(self))) + + def __contains__(self, key) -> bool: + """ + Returns True if `key` is in this Categorical. + """ + # if key is a NaN, check if any NaN is in self. + if is_valid_na_for_dtype(key, self.categories.dtype): + return bool(self.isna().any()) + + return contains(self, key, container=self._codes) + + # ------------------------------------------------------------------ + # Rendering Methods + + def _formatter(self, boxed: bool = False): + # Returning None here will cause format_array to do inference. 
+ return None + + def _repr_categories(self) -> list[str]: + """ + return the base repr for the categories + """ + max_categories = ( + 10 + if get_option("display.max_categories") == 0 + else get_option("display.max_categories") + ) + from pandas.io.formats import format as fmt + + format_array = partial( + fmt.format_array, formatter=None, quoting=QUOTE_NONNUMERIC + ) + if len(self.categories) > max_categories: + num = max_categories // 2 + head = format_array(self.categories[:num]._values) + tail = format_array(self.categories[-num:]._values) + category_strs = head + ["..."] + tail + else: + category_strs = format_array(self.categories._values) + + # Strip all leading spaces, which format_array adds for columns... + category_strs = [x.strip() for x in category_strs] + return category_strs + + def _get_repr_footer(self) -> str: + """ + Returns a string representation of the footer. + """ + category_strs = self._repr_categories() + dtype = str(self.categories.dtype) + levheader = f"Categories ({len(self.categories)}, {dtype}): " + width, _ = get_terminal_size() + max_width = get_option("display.width") or width + if console.in_ipython_frontend(): + # 0 = no breaks + max_width = 0 + levstring = "" + start = True + cur_col_len = len(levheader) # header + sep_len, sep = (3, " < ") if self.ordered else (2, ", ") + linesep = f"{sep.rstrip()}\n" # remove whitespace + for val in category_strs: + if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: + levstring += linesep + (" " * (len(levheader) + 1)) + cur_col_len = len(levheader) + 1 # header + a whitespace + elif not start: + levstring += sep + cur_col_len += len(val) + levstring += val + start = False + # replace to simple save space by + return f"{levheader}[{levstring.replace(' < ... < ', ' ... ')}]" + + def _get_values_repr(self) -> str: + from pandas.io.formats import format as fmt + + assert len(self) > 0 + + vals = self._internal_get_values() + fmt_values = fmt.format_array( + vals, + None, + float_format=None, + na_rep="NaN", + quoting=QUOTE_NONNUMERIC, + ) + + fmt_values = [i.strip() for i in fmt_values] + joined = ", ".join(fmt_values) + result = "[" + joined + "]" + return result + + def __repr__(self) -> str: + """ + String representation. + """ + footer = self._get_repr_footer() + length = len(self) + max_len = 10 + if length > max_len: + # In long cases we do not display all entries, so we add Length + # information to the __repr__. 
+ num = max_len // 2 + head = self[:num]._get_values_repr() + tail = self[-(max_len - num) :]._get_values_repr() + body = f"{head[:-1]}, ..., {tail[1:]}" + length_info = f"Length: {len(self)}" + result = f"{body}\n{length_info}\n{footer}" + elif length > 0: + body = self._get_values_repr() + result = f"{body}\n{footer}" + else: + # In the empty case we use a comma instead of newline to get + # a more compact __repr__ + body = "[]" + result = f"{body}, {footer}" + + return result + + # ------------------------------------------------------------------ + + def _validate_listlike(self, value): + # NB: here we assume scalar-like tuples have already been excluded + value = extract_array(value, extract_numpy=True) + + # require identical categories set + if isinstance(value, Categorical): + if self.dtype != value.dtype: + raise TypeError( + "Cannot set a Categorical with another, " + "without identical categories" + ) + # dtype equality implies categories_match_up_to_permutation + value = self._encode_with_my_categories(value) + return value._codes + + from pandas import Index + + # tupleize_cols=False for e.g. test_fillna_iterable_category GH#41914 + to_add = Index._with_infer(value, tupleize_cols=False).difference( + self.categories + ) + + # no assignments of values not in categories, but it's always ok to set + # something to np.nan + if len(to_add) and not isna(to_add).all(): + raise TypeError( + "Cannot setitem on a Categorical with a new " + "category, set the categories first" + ) + + codes = self.categories.get_indexer(value) + return codes.astype(self._ndarray.dtype, copy=False) + + def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]: + """ + Compute the inverse of a categorical, returning + a dict of categories -> indexers. + + *This is an internal function* + + Returns + ------- + Dict[Hashable, np.ndarray[np.intp]] + dict of categories -> indexers + + Examples + -------- + >>> c = pd.Categorical(list('aabca')) + >>> c + ['a', 'a', 'b', 'c', 'a'] + Categories (3, object): ['a', 'b', 'c'] + >>> c.categories + Index(['a', 'b', 'c'], dtype='object') + >>> c.codes + array([0, 0, 1, 2, 0], dtype=int8) + >>> c._reverse_indexer() + {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} + + """ + categories = self.categories + r, counts = libalgos.groupsort_indexer( + ensure_platform_int(self.codes), categories.size + ) + counts = ensure_int64(counts).cumsum() + _result = (r[start:end] for start, end in zip(counts, counts[1:])) + return dict(zip(categories, _result)) + + # ------------------------------------------------------------------ + # Reductions + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + result = super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) + if name in ["argmax", "argmin"]: + # don't wrap in Categorical! + return result + if keepdims: + return type(self)(result, dtype=self.dtype) + else: + return result + + def min(self, *, skipna: bool = True, **kwargs): + """ + The minimum value of the object. + + Only ordered `Categoricals` have a minimum! + + Raises + ------ + TypeError + If the `Categorical` is not `ordered`. 
+ + Returns + ------- + min : the minimum of this `Categorical`, NA value if empty + """ + nv.validate_minmax_axis(kwargs.get("axis", 0)) + nv.validate_min((), kwargs) + self.check_for_ordered("min") + + if not len(self._codes): + return self.dtype.na_value + + good = self._codes != -1 + if not good.all(): + if skipna and good.any(): + pointer = self._codes[good].min() + else: + return np.nan + else: + pointer = self._codes.min() + return self._wrap_reduction_result(None, pointer) + + def max(self, *, skipna: bool = True, **kwargs): + """ + The maximum value of the object. + + Only ordered `Categoricals` have a maximum! + + Raises + ------ + TypeError + If the `Categorical` is not `ordered`. + + Returns + ------- + max : the maximum of this `Categorical`, NA if array is empty + """ + nv.validate_minmax_axis(kwargs.get("axis", 0)) + nv.validate_max((), kwargs) + self.check_for_ordered("max") + + if not len(self._codes): + return self.dtype.na_value + + good = self._codes != -1 + if not good.all(): + if skipna and good.any(): + pointer = self._codes[good].max() + else: + return np.nan + else: + pointer = self._codes.max() + return self._wrap_reduction_result(None, pointer) + + def _mode(self, dropna: bool = True) -> Categorical: + codes = self._codes + mask = None + if dropna: + mask = self.isna() + + res_codes = algorithms.mode(codes, mask=mask) + res_codes = cast(np.ndarray, res_codes) + assert res_codes.dtype == codes.dtype + res = self._from_backing_data(res_codes) + return res + + # ------------------------------------------------------------------ + # ExtensionArray Interface + + def unique(self) -> Self: + """ + Return the ``Categorical`` which ``categories`` and ``codes`` are + unique. + + .. versionchanged:: 1.3.0 + + Previously, unused categories were dropped from the new categories. + + Returns + ------- + Categorical + + See Also + -------- + pandas.unique + CategoricalIndex.unique + Series.unique : Return unique values of Series object. + + Examples + -------- + >>> pd.Categorical(list("baabc")).unique() + ['b', 'a', 'c'] + Categories (3, object): ['a', 'b', 'c'] + >>> pd.Categorical(list("baab"), categories=list("abc"), ordered=True).unique() + ['b', 'a'] + Categories (3, object): ['a' < 'b' < 'c'] + """ + # pylint: disable=useless-parent-delegation + return super().unique() + + def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray: + # make sure we have correct itemsize for resulting codes + assert res_values.dtype == self._ndarray.dtype + return res_values + + def equals(self, other: object) -> bool: + """ + Returns True if categorical arrays are equal. 
+ + Parameters + ---------- + other : `Categorical` + + Returns + ------- + bool + """ + if not isinstance(other, Categorical): + return False + elif self._categories_match_up_to_permutation(other): + other = self._encode_with_my_categories(other) + return np.array_equal(self._codes, other._codes) + return False + + @classmethod + def _concat_same_type(cls, to_concat: Sequence[Self], axis: AxisInt = 0) -> Self: + from pandas.core.dtypes.concat import union_categoricals + + first = to_concat[0] + if axis >= first.ndim: + raise ValueError( + f"axis {axis} is out of bounds for array of dimension {first.ndim}" + ) + + if axis == 1: + # Flatten, concatenate then reshape + if not all(x.ndim == 2 for x in to_concat): + raise ValueError + + # pass correctly-shaped to union_categoricals + tc_flat = [] + for obj in to_concat: + tc_flat.extend([obj[:, i] for i in range(obj.shape[1])]) + + res_flat = cls._concat_same_type(tc_flat, axis=0) + + result = res_flat.reshape(len(first), -1, order="F") + return result + + result = union_categoricals(to_concat) + return result + + # ------------------------------------------------------------------ + + def _encode_with_my_categories(self, other: Categorical) -> Categorical: + """ + Re-encode another categorical using this Categorical's categories. + + Notes + ----- + This assumes we have already checked + self._categories_match_up_to_permutation(other). + """ + # Indexing on codes is more efficient if categories are the same, + # so we can apply some optimizations based on the degree of + # dtype-matching. + codes = recode_for_categories( + other.codes, other.categories, self.categories, copy=False + ) + return self._from_backing_data(codes) + + def _categories_match_up_to_permutation(self, other: Categorical) -> bool: + """ + Returns True if categoricals are the same dtype + same categories, and same ordered + + Parameters + ---------- + other : Categorical + + Returns + ------- + bool + """ + return hash(self.dtype) == hash(other.dtype) + + def describe(self) -> DataFrame: + """ + Describes this Categorical + + Returns + ------- + description: `DataFrame` + A dataframe with frequency and counts by category. + """ + counts = self.value_counts(dropna=False) + freqs = counts / counts.sum() + + from pandas import Index + from pandas.core.reshape.concat import concat + + result = concat([counts, freqs], axis=1) + result.columns = Index(["counts", "freqs"]) + result.index.name = "categories" + + return result + + def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: + """ + Check whether `values` are contained in Categorical. + + Return a boolean NumPy Array showing whether each element in + the Categorical matches an element in the passed sequence of + `values` exactly. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + The sequence of values to test. Passing in a single string will + raise a ``TypeError``. Instead, turn a single string into a + list of one element. + + Returns + ------- + np.ndarray[bool] + + Raises + ------ + TypeError + * If `values` is not a set or list-like + + See Also + -------- + pandas.Series.isin : Equivalent method on Series. + + Examples + -------- + >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', + ... 'hippo']) + >>> s.isin(['cow', 'lama']) + array([ True, True, True, False, True, False]) + + Passing a single string as ``s.isin('lama')`` will raise an error. 
Use + a list of one element instead: + + >>> s.isin(['lama']) + array([ True, False, True, False, True, False]) + """ + null_mask = np.asarray(isna(values)) + code_values = self.categories.get_indexer_for(values) + code_values = code_values[null_mask | (code_values >= 0)] + return algorithms.isin(self.codes, code_values) + + def _replace(self, *, to_replace, value, inplace: bool = False): + from pandas import Index + + orig_dtype = self.dtype + + inplace = validate_bool_kwarg(inplace, "inplace") + cat = self if inplace else self.copy() + + mask = isna(np.asarray(value)) + if mask.any(): + removals = np.asarray(to_replace)[mask] + removals = cat.categories[cat.categories.isin(removals)] + new_cat = cat.remove_categories(removals) + NDArrayBacked.__init__(cat, new_cat.codes, new_cat.dtype) + + ser = cat.categories.to_series() + ser = ser.replace(to_replace=to_replace, value=value) + + all_values = Index(ser) + + # GH51016: maintain order of existing categories + idxr = cat.categories.get_indexer_for(all_values) + locs = np.arange(len(ser)) + locs = np.where(idxr == -1, locs, idxr) + locs = locs.argsort() + + new_categories = ser.take(locs) + new_categories = new_categories.drop_duplicates(keep="first") + new_categories = Index(new_categories) + new_codes = recode_for_categories( + cat._codes, all_values, new_categories, copy=False + ) + new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered) + NDArrayBacked.__init__(cat, new_codes, new_dtype) + + if new_dtype != orig_dtype: + warnings.warn( + # GH#55147 + "The behavior of Series.replace (and DataFrame.replace) with " + "CategoricalDtype is deprecated. In a future version, replace " + "will only be used for cases that preserve the categories. " + "To change the categories, use ser.cat.rename_categories " + "instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if not inplace: + return cat + + # ------------------------------------------------------------------------ + # String methods interface + def _str_map( + self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True + ): + # Optimization to apply the callable `f` to the categories once + # and rebuild the result by `take`ing from the result with the codes. + # Returns the same type as the object-dtype implementation though. + from pandas.core.arrays import NumpyExtensionArray + + categories = self.categories + codes = self.codes + result = NumpyExtensionArray(categories.to_numpy())._str_map(f, na_value, dtype) + return take_nd(result, codes, fill_value=na_value) + + def _str_get_dummies(self, sep: str = "|"): + # sep may not be in categories. Just bail on this. 
+ from pandas.core.arrays import NumpyExtensionArray + + return NumpyExtensionArray(self.astype(str))._str_get_dummies(sep) + + # ------------------------------------------------------------------------ + # GroupBy Methods + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ): + from pandas.core.groupby.ops import WrappedCythonOp + + kind = WrappedCythonOp.get_kind_from_how(how) + op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) + + dtype = self.dtype + if how in ["sum", "prod", "cumsum", "cumprod", "skew"]: + raise TypeError(f"{dtype} type does not support {how} operations") + if how in ["min", "max", "rank", "idxmin", "idxmax"] and not dtype.ordered: + # raise TypeError instead of NotImplementedError to ensure we + # don't go down a group-by-group path, since in the empty-groups + # case that would fail to raise + raise TypeError(f"Cannot perform {how} with non-ordered Categorical") + if how not in [ + "rank", + "any", + "all", + "first", + "last", + "min", + "max", + "idxmin", + "idxmax", + ]: + if kind == "transform": + raise TypeError(f"{dtype} type does not support {how} operations") + raise TypeError(f"{dtype} dtype does not support aggregation '{how}'") + + result_mask = None + mask = self.isna() + if how == "rank": + assert self.ordered # checked earlier + npvalues = self._ndarray + elif how in ["first", "last", "min", "max", "idxmin", "idxmax"]: + npvalues = self._ndarray + result_mask = np.zeros(ngroups, dtype=bool) + else: + # any/all + npvalues = self.astype(bool) + + res_values = op._cython_op_ndim_compat( + npvalues, + min_count=min_count, + ngroups=ngroups, + comp_ids=ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + + if how in op.cast_blocklist: + return res_values + elif how in ["first", "last", "min", "max"]: + res_values[result_mask == 1] = -1 + return self._from_backing_data(res_values) + + +# The Series.cat accessor + + +@delegate_names( + delegate=Categorical, accessors=["categories", "ordered"], typ="property" +) +@delegate_names( + delegate=Categorical, + accessors=[ + "rename_categories", + "reorder_categories", + "add_categories", + "remove_categories", + "remove_unused_categories", + "set_categories", + "as_ordered", + "as_unordered", + ], + typ="method", +) +class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): + """ + Accessor object for categorical properties of the Series values. 
+ + Parameters + ---------- + data : Series or CategoricalIndex + + Examples + -------- + >>> s = pd.Series(list("abbccc")).astype("category") + >>> s + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a', 'b', 'c'] + + >>> s.cat.categories + Index(['a', 'b', 'c'], dtype='object') + + >>> s.cat.rename_categories(list("cba")) + 0 c + 1 b + 2 b + 3 a + 4 a + 5 a + dtype: category + Categories (3, object): ['c', 'b', 'a'] + + >>> s.cat.reorder_categories(list("cba")) + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['c', 'b', 'a'] + + >>> s.cat.add_categories(["d", "e"]) + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (5, object): ['a', 'b', 'c', 'd', 'e'] + + >>> s.cat.remove_categories(["a", "c"]) + 0 NaN + 1 b + 2 b + 3 NaN + 4 NaN + 5 NaN + dtype: category + Categories (1, object): ['b'] + + >>> s1 = s.cat.add_categories(["d", "e"]) + >>> s1.cat.remove_unused_categories() + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a', 'b', 'c'] + + >>> s.cat.set_categories(list("abcde")) + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (5, object): ['a', 'b', 'c', 'd', 'e'] + + >>> s.cat.as_ordered() + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a' < 'b' < 'c'] + + >>> s.cat.as_unordered() + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a', 'b', 'c'] + """ + + def __init__(self, data) -> None: + self._validate(data) + self._parent = data.values + self._index = data.index + self._name = data.name + self._freeze() + + @staticmethod + def _validate(data): + if not isinstance(data.dtype, CategoricalDtype): + raise AttributeError("Can only use .cat accessor with a 'category' dtype") + + def _delegate_property_get(self, name: str): + return getattr(self._parent, name) + + # error: Signature of "_delegate_property_set" incompatible with supertype + # "PandasDelegate" + def _delegate_property_set(self, name: str, new_values): # type: ignore[override] + return setattr(self._parent, name, new_values) + + @property + def codes(self) -> Series: + """ + Return Series of codes as well as the index. + + Examples + -------- + >>> raw_cate = pd.Categorical(["a", "b", "c", "a"], categories=["a", "b"]) + >>> ser = pd.Series(raw_cate) + >>> ser.cat.codes + 0 0 + 1 1 + 2 -1 + 3 0 + dtype: int8 + """ + from pandas import Series + + return Series(self._parent.codes, index=self._index) + + def _delegate_method(self, name: str, *args, **kwargs): + from pandas import Series + + method = getattr(self._parent, name) + res = method(*args, **kwargs) + if res is not None: + return Series(res, index=self._index, name=self._name) + + +# utility routines + + +def _get_codes_for_values( + values: Index | Series | ExtensionArray | np.ndarray, + categories: Index, +) -> np.ndarray: + """ + utility routine to turn values into codes given the specified categories + + If `values` is known to be a Categorical, use recode_for_categories instead. + """ + codes = categories.get_indexer_for(values) + return coerce_indexer_dtype(codes, categories) + + +def recode_for_categories( + codes: np.ndarray, old_categories, new_categories, copy: bool = True +) -> np.ndarray: + """ + Convert a set of codes for to a new set of categories + + Parameters + ---------- + codes : np.ndarray + old_categories, new_categories : Index + copy: bool, default True + Whether to copy if the codes are unchanged. 
+ + Returns + ------- + new_codes : np.ndarray[np.int64] + + Examples + -------- + >>> old_cat = pd.Index(['b', 'a', 'c']) + >>> new_cat = pd.Index(['a', 'b']) + >>> codes = np.array([0, 1, 1, 2]) + >>> recode_for_categories(codes, old_cat, new_cat) + array([ 1, 0, 0, -1], dtype=int8) + """ + if len(old_categories) == 0: + # All null anyway, so just retain the nulls + if copy: + return codes.copy() + return codes + elif new_categories.equals(old_categories): + # Same categories, so no need to actually recode + if copy: + return codes.copy() + return codes + + indexer = coerce_indexer_dtype( + new_categories.get_indexer_for(old_categories), new_categories + ) + new_codes = take_nd(indexer, codes, fill_value=-1) + return new_codes + + +def factorize_from_iterable(values) -> tuple[np.ndarray, Index]: + """ + Factorize an input `values` into `categories` and `codes`. Preserves + categorical dtype in `categories`. + + Parameters + ---------- + values : list-like + + Returns + ------- + codes : ndarray + categories : Index + If `values` has a categorical dtype, then `categories` is + a CategoricalIndex keeping the categories and order of `values`. + """ + from pandas import CategoricalIndex + + if not is_list_like(values): + raise TypeError("Input must be list-like") + + categories: Index + + vdtype = getattr(values, "dtype", None) + if isinstance(vdtype, CategoricalDtype): + values = extract_array(values) + # The Categorical we want to build has the same categories + # as values but its codes are by def [0, ..., len(n_categories) - 1] + cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype) + cat = Categorical.from_codes(cat_codes, dtype=values.dtype, validate=False) + + categories = CategoricalIndex(cat) + codes = values.codes + else: + # The value of ordered is irrelevant since we don't use cat as such, + # but only the resulting categories, the order of which is independent + # from ordered. Set ordered to False as default. See GH #15457 + cat = Categorical(values, ordered=False) + categories = cat.categories + codes = cat.codes + return codes, categories + + +def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]: + """ + A higher-level wrapper over `factorize_from_iterable`. + + Parameters + ---------- + iterables : list-like of list-likes + + Returns + ------- + codes : list of ndarrays + categories : list of Indexes + + Notes + ----- + See `factorize_from_iterable` for more info. + """ + if len(iterables) == 0: + # For consistency, it should return two empty lists. 
+ return [], [] + + codes, categories = zip(*(factorize_from_iterable(it) for it in iterables)) + return list(codes), list(categories) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py new file mode 100644 index 0000000000000000000000000000000000000000..1042a1b3fde61d18dac0c921bee64fc975d786ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py @@ -0,0 +1,2556 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, +) +from functools import wraps +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Union, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + algos, + lib, +) +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import ( + BaseOffset, + IncompatibleFrequency, + NaT, + NaTType, + Period, + Resolution, + Tick, + Timedelta, + Timestamp, + add_overflowsafe, + astype_overflowsafe, + get_unit_from_dtype, + iNaT, + ints_to_pydatetime, + ints_to_pytimedelta, + periods_per_day, + to_offset, +) +from pandas._libs.tslibs.fields import ( + RoundTo, + round_nsint64, +) +from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions +from pandas._libs.tslibs.timedeltas import get_unit_for_round +from pandas._libs.tslibs.timestamps import integer_op_not_supported +from pandas._typing import ( + ArrayLike, + AxisInt, + DatetimeLikeScalar, + Dtype, + DtypeObj, + F, + InterpolateOptions, + NpDtype, + PositionalIndexer2D, + PositionalIndexerTuple, + ScalarIndexer, + Self, + SequenceIndexer, + TimeAmbiguous, + TimeNonexistent, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + AbstractMethodError, + InvalidComparison, + PerformanceWarning, +) +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike +from pandas.core.dtypes.common import ( + is_all_strings, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCCategorical, + ABCMultiIndex, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, +) + +from pandas.core import ( + algorithms, + missing, + nanops, + ops, +) +from pandas.core.algorithms import ( + isin, + map_array, + unique1d, +) +from pandas.core.array_algos import datetimelike_accumulations +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays._mixins import ( + NDArrayBackedExtensionArray, + ravel_compat, +) +from pandas.core.arrays.arrow.array import ArrowExtensionArray +from pandas.core.arrays.base import ExtensionArray +from pandas.core.arrays.integer import IntegerArray +import pandas.core.common as com +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import ( + check_array_indexer, + check_setitem_lengths, +) +from pandas.core.ops.common import unpack_zerodim_and_defer +from pandas.core.ops.invalid import ( + invalid_comparison, + make_invalid_op, +) + +from pandas.tseries import frequencies + +if TYPE_CHECKING: + from 
collections.abc import ( + Iterator, + Sequence, + ) + + from pandas import Index + from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, + ) + +DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType] + + +def _make_unpacked_invalid_op(op_name: str): + op = make_invalid_op(op_name) + return unpack_zerodim_and_defer(op_name)(op) + + +def _period_dispatch(meth: F) -> F: + """ + For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results + in PeriodArray. We cannot use ._ndarray directly for the affected + methods because the i8 data has different semantics on NaT values. + """ + + @wraps(meth) + def new_meth(self, *args, **kwargs): + if not isinstance(self.dtype, PeriodDtype): + return meth(self, *args, **kwargs) + + arr = self.view("M8[ns]") + result = meth(arr, *args, **kwargs) + if result is NaT: + return NaT + elif isinstance(result, Timestamp): + return self._box_func(result._value) + + res_i8 = result.view("i8") + return self._from_backing_data(res_i8) + + return cast(F, new_meth) + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class DatetimeLikeArrayMixin( # type: ignore[misc] + OpsMixin, NDArrayBackedExtensionArray +): + """ + Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray + + Assumes that __new__/__init__ defines: + _ndarray + + and that inheriting subclass implements: + freq + """ + + # _infer_matches -> which infer_dtype strings are close enough to our own + _infer_matches: tuple[str, ...] + _is_recognized_dtype: Callable[[DtypeObj], bool] + _recognized_scalars: tuple[type, ...] + _ndarray: np.ndarray + freq: BaseOffset | None + + @cache_readonly + def _can_hold_na(self) -> bool: + return True + + def __init__( + self, data, dtype: Dtype | None = None, freq=None, copy: bool = False + ) -> None: + raise AbstractMethodError(self) + + @property + def _scalar_type(self) -> type[DatetimeLikeScalar]: + """ + The scalar associated with this datelike + + * PeriodArray : Period + * DatetimeArray : Timestamp + * TimedeltaArray : Timedelta + """ + raise AbstractMethodError(self) + + def _scalar_from_string(self, value: str) -> DTScalarOrNaT: + """ + Construct a scalar type from a string. + + Parameters + ---------- + value : str + + Returns + ------- + Period, Timestamp, or Timedelta, or NaT + Whatever the type of ``self._scalar_type`` is. + + Notes + ----- + This should call ``self._check_compatible_with`` before + unboxing the result. + """ + raise AbstractMethodError(self) + + def _unbox_scalar( + self, value: DTScalarOrNaT + ) -> np.int64 | np.datetime64 | np.timedelta64: + """ + Unbox the integer value of a scalar `value`. + + Parameters + ---------- + value : Period, Timestamp, Timedelta, or NaT + Depending on subclass. + + Returns + ------- + int + + Examples + -------- + >>> arr = pd.array(np.array(['1970-01-01'], 'datetime64[ns]')) + >>> arr._unbox_scalar(arr[0]) + numpy.datetime64('1970-01-01T00:00:00.000000000') + """ + raise AbstractMethodError(self) + + def _check_compatible_with(self, other: DTScalarOrNaT) -> None: + """ + Verify that `self` and `other` are compatible. + + * DatetimeArray verifies that the timezones (if any) match + * PeriodArray verifies that the freq matches + * Timedelta has no verification + + In each case, NaT is considered compatible. 
+ + Parameters + ---------- + other + + Raises + ------ + Exception + """ + raise AbstractMethodError(self) + + # ------------------------------------------------------------------ + + def _box_func(self, x): + """ + box function to get object from internal representation + """ + raise AbstractMethodError(self) + + def _box_values(self, values) -> np.ndarray: + """ + apply box func to passed values + """ + return lib.map_infer(values, self._box_func, convert=False) + + def __iter__(self) -> Iterator: + if self.ndim > 1: + return (self[n] for n in range(len(self))) + else: + return (self._box_func(v) for v in self.asi8) + + @property + def asi8(self) -> npt.NDArray[np.int64]: + """ + Integer representation of the values. + + Returns + ------- + ndarray + An ndarray with int64 dtype. + """ + # do not cache or you'll create a memory leak + return self._ndarray.view("i8") + + # ---------------------------------------------------------------- + # Rendering Methods + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None + ) -> npt.NDArray[np.object_]: + """ + Helper method for astype when converting to strings. + + Returns + ------- + ndarray[str] + """ + raise AbstractMethodError(self) + + def _formatter(self, boxed: bool = False): + # TODO: Remove Datetime & DatetimeTZ formatters. + return "'{}'".format + + # ---------------------------------------------------------------- + # Array-Like / EA-Interface Methods + + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + # used for Timedelta/DatetimeArray, overwritten by PeriodArray + if is_object_dtype(dtype): + return np.array(list(self), dtype=object) + return self._ndarray + + @overload + def __getitem__(self, item: ScalarIndexer) -> DTScalarOrNaT: + ... + + @overload + def __getitem__( + self, + item: SequenceIndexer | PositionalIndexerTuple, + ) -> Self: + ... + + def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT: + """ + This getitem defers to the underlying array, which by-definition can + only handle list-likes, slices, and integer scalars + """ + # Use cast as we know we will get back a DatetimeLikeArray or DTScalar, + # but skip evaluating the Union at runtime for performance + # (see https://github.com/pandas-dev/pandas/pull/44624) + result = cast("Union[Self, DTScalarOrNaT]", super().__getitem__(key)) + if lib.is_scalar(result): + return result + else: + # At this point we know the result is an array. + result = cast(Self, result) + result._freq = self._get_getitem_freq(key) + return result + + def _get_getitem_freq(self, key) -> BaseOffset | None: + """ + Find the `freq` attribute to assign to the result of a __getitem__ lookup. 
+ """ + is_period = isinstance(self.dtype, PeriodDtype) + if is_period: + freq = self.freq + elif self.ndim != 1: + freq = None + else: + key = check_array_indexer(self, key) # maybe ndarray[bool] -> slice + freq = None + if isinstance(key, slice): + if self.freq is not None and key.step is not None: + freq = key.step * self.freq + else: + freq = self.freq + elif key is Ellipsis: + # GH#21282 indexing with Ellipsis is similar to a full slice, + # should preserve `freq` attribute + freq = self.freq + elif com.is_bool_indexer(key): + new_key = lib.maybe_booleans_to_slice(key.view(np.uint8)) + if isinstance(new_key, slice): + return self._get_getitem_freq(new_key) + return freq + + # error: Argument 1 of "__setitem__" is incompatible with supertype + # "ExtensionArray"; supertype defines the argument type as "Union[int, + # ndarray]" + def __setitem__( + self, + key: int | Sequence[int] | Sequence[bool] | slice, + value: NaTType | Any | Sequence[Any], + ) -> None: + # I'm fudging the types a bit here. "Any" above really depends + # on type(self). For PeriodArray, it's Period (or stuff coercible + # to a period in from_sequence). For DatetimeArray, it's Timestamp... + # I don't know if mypy can do that, possibly with Generics. + # https://mypy.readthedocs.io/en/latest/generics.html + + no_op = check_setitem_lengths(key, value, self) + + # Calling super() before the no_op short-circuit means that we raise + # on invalid 'value' even if this is a no-op, e.g. wrong-dtype empty array. + super().__setitem__(key, value) + + if no_op: + return + + self._maybe_clear_freq() + + def _maybe_clear_freq(self) -> None: + # inplace operations like __setitem__ may invalidate the freq of + # DatetimeArray and TimedeltaArray + pass + + def astype(self, dtype, copy: bool = True): + # Some notes on cases we don't have to handle here in the base class: + # 1. PeriodArray.astype handles period -> period + # 2. DatetimeArray.astype handles conversion between tz. + # 3. DatetimeArray.astype handles datetime -> period + dtype = pandas_dtype(dtype) + + if dtype == object: + if self.dtype.kind == "M": + self = cast("DatetimeArray", self) + # *much* faster than self._box_values + # for e.g. test_get_loc_tuple_monotonic_above_size_cutoff + i8data = self.asi8 + converted = ints_to_pydatetime( + i8data, + tz=self.tz, + box="timestamp", + reso=self._creso, + ) + return converted + + elif self.dtype.kind == "m": + return ints_to_pytimedelta(self._ndarray, box=True) + + return self._box_values(self.asi8.ravel()).reshape(self.shape) + + elif isinstance(dtype, ExtensionDtype): + return super().astype(dtype, copy=copy) + elif is_string_dtype(dtype): + return self._format_native_types() + elif dtype.kind in "iu": + # we deliberately ignore int32 vs. int64 here. + # See https://github.com/pandas-dev/pandas/issues/24381 for more. + values = self.asi8 + if dtype != np.int64: + raise TypeError( + f"Converting from {self.dtype} to {dtype} is not supported. " + "Do obj.astype('int64').astype(dtype) instead" + ) + + if copy: + values = values.copy() + return values + elif (dtype.kind in "mM" and self.dtype != dtype) or dtype.kind == "f": + # disallow conversion between datetime/timedelta, + # and conversions for any datetimelike to float + msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" + raise TypeError(msg) + else: + return np.asarray(self, dtype=dtype) + + @overload + def view(self) -> Self: + ... + + @overload + def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray: + ... 
+ + @overload + def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray: + ... + + @overload + def view(self, dtype: Dtype | None = ...) -> ArrayLike: + ... + + # pylint: disable-next=useless-parent-delegation + def view(self, dtype: Dtype | None = None) -> ArrayLike: + # we need to explicitly call super() method as long as the `@overload`s + # are present in this file. + return super().view(dtype) + + # ------------------------------------------------------------------ + # Validation Methods + # TODO: try to de-duplicate these, ensure identical behavior + + def _validate_comparison_value(self, other): + if isinstance(other, str): + try: + # GH#18435 strings get a pass from tzawareness compat + other = self._scalar_from_string(other) + except (ValueError, IncompatibleFrequency): + # failed to parse as Timestamp/Timedelta/Period + raise InvalidComparison(other) + + if isinstance(other, self._recognized_scalars) or other is NaT: + other = self._scalar_type(other) + try: + self._check_compatible_with(other) + except (TypeError, IncompatibleFrequency) as err: + # e.g. tzawareness mismatch + raise InvalidComparison(other) from err + + elif not is_list_like(other): + raise InvalidComparison(other) + + elif len(other) != len(self): + raise ValueError("Lengths must match") + + else: + try: + other = self._validate_listlike(other, allow_object=True) + self._check_compatible_with(other) + except (TypeError, IncompatibleFrequency) as err: + if is_object_dtype(getattr(other, "dtype", None)): + # We will have to operate element-wise + pass + else: + raise InvalidComparison(other) from err + + return other + + def _validate_scalar( + self, + value, + *, + allow_listlike: bool = False, + unbox: bool = True, + ): + """ + Validate that the input value can be cast to our scalar_type. + + Parameters + ---------- + value : object + allow_listlike: bool, default False + When raising an exception, whether the message should say + listlike inputs are allowed. + unbox : bool, default True + Whether to unbox the result before returning. Note: unbox=False + skips the setitem compatibility check. + + Returns + ------- + self._scalar_type or NaT + """ + if isinstance(value, self._scalar_type): + pass + + elif isinstance(value, str): + # NB: Careful about tzawareness + try: + value = self._scalar_from_string(value) + except ValueError as err: + msg = self._validation_error_message(value, allow_listlike) + raise TypeError(msg) from err + + elif is_valid_na_for_dtype(value, self.dtype): + # GH#18295 + value = NaT + + elif isna(value): + # if we are dt64tz and value is dt64("NaT"), dont cast to NaT, + # or else we'll fail to raise in _unbox_scalar + msg = self._validation_error_message(value, allow_listlike) + raise TypeError(msg) + + elif isinstance(value, self._recognized_scalars): + # error: Argument 1 to "Timestamp" has incompatible type "object"; expected + # "integer[Any] | float | str | date | datetime | datetime64" + value = self._scalar_type(value) # type: ignore[arg-type] + + else: + msg = self._validation_error_message(value, allow_listlike) + raise TypeError(msg) + + if not unbox: + # NB: In general NDArrayBackedExtensionArray will unbox here; + # this option exists to prevent a performance hit in + # TimedeltaIndex.get_loc + return value + return self._unbox_scalar(value) + + def _validation_error_message(self, value, allow_listlike: bool = False) -> str: + """ + Construct an exception message on validation error. + + Some methods allow only scalar inputs, while others allow either scalar + or listlike. 
+ + Parameters + ---------- + allow_listlike: bool, default False + + Returns + ------- + str + """ + if hasattr(value, "dtype") and getattr(value, "ndim", 0) > 0: + msg_got = f"{value.dtype} array" + else: + msg_got = f"'{type(value).__name__}'" + if allow_listlike: + msg = ( + f"value should be a '{self._scalar_type.__name__}', 'NaT', " + f"or array of those. Got {msg_got} instead." + ) + else: + msg = ( + f"value should be a '{self._scalar_type.__name__}' or 'NaT'. " + f"Got {msg_got} instead." + ) + return msg + + def _validate_listlike(self, value, allow_object: bool = False): + if isinstance(value, type(self)): + if self.dtype.kind in "mM" and not allow_object: + # error: "DatetimeLikeArrayMixin" has no attribute "as_unit" + value = value.as_unit(self.unit, round_ok=False) # type: ignore[attr-defined] + return value + + if isinstance(value, list) and len(value) == 0: + # We treat empty list as our own dtype. + return type(self)._from_sequence([], dtype=self.dtype) + + if hasattr(value, "dtype") and value.dtype == object: + # `array` below won't do inference if value is an Index or Series. + # so do so here. in the Index case, inferred_type may be cached. + if lib.infer_dtype(value) in self._infer_matches: + try: + value = type(self)._from_sequence(value) + except (ValueError, TypeError): + if allow_object: + return value + msg = self._validation_error_message(value, True) + raise TypeError(msg) + + # Do type inference if necessary up front (after unpacking + # NumpyExtensionArray) + # e.g. we passed PeriodIndex.values and got an ndarray of Periods + value = extract_array(value, extract_numpy=True) + value = pd_array(value) + value = extract_array(value, extract_numpy=True) + + if is_all_strings(value): + # We got a StringArray + try: + # TODO: Could use from_sequence_of_strings if implemented + # Note: passing dtype is necessary for PeriodArray tests + value = type(self)._from_sequence(value, dtype=self.dtype) + except ValueError: + pass + + if isinstance(value.dtype, CategoricalDtype): + # e.g. we have a Categorical holding self.dtype + if value.categories.dtype == self.dtype: + # TODO: do we need equal dtype or just comparable? + value = value._internal_get_values() + value = extract_array(value, extract_numpy=True) + + if allow_object and is_object_dtype(value.dtype): + pass + + elif not type(self)._is_recognized_dtype(value.dtype): + msg = self._validation_error_message(value, True) + raise TypeError(msg) + + if self.dtype.kind in "mM" and not allow_object: + # error: "DatetimeLikeArrayMixin" has no attribute "as_unit" + value = value.as_unit(self.unit, round_ok=False) # type: ignore[attr-defined] + return value + + def _validate_setitem_value(self, value): + if is_list_like(value): + value = self._validate_listlike(value) + else: + return self._validate_scalar(value, allow_listlike=True) + + return self._unbox(value) + + @final + def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray: + """ + Unbox either a scalar with _unbox_scalar or an instance of our own type. + """ + if lib.is_scalar(other): + other = self._unbox_scalar(other) + else: + # same type as self + self._check_compatible_with(other) + other = other._ndarray + return other + + # ------------------------------------------------------------------ + # Additional array methods + # These are not part of the EA API, but we implement them because + # pandas assumes they're there. 
+ + @ravel_compat + def map(self, mapper, na_action=None): + from pandas import Index + + result = map_array(self, mapper, na_action=na_action) + result = Index(result) + + if isinstance(result, ABCMultiIndex): + return result.to_numpy() + else: + return result.array + + def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: + """ + Compute boolean array of whether each value is found in the + passed set of values. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + + Returns + ------- + ndarray[bool] + """ + if values.dtype.kind in "fiuc": + # TODO: de-duplicate with equals, validate_comparison_value + return np.zeros(self.shape, dtype=bool) + + values = ensure_wrapped_if_datetimelike(values) + + if not isinstance(values, type(self)): + inferable = [ + "timedelta", + "timedelta64", + "datetime", + "datetime64", + "date", + "period", + ] + if values.dtype == object: + values = lib.maybe_convert_objects( + values, # type: ignore[arg-type] + convert_non_numeric=True, + dtype_if_all_nat=self.dtype, + ) + if values.dtype != object: + return self.isin(values) + + inferred = lib.infer_dtype(values, skipna=False) + if inferred not in inferable: + if inferred == "string": + pass + + elif "mixed" in inferred: + return isin(self.astype(object), values) + else: + return np.zeros(self.shape, dtype=bool) + + try: + values = type(self)._from_sequence(values) + except ValueError: + return isin(self.astype(object), values) + else: + warnings.warn( + # GH#53111 + f"The behavior of 'isin' with dtype={self.dtype} and " + "castable values (e.g. strings) is deprecated. In a " + "future version, these will not be considered matching " + "by isin. Explicitly cast to the appropriate dtype before " + "calling isin instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if self.dtype.kind in "mM": + self = cast("DatetimeArray | TimedeltaArray", self) + # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]" + # has no attribute "as_unit" + values = values.as_unit(self.unit) # type: ignore[union-attr] + + try: + # error: Argument 1 to "_check_compatible_with" of "DatetimeLikeArrayMixin" + # has incompatible type "ExtensionArray | ndarray[Any, Any]"; expected + # "Period | Timestamp | Timedelta | NaTType" + self._check_compatible_with(values) # type: ignore[arg-type] + except (TypeError, ValueError): + # Includes tzawareness mismatch and IncompatibleFrequencyError + return np.zeros(self.shape, dtype=bool) + + # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]" + # has no attribute "asi8" + return isin(self.asi8, values.asi8) # type: ignore[union-attr] + + # ------------------------------------------------------------------ + # Null Handling + + def isna(self) -> npt.NDArray[np.bool_]: + return self._isnan + + @property # NB: override with cache_readonly in immutable subclasses + def _isnan(self) -> npt.NDArray[np.bool_]: + """ + return if each value is nan + """ + return self.asi8 == iNaT + + @property # NB: override with cache_readonly in immutable subclasses + def _hasna(self) -> bool: + """ + return if I have any nans; enables various perf speedups + """ + return bool(self._isnan.any()) + + def _maybe_mask_results( + self, result: np.ndarray, fill_value=iNaT, convert=None + ) -> np.ndarray: + """ + Parameters + ---------- + result : np.ndarray + fill_value : object, default iNaT + convert : str, dtype or None + + Returns + ------- + result : ndarray with values replace by the fill_value + + mask the result if needed, convert to the provided 
dtype if its not + None + + This is an internal routine. + """ + if self._hasna: + if convert: + result = result.astype(convert) + if fill_value is None: + fill_value = np.nan + np.putmask(result, self._isnan, fill_value) + return result + + # ------------------------------------------------------------------ + # Frequency Properties/Methods + + @property + def freqstr(self) -> str | None: + """ + Return the frequency object as a string if it's set, otherwise None. + + Examples + -------- + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D") + >>> idx.freqstr + 'D' + + The frequency can be inferred if there are more than 2 points: + + >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"], + ... freq="infer") + >>> idx.freqstr + '2D' + + For PeriodIndex: + + >>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M") + >>> idx.freqstr + 'M' + """ + if self.freq is None: + return None + return self.freq.freqstr + + @property # NB: override with cache_readonly in immutable subclasses + def inferred_freq(self) -> str | None: + """ + Tries to return a string representing a frequency generated by infer_freq. + + Returns None if it can't autodetect the frequency. + + Examples + -------- + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"]) + >>> idx.inferred_freq + '2D' + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"]) + >>> tdelta_idx + TimedeltaIndex(['0 days', '10 days', '20 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.inferred_freq + '10D' + """ + if self.ndim != 1: + return None + try: + return frequencies.infer_freq(self) + except ValueError: + return None + + @property # NB: override with cache_readonly in immutable subclasses + def _resolution_obj(self) -> Resolution | None: + freqstr = self.freqstr + if freqstr is None: + return None + try: + return Resolution.get_reso_from_freqstr(freqstr) + except KeyError: + return None + + @property # NB: override with cache_readonly in immutable subclasses + def resolution(self) -> str: + """ + Returns day, hour, minute, second, millisecond or microsecond + """ + # error: Item "None" of "Optional[Any]" has no attribute "attrname" + return self._resolution_obj.attrname # type: ignore[union-attr] + + # monotonicity/uniqueness properties are called via frequencies.infer_freq, + # see GH#23789 + + @property + def _is_monotonic_increasing(self) -> bool: + return algos.is_monotonic(self.asi8, timelike=True)[0] + + @property + def _is_monotonic_decreasing(self) -> bool: + return algos.is_monotonic(self.asi8, timelike=True)[1] + + @property + def _is_unique(self) -> bool: + return len(unique1d(self.asi8.ravel("K"))) == self.size + + # ------------------------------------------------------------------ + # Arithmetic Methods + + def _cmp_method(self, other, op): + if self.ndim > 1 and getattr(other, "shape", None) == self.shape: + # TODO: handle 2D-like listlikes + return op(self.ravel(), other.ravel()).reshape(self.shape) + + try: + other = self._validate_comparison_value(other) + except InvalidComparison: + return invalid_comparison(self, other, op) + + dtype = getattr(other, "dtype", None) + if is_object_dtype(dtype): + # We have to use comp_method_OBJECT_ARRAY instead of numpy + # comparison otherwise it would raise when comparing to None + result = ops.comp_method_OBJECT_ARRAY( + op, np.asarray(self.astype(object)), other + ) + return result + if other is NaT: + if op is operator.ne: + result 
= np.ones(self.shape, dtype=bool) + else: + result = np.zeros(self.shape, dtype=bool) + return result + + if not isinstance(self.dtype, PeriodDtype): + self = cast(TimelikeOps, self) + if self._creso != other._creso: + if not isinstance(other, type(self)): + # i.e. Timedelta/Timestamp, cast to ndarray and let + # compare_mismatched_resolutions handle broadcasting + try: + # GH#52080 see if we can losslessly cast to shared unit + other = other.as_unit(self.unit, round_ok=False) + except ValueError: + other_arr = np.array(other.asm8) + return compare_mismatched_resolutions( + self._ndarray, other_arr, op + ) + else: + other_arr = other._ndarray + return compare_mismatched_resolutions(self._ndarray, other_arr, op) + + other_vals = self._unbox(other) + # GH#37462 comparison on i8 values is almost 2x faster than M8/m8 + result = op(self._ndarray.view("i8"), other_vals.view("i8")) + + o_mask = isna(other) + mask = self._isnan | o_mask + if mask.any(): + nat_result = op is operator.ne + np.putmask(result, mask, nat_result) + + return result + + # pow is invalid for all three subclasses; TimedeltaArray will override + # the multiplication and division ops + __pow__ = _make_unpacked_invalid_op("__pow__") + __rpow__ = _make_unpacked_invalid_op("__rpow__") + __mul__ = _make_unpacked_invalid_op("__mul__") + __rmul__ = _make_unpacked_invalid_op("__rmul__") + __truediv__ = _make_unpacked_invalid_op("__truediv__") + __rtruediv__ = _make_unpacked_invalid_op("__rtruediv__") + __floordiv__ = _make_unpacked_invalid_op("__floordiv__") + __rfloordiv__ = _make_unpacked_invalid_op("__rfloordiv__") + __mod__ = _make_unpacked_invalid_op("__mod__") + __rmod__ = _make_unpacked_invalid_op("__rmod__") + __divmod__ = _make_unpacked_invalid_op("__divmod__") + __rdivmod__ = _make_unpacked_invalid_op("__rdivmod__") + + @final + def _get_i8_values_and_mask( + self, other + ) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]: + """ + Get the int64 values and b_mask to pass to add_overflowsafe. + """ + if isinstance(other, Period): + i8values = other.ordinal + mask = None + elif isinstance(other, (Timestamp, Timedelta)): + i8values = other._value + mask = None + else: + # PeriodArray, DatetimeArray, TimedeltaArray + mask = other._isnan + i8values = other.asi8 + return i8values, mask + + @final + def _get_arithmetic_result_freq(self, other) -> BaseOffset | None: + """ + Check if we can preserve self.freq in addition or subtraction. + """ + # Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving + # whenever self.freq is a Tick + if isinstance(self.dtype, PeriodDtype): + return self.freq + elif not lib.is_scalar(other): + return None + elif isinstance(self.freq, Tick): + # In these cases + return self.freq + return None + + @final + def _add_datetimelike_scalar(self, other) -> DatetimeArray: + if not lib.is_np_dtype(self.dtype, "m"): + raise TypeError( + f"cannot add {type(self).__name__} and {type(other).__name__}" + ) + + self = cast("TimedeltaArray", self) + + from pandas.core.arrays import DatetimeArray + from pandas.core.arrays.datetimes import tz_to_dtype + + assert other is not NaT + if isna(other): + # i.e. 
np.datetime64("NaT") + # In this case we specifically interpret NaT as a datetime, not + # the timedelta interpretation we would get by returning self + NaT + result = self._ndarray + NaT.to_datetime64().astype(f"M8[{self.unit}]") + # Preserve our resolution + return DatetimeArray._simple_new(result, dtype=result.dtype) + + other = Timestamp(other) + self, other = self._ensure_matching_resos(other) + self = cast("TimedeltaArray", self) + + other_i8, o_mask = self._get_i8_values_and_mask(other) + result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8")) + res_values = result.view(f"M8[{self.unit}]") + + dtype = tz_to_dtype(tz=other.tz, unit=self.unit) + res_values = result.view(f"M8[{self.unit}]") + new_freq = self._get_arithmetic_result_freq(other) + return DatetimeArray._simple_new(res_values, dtype=dtype, freq=new_freq) + + @final + def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray: + if not lib.is_np_dtype(self.dtype, "m"): + raise TypeError( + f"cannot add {type(self).__name__} and {type(other).__name__}" + ) + + # defer to DatetimeArray.__add__ + return other + self + + @final + def _sub_datetimelike_scalar( + self, other: datetime | np.datetime64 + ) -> TimedeltaArray: + if self.dtype.kind != "M": + raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}") + + self = cast("DatetimeArray", self) + # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] + + if isna(other): + # i.e. np.datetime64("NaT") + return self - NaT + + ts = Timestamp(other) + + self, ts = self._ensure_matching_resos(ts) + return self._sub_datetimelike(ts) + + @final + def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray: + if self.dtype.kind != "M": + raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}") + + if len(self) != len(other): + raise ValueError("cannot add indices of unequal length") + + self = cast("DatetimeArray", self) + + self, other = self._ensure_matching_resos(other) + return self._sub_datetimelike(other) + + @final + def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray: + self = cast("DatetimeArray", self) + + from pandas.core.arrays import TimedeltaArray + + try: + self._assert_tzawareness_compat(other) + except TypeError as err: + new_message = str(err).replace("compare", "subtract") + raise type(err)(new_message) from err + + other_i8, o_mask = self._get_i8_values_and_mask(other) + res_values = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8")) + res_m8 = res_values.view(f"timedelta64[{self.unit}]") + + new_freq = self._get_arithmetic_result_freq(other) + new_freq = cast("Tick | None", new_freq) + return TimedeltaArray._simple_new(res_m8, dtype=res_m8.dtype, freq=new_freq) + + @final + def _add_period(self, other: Period) -> PeriodArray: + if not lib.is_np_dtype(self.dtype, "m"): + raise TypeError(f"cannot add Period to a {type(self).__name__}") + + # We will wrap in a PeriodArray and defer to the reversed operation + from pandas.core.arrays.period import PeriodArray + + i8vals = np.broadcast_to(other.ordinal, self.shape) + dtype = PeriodDtype(other.freq) + parr = PeriodArray(i8vals, dtype=dtype) + return parr + self + + def _add_offset(self, offset): + raise AbstractMethodError(self) + + def _add_timedeltalike_scalar(self, other): + """ + Add a delta of a timedeltalike + + Returns + ------- + Same type as self + """ + if isna(other): + # i.e np.timedelta64("NaT") + new_values = np.empty(self.shape, dtype="i8").view(self._ndarray.dtype) + 
new_values.fill(iNaT) + return type(self)._simple_new(new_values, dtype=self.dtype) + + # PeriodArray overrides, so we only get here with DTA/TDA + self = cast("DatetimeArray | TimedeltaArray", self) + other = Timedelta(other) + self, other = self._ensure_matching_resos(other) + return self._add_timedeltalike(other) + + def _add_timedelta_arraylike(self, other: TimedeltaArray): + """ + Add a delta of a TimedeltaIndex + + Returns + ------- + Same type as self + """ + # overridden by PeriodArray + + if len(self) != len(other): + raise ValueError("cannot add indices of unequal length") + + self = cast("DatetimeArray | TimedeltaArray", self) + + self, other = self._ensure_matching_resos(other) + return self._add_timedeltalike(other) + + @final + def _add_timedeltalike(self, other: Timedelta | TimedeltaArray): + self = cast("DatetimeArray | TimedeltaArray", self) + + other_i8, o_mask = self._get_i8_values_and_mask(other) + new_values = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8")) + res_values = new_values.view(self._ndarray.dtype) + + new_freq = self._get_arithmetic_result_freq(other) + + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[dtype[datetime64], DatetimeTZDtype, + # dtype[timedelta64]]"; expected "Union[dtype[datetime64], DatetimeTZDtype]" + return type(self)._simple_new( + res_values, dtype=self.dtype, freq=new_freq # type: ignore[arg-type] + ) + + @final + def _add_nat(self): + """ + Add pd.NaT to self + """ + if isinstance(self.dtype, PeriodDtype): + raise TypeError( + f"Cannot add {type(self).__name__} and {type(NaT).__name__}" + ) + self = cast("TimedeltaArray | DatetimeArray", self) + + # GH#19124 pd.NaT is treated like a timedelta for both timedelta + # and datetime dtypes + result = np.empty(self.shape, dtype=np.int64) + result.fill(iNaT) + result = result.view(self._ndarray.dtype) # preserve reso + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[dtype[timedelta64], dtype[datetime64], + # DatetimeTZDtype]"; expected "Union[dtype[datetime64], DatetimeTZDtype]" + return type(self)._simple_new( + result, dtype=self.dtype, freq=None # type: ignore[arg-type] + ) + + @final + def _sub_nat(self): + """ + Subtract pd.NaT from self + """ + # GH#19124 Timedelta - datetime is not in general well-defined. + # We make an exception for pd.NaT, which in this case quacks + # like a timedelta. + # For datetime64 dtypes by convention we treat NaT as a datetime, so + # this subtraction returns a timedelta64 dtype. + # For period dtype, timedelta64 is a close-enough return dtype. + result = np.empty(self.shape, dtype=np.int64) + result.fill(iNaT) + if self.dtype.kind in "mM": + # We can retain unit in dtype + self = cast("DatetimeArray| TimedeltaArray", self) + return result.view(f"timedelta64[{self.unit}]") + else: + return result.view("timedelta64[ns]") + + @final + def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]: + # If the operation is well-defined, we return an object-dtype ndarray + # of DateOffsets. 
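# --- Editor's illustrative sketch; not part of the vendored pandas module ---
# Observable behavior of the NaT- and Period-subtraction paths above, using only
# public pandas API (assumes pandas 2.x; exact reprs may differ across versions).
import pandas as pd

dti = pd.to_datetime(["2023-01-01", "2023-01-02"])
print(dti - pd.NaT)
# TimedeltaIndex([NaT, NaT], dtype='timedelta64[ns]', freq=None)

pidx = pd.period_range("2023-01", periods=3, freq="M")
print(pidx - pd.Period("2023-01", freq="M"))
# object-dtype Index of DateOffsets, e.g. [<0 * MonthEnds>, <MonthEnd>, <2 * MonthEnds>]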
Null entries are filled with pd.NaT + if not isinstance(self.dtype, PeriodDtype): + raise TypeError( + f"cannot subtract {type(other).__name__} from {type(self).__name__}" + ) + + self = cast("PeriodArray", self) + self._check_compatible_with(other) + + other_i8, o_mask = self._get_i8_values_and_mask(other) + new_i8_data = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8")) + new_data = np.array([self.freq.base * x for x in new_i8_data]) + + if o_mask is None: + # i.e. Period scalar + mask = self._isnan + else: + # i.e. PeriodArray + mask = self._isnan | o_mask + new_data[mask] = NaT + return new_data + + @final + def _addsub_object_array(self, other: npt.NDArray[np.object_], op): + """ + Add or subtract array-like of DateOffset objects + + Parameters + ---------- + other : np.ndarray[object] + op : {operator.add, operator.sub} + + Returns + ------- + np.ndarray[object] + Except in fastpath case with length 1 where we operate on the + contained scalar. + """ + assert op in [operator.add, operator.sub] + if len(other) == 1 and self.ndim == 1: + # Note: without this special case, we could annotate return type + # as ndarray[object] + # If both 1D then broadcasting is unambiguous + return op(self, other[0]) + + warnings.warn( + "Adding/subtracting object-dtype array to " + f"{type(self).__name__} not vectorized.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + # Caller is responsible for broadcasting if necessary + assert self.shape == other.shape, (self.shape, other.shape) + + res_values = op(self.astype("O"), np.asarray(other)) + return res_values + + def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> Self: + if name not in {"cummin", "cummax"}: + raise TypeError(f"Accumulation {name} not supported for {type(self)}") + + op = getattr(datetimelike_accumulations, name) + result = op(self.copy(), skipna=skipna, **kwargs) + + return type(self)._simple_new(result, dtype=self.dtype) + + @unpack_zerodim_and_defer("__add__") + def __add__(self, other): + other_dtype = getattr(other, "dtype", None) + other = ensure_wrapped_if_datetimelike(other) + + # scalar others + if other is NaT: + result = self._add_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_timedeltalike_scalar(other) + elif isinstance(other, BaseOffset): + # specifically _not_ a Tick + result = self._add_offset(other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._add_datetimelike_scalar(other) + elif isinstance(other, Period) and lib.is_np_dtype(self.dtype, "m"): + result = self._add_period(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add) + + # array-like others + elif lib.is_np_dtype(other_dtype, "m"): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_timedelta_arraylike(other) + elif is_object_dtype(other_dtype): + # e.g. 
Array/Index of DateOffset objects + result = self._addsub_object_array(other, operator.add) + elif lib.is_np_dtype(other_dtype, "M") or isinstance( + other_dtype, DatetimeTZDtype + ): + # DatetimeIndex, ndarray[datetime64] + return self._add_datetime_arraylike(other) + elif is_integer_dtype(other_dtype): + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add) + else: + # Includes Categorical, other ExtensionArrays + # For PeriodDtype, if self is a TimedeltaArray and other is a + # PeriodArray with a timedelta-like (i.e. Tick) freq, this + # operation is valid. Defer to the PeriodArray implementation. + # In remaining cases, this will end up raising TypeError. + return NotImplemented + + if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"): + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray._from_sequence(result) + return result + + def __radd__(self, other): + # alias for __add__ + return self.__add__(other) + + @unpack_zerodim_and_defer("__sub__") + def __sub__(self, other): + other_dtype = getattr(other, "dtype", None) + other = ensure_wrapped_if_datetimelike(other) + + # scalar others + if other is NaT: + result = self._sub_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_timedeltalike_scalar(-other) + elif isinstance(other, BaseOffset): + # specifically _not_ a Tick + result = self._add_offset(-other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._sub_datetimelike_scalar(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub) + + elif isinstance(other, Period): + result = self._sub_periodlike(other) + + # array-like others + elif lib.is_np_dtype(other_dtype, "m"): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_timedelta_arraylike(-other) + elif is_object_dtype(other_dtype): + # e.g. Array/Index of DateOffset objects + result = self._addsub_object_array(other, operator.sub) + elif lib.is_np_dtype(other_dtype, "M") or isinstance( + other_dtype, DatetimeTZDtype + ): + # DatetimeIndex, ndarray[datetime64] + result = self._sub_datetime_arraylike(other) + elif isinstance(other_dtype, PeriodDtype): + # PeriodIndex + result = self._sub_periodlike(other) + elif is_integer_dtype(other_dtype): + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub) + else: + # Includes ExtensionArrays, float_dtype + return NotImplemented + + if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"): + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray._from_sequence(result) + return result + + def __rsub__(self, other): + other_dtype = getattr(other, "dtype", None) + other_is_dt64 = lib.is_np_dtype(other_dtype, "M") or isinstance( + other_dtype, DatetimeTZDtype + ) + + if other_is_dt64 and lib.is_np_dtype(self.dtype, "m"): + # ndarray[datetime64] cannot be subtracted from self, so + # we need to wrap in DatetimeArray/Index and flip the operation + if lib.is_scalar(other): + # i.e. 
np.datetime64 object + return Timestamp(other) - self + if not isinstance(other, DatetimeLikeArrayMixin): + # Avoid down-casting DatetimeIndex + from pandas.core.arrays import DatetimeArray + + other = DatetimeArray._from_sequence(other) + return other - self + elif self.dtype.kind == "M" and hasattr(other, "dtype") and not other_is_dt64: + # GH#19959 datetime - datetime is well-defined as timedelta, + # but any other type - datetime is not well-defined. + raise TypeError( + f"cannot subtract {type(self).__name__} from {type(other).__name__}" + ) + elif isinstance(self.dtype, PeriodDtype) and lib.is_np_dtype(other_dtype, "m"): + # TODO: Can we simplify/generalize these cases at all? + raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}") + elif lib.is_np_dtype(self.dtype, "m"): + self = cast("TimedeltaArray", self) + return (-self) + other + + # We get here with e.g. datetime objects + return -(self - other) + + def __iadd__(self, other) -> Self: + result = self + other + self[:] = result[:] + + if not isinstance(self.dtype, PeriodDtype): + # restore freq, which is invalidated by setitem + self._freq = result.freq + return self + + def __isub__(self, other) -> Self: + result = self - other + self[:] = result[:] + + if not isinstance(self.dtype, PeriodDtype): + # restore freq, which is invalidated by setitem + self._freq = result.freq + return self + + # -------------------------------------------------------------- + # Reductions + + @_period_dispatch + def _quantile( + self, + qs: npt.NDArray[np.float64], + interpolation: str, + ) -> Self: + return super()._quantile(qs=qs, interpolation=interpolation) + + @_period_dispatch + def min(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs): + """ + Return the minimum value of the Array or minimum along + an axis. + + See Also + -------- + numpy.ndarray.min + Index.min : Return the minimum value in an Index. + Series.min : Return the minimum value in a Series. + """ + nv.validate_min((), kwargs) + nv.validate_minmax_axis(axis, self.ndim) + + result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + @_period_dispatch + def max(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs): + """ + Return the maximum value of the Array or maximum along + an axis. + + See Also + -------- + numpy.ndarray.max + Index.max : Return the maximum value in an Index. + Series.max : Return the maximum value in a Series. + """ + nv.validate_max((), kwargs) + nv.validate_minmax_axis(axis, self.ndim) + + result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0): + """ + Return the mean value of the Array. + + Parameters + ---------- + skipna : bool, default True + Whether to ignore any NaT elements. + axis : int, optional, default 0 + + Returns + ------- + scalar + Timestamp or Timedelta. + + See Also + -------- + numpy.ndarray.mean : Returns the average of array elements along a given axis. + Series.mean : Return the mean value in a Series. + + Notes + ----- + mean is only defined for Datetime and Timedelta dtypes, not for Period. 
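# --- Editor's illustrative sketch; not part of the vendored pandas module ---
# mean() is rejected for period-dtype data, as the Notes above state; the error
# message suggests converting to timestamps first (assumes pandas 2.x).
import pandas as pd

parr = pd.period_range("2023-01", periods=3, freq="M").array  # PeriodArray
try:
    parr.mean()
except TypeError as err:
    print(err)  # mean is not implemented for PeriodArray since the meaning is ambiguous ...

pidx = pd.period_range("2023-01", periods=3, freq="M")
print(pidx.to_timestamp(how="start").mean())
# Timestamp('2023-01-31 00:00:00') -- the average of the three month-start timestamps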
+ + Examples + -------- + For :class:`pandas.DatetimeIndex`: + + >>> idx = pd.date_range('2001-01-01 00:00', periods=3) + >>> idx + DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.mean() + Timestamp('2001-01-02 00:00:00') + + For :class:`pandas.TimedeltaIndex`: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D') + >>> tdelta_idx + TimedeltaIndex(['1 days', '2 days', '3 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.mean() + Timedelta('2 days 00:00:00') + """ + if isinstance(self.dtype, PeriodDtype): + # See discussion in GH#24757 + raise TypeError( + f"mean is not implemented for {type(self).__name__} since the " + "meaning is ambiguous. An alternative is " + "obj.to_timestamp(how='start').mean()" + ) + + result = nanops.nanmean( + self._ndarray, axis=axis, skipna=skipna, mask=self.isna() + ) + return self._wrap_reduction_result(axis, result) + + @_period_dispatch + def median(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs): + nv.validate_median((), kwargs) + + if axis is not None and abs(axis) >= self.ndim: + raise ValueError("abs(axis) must be less than ndim") + + result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def _mode(self, dropna: bool = True): + mask = None + if dropna: + mask = self.isna() + + i8modes = algorithms.mode(self.view("i8"), mask=mask) + npmodes = i8modes.view(self._ndarray.dtype) + npmodes = cast(np.ndarray, npmodes) + return self._from_backing_data(npmodes) + + # ------------------------------------------------------------------ + # GroupBy Methods + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ): + dtype = self.dtype + if dtype.kind == "M": + # Adding/multiplying datetimes is not valid + if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]: + raise TypeError(f"datetime64 type does not support {how} operations") + if how in ["any", "all"]: + # GH#34479 + warnings.warn( + f"'{how}' with datetime64 dtypes is deprecated and will raise in a " + f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + elif isinstance(dtype, PeriodDtype): + # Adding/multiplying Periods is not valid + if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]: + raise TypeError(f"Period type does not support {how} operations") + if how in ["any", "all"]: + # GH#34479 + warnings.warn( + f"'{how}' with PeriodDtype is deprecated and will raise in a " + f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + # timedeltas we can add but not multiply + if how in ["prod", "cumprod", "skew", "var"]: + raise TypeError(f"timedelta64 type does not support {how} operations") + + # All of the functions implemented here are ordinal, so we can + # operate on the tz-naive equivalents + npvalues = self._ndarray.view("M8[ns]") + + from pandas.core.groupby.ops import WrappedCythonOp + + kind = WrappedCythonOp.get_kind_from_how(how) + op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) + + res_values = op._cython_op_ndim_compat( + npvalues, + min_count=min_count, + ngroups=ngroups, + comp_ids=ids, + mask=None, + **kwargs, + ) + + if op.how in op.cast_blocklist: + # i.e. 
how in ["rank"], since other cast_blocklist methods don't go + # through cython_operation + return res_values + + # We did a view to M8[ns] above, now we go the other direction + assert res_values.dtype == "M8[ns]" + if how in ["std", "sem"]: + from pandas.core.arrays import TimedeltaArray + + if isinstance(self.dtype, PeriodDtype): + raise TypeError("'std' and 'sem' are not valid for PeriodDtype") + self = cast("DatetimeArray | TimedeltaArray", self) + new_dtype = f"m8[{self.unit}]" + res_values = res_values.view(new_dtype) + return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype) + + res_values = res_values.view(self._ndarray.dtype) + return self._from_backing_data(res_values) + + +class DatelikeOps(DatetimeLikeArrayMixin): + """ + Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex. + """ + + @Substitution( + URL="https://docs.python.org/3/library/datetime.html" + "#strftime-and-strptime-behavior" + ) + def strftime(self, date_format: str) -> npt.NDArray[np.object_]: + """ + Convert to Index using specified date_format. + + Return an Index of formatted strings specified by date_format, which + supports the same string format as the python standard library. Details + of the string format can be found in `python string format + doc <%(URL)s>`__. + + Formats supported by the C `strftime` API but not by the python string format + doc (such as `"%%R"`, `"%%r"`) are not officially supported and should be + preferably replaced with their supported equivalents (such as `"%%H:%%M"`, + `"%%I:%%M:%%S %%p"`). + + Note that `PeriodIndex` support additional directives, detailed in + `Period.strftime`. + + Parameters + ---------- + date_format : str + Date format string (e.g. "%%Y-%%m-%%d"). + + Returns + ------- + ndarray[object] + NumPy ndarray of formatted strings. + + See Also + -------- + to_datetime : Convert the given argument to datetime. + DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. + DatetimeIndex.round : Round the DatetimeIndex to the specified freq. + DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. + Timestamp.strftime : Format a single Timestamp. + Period.strftime : Format a single Period. + + Examples + -------- + >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), + ... periods=3, freq='s') + >>> rng.strftime('%%B %%d, %%Y, %%r') + Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', + 'March 10, 2018, 09:00:02 AM'], + dtype='object') + """ + result = self._format_native_types(date_format=date_format, na_rep=np.nan) + return result.astype(object, copy=False) + + +_round_doc = """ + Perform {op} operation on the data to the specified `freq`. + + Parameters + ---------- + freq : str or Offset + The frequency level to {op} the index to. Must be a fixed + frequency like 'S' (second) not 'ME' (month end). See + :ref:`frequency aliases ` for + a list of possible `freq` values. + ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' + Only relevant for DatetimeIndex: + + - 'infer' will attempt to infer fall dst-transition hours based on + order + - bool-ndarray where True signifies a DST time, False designates + a non-DST time (note that this flag is only applicable for + ambiguous times) + - 'NaT' will return NaT where there are ambiguous times + - 'raise' will raise an AmbiguousTimeError if there are ambiguous + times. 
+ + nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise' + A nonexistent time does not exist in a particular timezone + where clocks moved forward due to DST. + + - 'shift_forward' will shift the nonexistent time forward to the + closest existing time + - 'shift_backward' will shift the nonexistent time backward to the + closest existing time + - 'NaT' will return NaT where there are nonexistent times + - timedelta objects will shift nonexistent times by the timedelta + - 'raise' will raise an NonExistentTimeError if there are + nonexistent times. + + Returns + ------- + DatetimeIndex, TimedeltaIndex, or Series + Index of the same type for a DatetimeIndex or TimedeltaIndex, + or a Series with the same index for a Series. + + Raises + ------ + ValueError if the `freq` cannot be converted. + + Notes + ----- + If the timestamps have a timezone, {op}ing will take place relative to the + local ("wall") time and re-localized to the same timezone. When {op}ing + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + + Examples + -------- + **DatetimeIndex** + + >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min') + >>> rng + DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00', + '2018-01-01 12:01:00'], + dtype='datetime64[ns]', freq='min') + """ + +_round_example = """>>> rng.round('h') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.round("h") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.floor("2h", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.floor("2h", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + """ + +_floor_example = """>>> rng.floor('h') + DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.floor("h") + 0 2018-01-01 11:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.floor("2h", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.floor("2h", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + """ + +_ceil_example = """>>> rng.ceil('h') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 13:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.ceil("h") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 13:00:00 + dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. 
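# --- Editor's illustrative sketch; not part of the vendored pandas module ---
# The ``nonexistent`` option described above: ceiling 01:59 to the hour lands in
# the 02:00-03:00 spring-forward gap in Europe/Amsterdam on 2021-03-28, so the
# result must be shifted to an existing time (assumes pandas 2.2+, where the
# lowercase 'h' alias used in these docstrings is valid).
import pandas as pd

dti = pd.DatetimeIndex(["2021-03-28 01:59:00"], tz="Europe/Amsterdam")
print(dti.ceil("h", nonexistent="shift_forward"))
# DatetimeIndex(['2021-03-28 03:00:00+02:00'],
#               dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
# With the default nonexistent='raise', the same call raises a NonExistentTimeError.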
+ + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.ceil("h", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.ceil("h", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + """ + + +class TimelikeOps(DatetimeLikeArrayMixin): + """ + Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex. + """ + + _default_dtype: np.dtype + + def __init__( + self, values, dtype=None, freq=lib.no_default, copy: bool = False + ) -> None: + warnings.warn( + # GH#55623 + f"{type(self).__name__}.__init__ is deprecated and will be " + "removed in a future version. Use pd.array instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if dtype is not None: + dtype = pandas_dtype(dtype) + + values = extract_array(values, extract_numpy=True) + if isinstance(values, IntegerArray): + values = values.to_numpy("int64", na_value=iNaT) + + inferred_freq = getattr(values, "_freq", None) + explicit_none = freq is None + freq = freq if freq is not lib.no_default else None + + if isinstance(values, type(self)): + if explicit_none: + # don't inherit from values + pass + elif freq is None: + freq = values.freq + elif freq and values.freq: + freq = to_offset(freq) + freq = _validate_inferred_freq(freq, values.freq) + + if dtype is not None and dtype != values.dtype: + # TODO: we only have tests for this for DTA, not TDA (2022-07-01) + raise TypeError( + f"dtype={dtype} does not match data dtype {values.dtype}" + ) + + dtype = values.dtype + values = values._ndarray + + elif dtype is None: + if isinstance(values, np.ndarray) and values.dtype.kind in "Mm": + dtype = values.dtype + else: + dtype = self._default_dtype + if isinstance(values, np.ndarray) and values.dtype == "i8": + values = values.view(dtype) + + if not isinstance(values, np.ndarray): + raise ValueError( + f"Unexpected type '{type(values).__name__}'. 'values' must be a " + f"{type(self).__name__}, ndarray, or Series or Index " + "containing one of those." + ) + if values.ndim not in [1, 2]: + raise ValueError("Only 1-dimensional input arrays are supported.") + + if values.dtype == "i8": + # for compat with datetime/timedelta/period shared methods, + # we can sometimes get here with int64 values. These represent + # nanosecond UTC (or tz-naive) unix timestamps + if dtype is None: + dtype = self._default_dtype + values = values.view(self._default_dtype) + elif lib.is_np_dtype(dtype, "mM"): + values = values.view(dtype) + elif isinstance(dtype, DatetimeTZDtype): + kind = self._default_dtype.kind + new_dtype = f"{kind}8[{dtype.unit}]" + values = values.view(new_dtype) + + dtype = self._validate_dtype(values, dtype) + + if freq == "infer": + raise ValueError( + f"Frequency inference not allowed in {type(self).__name__}.__init__. " + "Use 'pd.array()' instead." + ) + + if copy: + values = values.copy() + if freq: + freq = to_offset(freq) + if values.dtype.kind == "m" and not isinstance(freq, Tick): + raise TypeError("TimedeltaArray/Index freq must be a Tick") + + NDArrayBacked.__init__(self, values=values, dtype=dtype) + self._freq = freq + + if inferred_freq is None and freq is not None: + type(self)._validate_frequency(self, freq) + + @classmethod + def _validate_dtype(cls, values, dtype): + raise AbstractMethodError(cls) + + @property + def freq(self): + """ + Return the frequency object if it is set, otherwise None. 
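# --- Editor's illustrative sketch; not part of the vendored pandas module ---
# ``freq`` is populated by the range constructors; indexes built from explicit
# values keep freq=None but may still report an inferred_freq.
import pandas as pd

dti = pd.date_range("2023-01-01", periods=3, freq="D")
print(dti.freq)            # <Day>
dti2 = pd.DatetimeIndex(["2023-01-01", "2023-01-02", "2023-01-03"])
print(dti2.freq)           # None
print(dti2.inferred_freq)  # 'D'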
+ """ + return self._freq + + @freq.setter + def freq(self, value) -> None: + if value is not None: + value = to_offset(value) + self._validate_frequency(self, value) + if self.dtype.kind == "m" and not isinstance(value, Tick): + raise TypeError("TimedeltaArray/Index freq must be a Tick") + + if self.ndim > 1: + raise ValueError("Cannot set freq with ndim > 1") + + self._freq = value + + @final + def _maybe_pin_freq(self, freq, validate_kwds: dict): + """ + Constructor helper to pin the appropriate `freq` attribute. Assumes + that self._freq is currently set to any freq inferred in + _from_sequence_not_strict. + """ + if freq is None: + # user explicitly passed None -> override any inferred_freq + self._freq = None + elif freq == "infer": + # if self._freq is *not* None then we already inferred a freq + # and there is nothing left to do + if self._freq is None: + # Set _freq directly to bypass duplicative _validate_frequency + # check. + self._freq = to_offset(self.inferred_freq) + elif freq is lib.no_default: + # user did not specify anything, keep inferred freq if the original + # data had one, otherwise do nothing + pass + elif self._freq is None: + # We cannot inherit a freq from the data, so we need to validate + # the user-passed freq + freq = to_offset(freq) + type(self)._validate_frequency(self, freq, **validate_kwds) + self._freq = freq + else: + # Otherwise we just need to check that the user-passed freq + # doesn't conflict with the one we already have. + freq = to_offset(freq) + _validate_inferred_freq(freq, self._freq) + + @final + @classmethod + def _validate_frequency(cls, index, freq: BaseOffset, **kwargs): + """ + Validate that a frequency is compatible with the values of a given + Datetime Array/Index or Timedelta Array/Index + + Parameters + ---------- + index : DatetimeIndex or TimedeltaIndex + The index on which to determine if the given frequency is valid + freq : DateOffset + The frequency to validate + """ + inferred = index.inferred_freq + if index.size == 0 or inferred == freq.freqstr: + return None + + try: + on_freq = cls._generate_range( + start=index[0], + end=None, + periods=len(index), + freq=freq, + unit=index.unit, + **kwargs, + ) + if not np.array_equal(index.asi8, on_freq.asi8): + raise ValueError + except ValueError as err: + if "non-fixed" in str(err): + # non-fixed frequencies are not meaningful for timedelta64; + # we retain that error message + raise err + # GH#11587 the main way this is reached is if the `np.array_equal` + # check above is False. This can also be reached if index[0] + # is `NaT`, in which case the call to `cls._generate_range` will + # raise a ValueError, which we re-raise with a more targeted + # message. + raise ValueError( + f"Inferred frequency {inferred} from passed values " + f"does not conform to passed frequency {freq.freqstr}" + ) from err + + @classmethod + def _generate_range( + cls, start, end, periods: int | None, freq, *args, **kwargs + ) -> Self: + raise AbstractMethodError(cls) + + # -------------------------------------------------------------- + + @cache_readonly + def _creso(self) -> int: + return get_unit_from_dtype(self._ndarray.dtype) + + @cache_readonly + def unit(self) -> str: + # e.g. 
"ns", "us", "ms" + # error: Argument 1 to "dtype_to_unit" has incompatible type + # "ExtensionDtype"; expected "Union[DatetimeTZDtype, dtype[Any]]" + return dtype_to_unit(self.dtype) # type: ignore[arg-type] + + def as_unit(self, unit: str, round_ok: bool = True) -> Self: + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError("Supported units are 's', 'ms', 'us', 'ns'") + + dtype = np.dtype(f"{self.dtype.kind}8[{unit}]") + new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=round_ok) + + if isinstance(self.dtype, np.dtype): + new_dtype = new_values.dtype + else: + tz = cast("DatetimeArray", self).tz + new_dtype = DatetimeTZDtype(tz=tz, unit=unit) + + # error: Unexpected keyword argument "freq" for "_simple_new" of + # "NDArrayBacked" [call-arg] + return type(self)._simple_new( + new_values, dtype=new_dtype, freq=self.freq # type: ignore[call-arg] + ) + + # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta + # with the return type matching input type. TypeVar? + def _ensure_matching_resos(self, other): + if self._creso != other._creso: + # Just as with Timestamp/Timedelta, we cast to the higher resolution + if self._creso < other._creso: + self = self.as_unit(other.unit) + else: + other = other.as_unit(self.unit) + return self, other + + # -------------------------------------------------------------- + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + if ( + ufunc in [np.isnan, np.isinf, np.isfinite] + and len(inputs) == 1 + and inputs[0] is self + ): + # numpy 1.18 changed isinf and isnan to not raise on dt64/td64 + return getattr(ufunc, method)(self._ndarray, **kwargs) + + return super().__array_ufunc__(ufunc, method, *inputs, **kwargs) + + def _round(self, freq, mode, ambiguous, nonexistent): + # round the local times + if isinstance(self.dtype, DatetimeTZDtype): + # operate on naive timestamps, then convert back to aware + self = cast("DatetimeArray", self) + naive = self.tz_localize(None) + result = naive._round(freq, mode, ambiguous, nonexistent) + return result.tz_localize( + self.tz, ambiguous=ambiguous, nonexistent=nonexistent + ) + + values = self.view("i8") + values = cast(np.ndarray, values) + nanos = get_unit_for_round(freq, self._creso) + if nanos == 0: + # GH 52761 + return self.copy() + result_i8 = round_nsint64(values, mode, nanos) + result = self._maybe_mask_results(result_i8, fill_value=iNaT) + result = result.view(self._ndarray.dtype) + return self._simple_new(result, dtype=self.dtype) + + @Appender((_round_doc + _round_example).format(op="round")) + def round( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent) + + @Appender((_round_doc + _floor_example).format(op="floor")) + def floor( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) + + @Appender((_round_doc + _ceil_example).format(op="ceil")) + def ceil( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) + + # -------------------------------------------------------------- + # Reductions + + def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool: + # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype + return 
nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) + + def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool: + # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype + + return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) + + # -------------------------------------------------------------- + # Frequency Methods + + def _maybe_clear_freq(self) -> None: + self._freq = None + + def _with_freq(self, freq) -> Self: + """ + Helper to get a view on the same data, with a new freq. + + Parameters + ---------- + freq : DateOffset, None, or "infer" + + Returns + ------- + Same type as self + """ + # GH#29843 + if freq is None: + # Always valid + pass + elif len(self) == 0 and isinstance(freq, BaseOffset): + # Always valid. In the TimedeltaArray case, we require a Tick offset + if self.dtype.kind == "m" and not isinstance(freq, Tick): + raise TypeError("TimedeltaArray/Index freq must be a Tick") + else: + # As an internal method, we can ensure this assertion always holds + assert freq == "infer" + freq = to_offset(self.inferred_freq) + + arr = self.view() + arr._freq = freq + return arr + + # -------------------------------------------------------------- + # ExtensionArray Interface + + def _values_for_json(self) -> np.ndarray: + # Small performance bump vs the base class which calls np.asarray(self) + if isinstance(self.dtype, np.dtype): + return self._ndarray + return super()._values_for_json() + + def factorize( + self, + use_na_sentinel: bool = True, + sort: bool = False, + ): + if self.freq is not None: + # We must be unique, so can short-circuit (and retain freq) + codes = np.arange(len(self), dtype=np.intp) + uniques = self.copy() # TODO: copy or view? + if sort and self.freq.n < 0: + codes = codes[::-1] + uniques = uniques[::-1] + return codes, uniques + + if sort: + # algorithms.factorize only passes sort=True here when freq is + # not None, so this should not be reached. + raise NotImplementedError( + f"The 'sort' keyword in {type(self).__name__}.factorize is " + "ignored unless arr.freq is not None. To factorize with sort, " + "call pd.factorize(obj, sort=True) instead." + ) + return super().factorize(use_na_sentinel=use_na_sentinel) + + @classmethod + def _concat_same_type( + cls, + to_concat: Sequence[Self], + axis: AxisInt = 0, + ) -> Self: + new_obj = super()._concat_same_type(to_concat, axis) + + obj = to_concat[0] + + if axis == 0: + # GH 3232: If the concat result is evenly spaced, we can retain the + # original frequency + to_concat = [x for x in to_concat if len(x)] + + if obj.freq is not None and all(x.freq == obj.freq for x in to_concat): + pairs = zip(to_concat[:-1], to_concat[1:]) + if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs): + new_freq = obj.freq + new_obj._freq = new_freq + return new_obj + + def copy(self, order: str = "C") -> Self: + new_obj = super().copy(order=order) + new_obj._freq = self.freq + return new_obj + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index: Index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> Self: + """ + See NDFrame.interpolate.__doc__. 
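# --- Editor's illustrative sketch; not part of the vendored pandas module ---
# The factorize() fast path above: when a freq is set the values are known to be
# unique, so the codes are a plain arange and the uniques keep their freq.
import pandas as pd

dti = pd.date_range("2023-01-01", periods=4, freq="D")
codes, uniques = dti.factorize()
print(codes)         # [0 1 2 3]
print(uniques.freq)  # <Day>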
+ """ + # NB: we return type(self) even if copy=False + if method != "linear": + raise NotImplementedError + + if not copy: + out_data = self._ndarray + else: + out_data = self._ndarray.copy() + + missing.interpolate_2d_inplace( + out_data, + method=method, + axis=axis, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + **kwargs, + ) + if not copy: + return self + return type(self)._simple_new(out_data, dtype=self.dtype) + + # -------------------------------------------------------------- + # Unsorted + + @property + def _is_dates_only(self) -> bool: + """ + Check if we are round times at midnight (and no timezone), which will + be given a more compact __repr__ than other cases. For TimedeltaArray + we are checking for multiples of 24H. + """ + if not lib.is_np_dtype(self.dtype): + # i.e. we have a timezone + return False + + values_int = self.asi8 + consider_values = values_int != iNaT + reso = get_unit_from_dtype(self.dtype) + ppd = periods_per_day(reso) + + # TODO: can we reuse is_date_array_normalized? would need a skipna kwd + # (first attempt at this was less performant than this implementation) + even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0 + return even_days + + +# ------------------------------------------------------------------- +# Shared Constructor Helpers + + +def ensure_arraylike_for_datetimelike( + data, copy: bool, cls_name: str +) -> tuple[ArrayLike, bool]: + if not hasattr(data, "dtype"): + # e.g. list, tuple + if not isinstance(data, (list, tuple)) and np.ndim(data) == 0: + # i.e. generator + data = list(data) + + data = construct_1d_object_array_from_listlike(data) + copy = False + elif isinstance(data, ABCMultiIndex): + raise TypeError(f"Cannot create a {cls_name} from a MultiIndex.") + else: + data = extract_array(data, extract_numpy=True) + + if isinstance(data, IntegerArray) or ( + isinstance(data, ArrowExtensionArray) and data.dtype.kind in "iu" + ): + data = data.to_numpy("int64", na_value=iNaT) + copy = False + elif isinstance(data, ArrowExtensionArray): + data = data._maybe_convert_datelike_array() + data = data.to_numpy() + copy = False + elif not isinstance(data, (np.ndarray, ExtensionArray)): + # GH#24539 e.g. xarray, dask object + data = np.asarray(data) + + elif isinstance(data, ABCCategorical): + # GH#18664 preserve tz in going DTI->Categorical->DTI + # TODO: cases where we need to do another pass through maybe_convert_dtype, + # e.g. the categories are timedelta64s + data = data.categories.take(data.codes, fill_value=NaT)._values + copy = False + + return data, copy + + +@overload +def validate_periods(periods: None) -> None: + ... + + +@overload +def validate_periods(periods: int | float) -> int: + ... + + +def validate_periods(periods: int | float | None) -> int | None: + """ + If a `periods` argument is passed to the Datetime/Timedelta Array/Index + constructor, cast it to an integer. 
+ + Parameters + ---------- + periods : None, float, int + + Returns + ------- + periods : None or int + + Raises + ------ + TypeError + if periods is None, float, or int + """ + if periods is not None: + if lib.is_float(periods): + warnings.warn( + # GH#56036 + "Non-integer 'periods' in pd.date_range, pd.timedelta_range, " + "pd.period_range, and pd.interval_range are deprecated and " + "will raise in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + periods = int(periods) + elif not lib.is_integer(periods): + raise TypeError(f"periods must be a number, got {periods}") + return periods + + +def _validate_inferred_freq( + freq: BaseOffset | None, inferred_freq: BaseOffset | None +) -> BaseOffset | None: + """ + If the user passes a freq and another freq is inferred from passed data, + require that they match. + + Parameters + ---------- + freq : DateOffset or None + inferred_freq : DateOffset or None + + Returns + ------- + freq : DateOffset or None + """ + if inferred_freq is not None: + if freq is not None and freq != inferred_freq: + raise ValueError( + f"Inferred frequency {inferred_freq} from passed " + "values does not conform to passed frequency " + f"{freq.freqstr}" + ) + if freq is None: + freq = inferred_freq + + return freq + + +def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype | ArrowDtype) -> str: + """ + Return the unit str corresponding to the dtype's resolution. + + Parameters + ---------- + dtype : DatetimeTZDtype or np.dtype + If np.dtype, we assume it is a datetime64 dtype. + + Returns + ------- + str + """ + if isinstance(dtype, DatetimeTZDtype): + return dtype.unit + elif isinstance(dtype, ArrowDtype): + if dtype.kind not in "mM": + raise ValueError(f"{dtype=} does not have a resolution.") + return dtype.pyarrow_dtype.unit + return np.datetime_data(dtype)[0] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/datetimes.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/datetimes.py new file mode 100644 index 0000000000000000000000000000000000000000..a146220d249e2013c91cb647ea0cbeccf66b68b3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/datetimes.py @@ -0,0 +1,2820 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, + tzinfo, +) +from typing import ( + TYPE_CHECKING, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + tslib, +) +from pandas._libs.tslibs import ( + BaseOffset, + NaT, + NaTType, + Resolution, + Timestamp, + astype_overflowsafe, + fields, + get_resolution, + get_supported_dtype, + get_unit_from_dtype, + ints_to_pydatetime, + is_date_array_normalized, + is_supported_dtype, + is_unitless, + normalize_i8_timestamps, + timezones, + to_offset, + tz_convert_from_utc, + tzconversion, +) +from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_inclusive + +from pandas.core.dtypes.common import ( + DT64NS_DTYPE, + INT64_DTYPE, + is_bool_dtype, + is_float_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.arrays import datetimelike as dtl +from pandas.core.arrays._ranges import generate_regular_range +import pandas.core.common as com + +from pandas.tseries.frequencies import 
get_period_alias +from pandas.tseries.offsets import ( + Day, + Tick, +) + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas._typing import ( + ArrayLike, + DateTimeErrorChoices, + DtypeObj, + IntervalClosedType, + Self, + TimeAmbiguous, + TimeNonexistent, + npt, + ) + + from pandas import DataFrame + from pandas.core.arrays import PeriodArray + + +_ITER_CHUNKSIZE = 10_000 + + +@overload +def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype: + ... + + +@overload +def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]: + ... + + +def tz_to_dtype( + tz: tzinfo | None, unit: str = "ns" +) -> np.dtype[np.datetime64] | DatetimeTZDtype: + """ + Return a datetime64[ns] dtype appropriate for the given timezone. + + Parameters + ---------- + tz : tzinfo or None + unit : str, default "ns" + + Returns + ------- + np.dtype or Datetime64TZDType + """ + if tz is None: + return np.dtype(f"M8[{unit}]") + else: + return DatetimeTZDtype(tz=tz, unit=unit) + + +def _field_accessor(name: str, field: str, docstring: str | None = None): + def f(self): + values = self._local_timestamps() + + if field in self._bool_ops: + result: np.ndarray + + if field.endswith(("start", "end")): + freq = self.freq + month_kw = 12 + if freq: + kwds = freq.kwds + month_kw = kwds.get("startingMonth", kwds.get("month", 12)) + + result = fields.get_start_end_field( + values, field, self.freqstr, month_kw, reso=self._creso + ) + else: + result = fields.get_date_field(values, field, reso=self._creso) + + # these return a boolean by-definition + return result + + if field in self._object_ops: + result = fields.get_date_name_field(values, field, reso=self._creso) + result = self._maybe_mask_results(result, fill_value=None) + + else: + result = fields.get_date_field(values, field, reso=self._creso) + result = self._maybe_mask_results( + result, fill_value=None, convert="float64" + ) + + return result + + f.__name__ = name + f.__doc__ = docstring + return property(f) + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): # type: ignore[misc] + """ + Pandas ExtensionArray for tz-naive or tz-aware datetime data. + + .. warning:: + + DatetimeArray is currently experimental, and its API may change + without warning. In particular, :attr:`DatetimeArray.dtype` is + expected to change to always be an instance of an ``ExtensionDtype`` + subclass. + + Parameters + ---------- + values : Series, Index, DatetimeArray, ndarray + The datetime data. + + For DatetimeArray `values` (or a Series or Index boxing one), + `dtype` and `freq` will be extracted from `values`. + + dtype : numpy.dtype or DatetimeTZDtype + Note that the only NumPy dtype allowed is 'datetime64[ns]'. + freq : str or Offset, optional + The frequency. + copy : bool, default False + Whether to copy the underlying array of values. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.arrays.DatetimeArray._from_sequence( + ... 
pd.DatetimeIndex(['2023-01-01', '2023-01-02'], freq='D')) + + ['2023-01-01 00:00:00', '2023-01-02 00:00:00'] + Length: 2, dtype: datetime64[ns] + """ + + _typ = "datetimearray" + _internal_fill_value = np.datetime64("NaT", "ns") + _recognized_scalars = (datetime, np.datetime64) + _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "M") or isinstance( + x, DatetimeTZDtype + ) + _infer_matches = ("datetime", "datetime64", "date") + + @property + def _scalar_type(self) -> type[Timestamp]: + return Timestamp + + # define my properties & methods for delegation + _bool_ops: list[str] = [ + "is_month_start", + "is_month_end", + "is_quarter_start", + "is_quarter_end", + "is_year_start", + "is_year_end", + "is_leap_year", + ] + _object_ops: list[str] = ["freq", "tz"] + _field_ops: list[str] = [ + "year", + "month", + "day", + "hour", + "minute", + "second", + "weekday", + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "days_in_month", + "daysinmonth", + "microsecond", + "nanosecond", + ] + _other_ops: list[str] = ["date", "time", "timetz"] + _datetimelike_ops: list[str] = ( + _field_ops + _object_ops + _bool_ops + _other_ops + ["unit"] + ) + _datetimelike_methods: list[str] = [ + "to_period", + "tz_localize", + "tz_convert", + "normalize", + "strftime", + "round", + "floor", + "ceil", + "month_name", + "day_name", + "as_unit", + ] + + # ndim is inherited from ExtensionArray, must exist to ensure + # Timestamp.__richcmp__(DateTimeArray) operates pointwise + + # ensure that operations with numpy arrays defer to our implementation + __array_priority__ = 1000 + + # ----------------------------------------------------------------- + # Constructors + + _dtype: np.dtype[np.datetime64] | DatetimeTZDtype + _freq: BaseOffset | None = None + _default_dtype = DT64NS_DTYPE # used in TimeLikeOps.__init__ + + @classmethod + def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: + if lib.infer_dtype(scalars, skipna=True) not in ["datetime", "datetime64"]: + # TODO: require any NAs be valid-for-DTA + # TODO: if dtype is passed, check for tzawareness compat? + raise ValueError + return cls._from_sequence(scalars, dtype=dtype) + + @classmethod + def _validate_dtype(cls, values, dtype): + # used in TimeLikeOps.__init__ + dtype = _validate_dt64_dtype(dtype) + _validate_dt64_dtype(values.dtype) + if isinstance(dtype, np.dtype): + if values.dtype != dtype: + raise ValueError("Values resolution does not match dtype.") + else: + vunit = np.datetime_data(values.dtype)[0] + if vunit != dtype.unit: + raise ValueError("Values resolution does not match dtype.") + return dtype + + # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" + @classmethod + def _simple_new( # type: ignore[override] + cls, + values: npt.NDArray[np.datetime64], + freq: BaseOffset | None = None, + dtype: np.dtype[np.datetime64] | DatetimeTZDtype = DT64NS_DTYPE, + ) -> Self: + assert isinstance(values, np.ndarray) + assert dtype.kind == "M" + if isinstance(dtype, np.dtype): + assert dtype == values.dtype + assert not is_unitless(dtype) + else: + # DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC], + # then values.dtype should be M8[us]. 
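# --- Editor's illustrative sketch; not part of the vendored pandas module ---
# Building a DatetimeArray through the public pd.array path, the recommended
# route now that calling the class __init__ directly is deprecated (pandas 2.x).
import pandas as pd

arr = pd.array(["2023-01-01", "2023-01-02"], dtype="datetime64[ns]")
print(type(arr).__name__, arr.dtype)  # DatetimeArray datetime64[ns]

aware = pd.array(pd.to_datetime(["2023-01-01 00:00:00+00:00"]))
print(aware.dtype)                    # datetime64[ns, UTC]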
+ assert dtype._creso == get_unit_from_dtype(values.dtype) + + result = super()._simple_new(values, dtype) + result._freq = freq + return result + + @classmethod + def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False): + return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy) + + @classmethod + def _from_sequence_not_strict( + cls, + data, + *, + dtype=None, + copy: bool = False, + tz=lib.no_default, + freq: str | BaseOffset | lib.NoDefault | None = lib.no_default, + dayfirst: bool = False, + yearfirst: bool = False, + ambiguous: TimeAmbiguous = "raise", + ) -> Self: + """ + A non-strict version of _from_sequence, called from DatetimeIndex.__new__. + """ + + # if the user either explicitly passes tz=None or a tz-naive dtype, we + # disallows inferring a tz. + explicit_tz_none = tz is None + if tz is lib.no_default: + tz = None + else: + tz = timezones.maybe_get_tz(tz) + + dtype = _validate_dt64_dtype(dtype) + # if dtype has an embedded tz, capture it + tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none) + + unit = None + if dtype is not None: + unit = dtl.dtype_to_unit(dtype) + + data, copy = dtl.ensure_arraylike_for_datetimelike( + data, copy, cls_name="DatetimeArray" + ) + inferred_freq = None + if isinstance(data, DatetimeArray): + inferred_freq = data.freq + + subarr, tz = _sequence_to_dt64( + data, + copy=copy, + tz=tz, + dayfirst=dayfirst, + yearfirst=yearfirst, + ambiguous=ambiguous, + out_unit=unit, + ) + # We have to call this again after possibly inferring a tz above + _validate_tz_from_dtype(dtype, tz, explicit_tz_none) + if tz is not None and explicit_tz_none: + raise ValueError( + "Passed data is timezone-aware, incompatible with 'tz=None'. " + "Use obj.tz_localize(None) instead." + ) + + data_unit = np.datetime_data(subarr.dtype)[0] + data_dtype = tz_to_dtype(tz, data_unit) + result = cls._simple_new(subarr, freq=inferred_freq, dtype=data_dtype) + if unit is not None and unit != result.unit: + # If unit was specified in user-passed dtype, cast to it here + result = result.as_unit(unit) + + validate_kwds = {"ambiguous": ambiguous} + result._maybe_pin_freq(freq, validate_kwds) + return result + + @classmethod + def _generate_range( + cls, + start, + end, + periods: int | None, + freq, + tz=None, + normalize: bool = False, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + inclusive: IntervalClosedType = "both", + *, + unit: str | None = None, + ) -> Self: + periods = dtl.validate_periods(periods) + if freq is None and any(x is None for x in [periods, start, end]): + raise ValueError("Must provide freq argument if no data is supplied") + + if com.count_not_none(start, end, periods, freq) != 3: + raise ValueError( + "Of the four parameters: start, end, periods, " + "and freq, exactly three must be specified" + ) + freq = to_offset(freq) + + if start is not None: + start = Timestamp(start) + + if end is not None: + end = Timestamp(end) + + if start is NaT or end is NaT: + raise ValueError("Neither `start` nor `end` can be NaT") + + if unit is not None: + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'") + else: + unit = "ns" + + if start is not None: + start = start.as_unit(unit, round_ok=False) + if end is not None: + end = end.as_unit(unit, round_ok=False) + + left_inclusive, right_inclusive = validate_inclusive(inclusive) + start, end = _maybe_normalize_endpoints(start, end, normalize) + tz = _infer_tz_from_endpoints(start, end, tz) + + if tz is not None: + # 
Localize the start and end arguments + start = _maybe_localize_point(start, freq, tz, ambiguous, nonexistent) + end = _maybe_localize_point(end, freq, tz, ambiguous, nonexistent) + + if freq is not None: + # We break Day arithmetic (fixed 24 hour) here and opt for + # Day to mean calendar day (23/24/25 hour). Therefore, strip + # tz info from start and day to avoid DST arithmetic + if isinstance(freq, Day): + if start is not None: + start = start.tz_localize(None) + if end is not None: + end = end.tz_localize(None) + + if isinstance(freq, Tick): + i8values = generate_regular_range(start, end, periods, freq, unit=unit) + else: + xdr = _generate_range( + start=start, end=end, periods=periods, offset=freq, unit=unit + ) + i8values = np.array([x._value for x in xdr], dtype=np.int64) + + endpoint_tz = start.tz if start is not None else end.tz + + if tz is not None and endpoint_tz is None: + if not timezones.is_utc(tz): + # short-circuit tz_localize_to_utc which would make + # an unnecessary copy with UTC but be a no-op. + creso = abbrev_to_npy_unit(unit) + i8values = tzconversion.tz_localize_to_utc( + i8values, + tz, + ambiguous=ambiguous, + nonexistent=nonexistent, + creso=creso, + ) + + # i8values is localized datetime64 array -> have to convert + # start/end as well to compare + if start is not None: + start = start.tz_localize(tz, ambiguous, nonexistent) + if end is not None: + end = end.tz_localize(tz, ambiguous, nonexistent) + else: + # Create a linearly spaced date_range in local time + # Nanosecond-granularity timestamps aren't always correctly + # representable with doubles, so we limit the range that we + # pass to np.linspace as much as possible + periods = cast(int, periods) + i8values = ( + np.linspace(0, end._value - start._value, periods, dtype="int64") + + start._value + ) + if i8values.dtype != "i8": + # 2022-01-09 I (brock) am not sure if it is possible for this + # to overflow and cast to e.g. 
f8, but if it does we need to cast + i8values = i8values.astype("i8") + + if start == end: + if not left_inclusive and not right_inclusive: + i8values = i8values[1:-1] + else: + start_i8 = Timestamp(start)._value + end_i8 = Timestamp(end)._value + if not left_inclusive or not right_inclusive: + if not left_inclusive and len(i8values) and i8values[0] == start_i8: + i8values = i8values[1:] + if not right_inclusive and len(i8values) and i8values[-1] == end_i8: + i8values = i8values[:-1] + + dt64_values = i8values.view(f"datetime64[{unit}]") + dtype = tz_to_dtype(tz, unit=unit) + return cls._simple_new(dt64_values, freq=freq, dtype=dtype) + + # ----------------------------------------------------------------- + # DatetimeLike Interface + + def _unbox_scalar(self, value) -> np.datetime64: + if not isinstance(value, self._scalar_type) and value is not NaT: + raise ValueError("'value' should be a Timestamp.") + self._check_compatible_with(value) + if value is NaT: + return np.datetime64(value._value, self.unit) + else: + return value.as_unit(self.unit).asm8 + + def _scalar_from_string(self, value) -> Timestamp | NaTType: + return Timestamp(value, tz=self.tz) + + def _check_compatible_with(self, other) -> None: + if other is NaT: + return + self._assert_tzawareness_compat(other) + + # ----------------------------------------------------------------- + # Descriptive Properties + + def _box_func(self, x: np.datetime64) -> Timestamp | NaTType: + # GH#42228 + value = x.view("i8") + ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz) + return ts + + @property + # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype" + # incompatible with return type "ExtensionDtype" in supertype + # "ExtensionArray" + def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override] + """ + The dtype for the DatetimeArray. + + .. warning:: + + A future version of pandas will change dtype to never be a + ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will + always be an instance of an ``ExtensionDtype`` subclass. + + Returns + ------- + numpy.dtype or DatetimeTZDtype + If the values are tz-naive, then ``np.dtype('datetime64[ns]')`` + is returned. + + If the values are tz-aware, then the ``DatetimeTZDtype`` + is returned. + """ + return self._dtype + + @property + def tz(self) -> tzinfo | None: + """ + Return the timezone. + + Returns + ------- + datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None + Returns None when the array is tz-naive. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.tz + datetime.timezone.utc + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.tz + datetime.timezone.utc + """ + # GH 18595 + return getattr(self.dtype, "tz", None) + + @tz.setter + def tz(self, value): + # GH 3746: Prevent localizing or converting the index by setting tz + raise AttributeError( + "Cannot directly set timezone. 
Use tz_localize() " + "or tz_convert() as appropriate" + ) + + @property + def tzinfo(self) -> tzinfo | None: + """ + Alias for tz attribute + """ + return self.tz + + @property # NB: override with cache_readonly in immutable subclasses + def is_normalized(self) -> bool: + """ + Returns True if all of the dates are at midnight ("no time") + """ + return is_date_array_normalized(self.asi8, self.tz, reso=self._creso) + + @property # NB: override with cache_readonly in immutable subclasses + def _resolution_obj(self) -> Resolution: + return get_resolution(self.asi8, self.tz, reso=self._creso) + + # ---------------------------------------------------------------- + # Array-Like / EA-Interface Methods + + def __array__(self, dtype=None, copy=None) -> np.ndarray: + if dtype is None and self.tz: + # The default for tz-aware is object, to preserve tz info + dtype = object + + return super().__array__(dtype=dtype, copy=copy) + + def __iter__(self) -> Iterator: + """ + Return an iterator over the boxed values + + Yields + ------ + tstamp : Timestamp + """ + if self.ndim > 1: + for i in range(len(self)): + yield self[i] + else: + # convert in chunks of 10k for efficiency + data = self.asi8 + length = len(self) + chunksize = _ITER_CHUNKSIZE + chunks = (length // chunksize) + 1 + + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, length) + converted = ints_to_pydatetime( + data[start_i:end_i], + tz=self.tz, + box="timestamp", + reso=self._creso, + ) + yield from converted + + def astype(self, dtype, copy: bool = True): + # We handle + # --> datetime + # --> period + # DatetimeLikeArrayMixin Super handles the rest. + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + elif isinstance(dtype, ExtensionDtype): + if not isinstance(dtype, DatetimeTZDtype): + # e.g. Sparse[datetime64[ns]] + return super().astype(dtype, copy=copy) + elif self.tz is None: + # pre-2.0 this did self.tz_localize(dtype.tz), which did not match + # the Series behavior which did + # values.tz_localize("UTC").tz_convert(dtype.tz) + raise TypeError( + "Cannot use .astype to convert from timezone-naive dtype to " + "timezone-aware dtype. Use obj.tz_localize instead or " + "series.dt.tz_localize instead" + ) + else: + # tzaware unit conversion e.g. datetime64[s, UTC] + np_dtype = np.dtype(dtype.str) + res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy) + return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq) + + elif ( + self.tz is None + and lib.is_np_dtype(dtype, "M") + and not is_unitless(dtype) + and is_supported_dtype(dtype) + ): + # unit conversion e.g. datetime64[s] + res_values = astype_overflowsafe(self._ndarray, dtype, copy=True) + return type(self)._simple_new(res_values, dtype=res_values.dtype) + # TODO: preserve freq? + + elif self.tz is not None and lib.is_np_dtype(dtype, "M"): + # pre-2.0 behavior for DTA/DTI was + # values.tz_convert("UTC").tz_localize(None), which did not match + # the Series behavior + raise TypeError( + "Cannot use .astype to convert from timezone-aware dtype to " + "timezone-naive dtype. Use obj.tz_localize(None) or " + "obj.tz_convert('UTC').tz_localize(None) instead." + ) + + elif ( + self.tz is None + and lib.is_np_dtype(dtype, "M") + and dtype != self.dtype + and is_unitless(dtype) + ): + raise TypeError( + "Casting to unit-less dtype 'datetime64' is not supported. " + "Pass e.g. 'datetime64[ns]' instead." 
+ ) + + elif isinstance(dtype, PeriodDtype): + return self.to_period(freq=dtype.freq) + return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy) + + # ----------------------------------------------------------------- + # Rendering Methods + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None, **kwargs + ) -> npt.NDArray[np.object_]: + if date_format is None and self._is_dates_only: + # Only dates and no timezone: provide a default format + date_format = "%Y-%m-%d" + + return tslib.format_array_from_datetime( + self.asi8, tz=self.tz, format=date_format, na_rep=na_rep, reso=self._creso + ) + + # ----------------------------------------------------------------- + # Comparison Methods + + def _has_same_tz(self, other) -> bool: + # vzone shouldn't be None if value is non-datetime like + if isinstance(other, np.datetime64): + # convert to Timestamp as np.datetime64 doesn't have tz attr + other = Timestamp(other) + + if not hasattr(other, "tzinfo"): + return False + other_tz = other.tzinfo + return timezones.tz_compare(self.tzinfo, other_tz) + + def _assert_tzawareness_compat(self, other) -> None: + # adapted from _Timestamp._assert_tzawareness_compat + other_tz = getattr(other, "tzinfo", None) + other_dtype = getattr(other, "dtype", None) + + if isinstance(other_dtype, DatetimeTZDtype): + # Get tzinfo from Series dtype + other_tz = other.dtype.tz + if other is NaT: + # pd.NaT quacks both aware and naive + pass + elif self.tz is None: + if other_tz is not None: + raise TypeError( + "Cannot compare tz-naive and tz-aware datetime-like objects." + ) + elif other_tz is None: + raise TypeError( + "Cannot compare tz-naive and tz-aware datetime-like objects" + ) + + # ----------------------------------------------------------------- + # Arithmetic Methods + + def _add_offset(self, offset: BaseOffset) -> Self: + assert not isinstance(offset, Tick) + + if self.tz is not None: + values = self.tz_localize(None) + else: + values = self + + try: + res_values = offset._apply_array(values._ndarray) + if res_values.dtype.kind == "i": + # error: Argument 1 to "view" of "ndarray" has incompatible type + # "dtype[datetime64] | DatetimeTZDtype"; expected + # "dtype[Any] | type[Any] | _SupportsDType[dtype[Any]]" + res_values = res_values.view(values.dtype) # type: ignore[arg-type] + except NotImplementedError: + warnings.warn( + "Non-vectorized DateOffset being applied to Series or DatetimeIndex.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + res_values = self.astype("O") + offset + # TODO(GH#55564): as_unit will be unnecessary + result = type(self)._from_sequence(res_values).as_unit(self.unit) + if not len(self): + # GH#30336 _from_sequence won't be able to infer self.tz + return result.tz_localize(self.tz) + + else: + result = type(self)._simple_new(res_values, dtype=res_values.dtype) + if offset.normalize: + result = result.normalize() + result._freq = None + + if self.tz is not None: + result = result.tz_localize(self.tz) + + return result + + # ----------------------------------------------------------------- + # Timezone Conversion and Localization Methods + + def _local_timestamps(self) -> npt.NDArray[np.int64]: + """ + Convert to an i8 (unix-like nanosecond timestamp) representation + while keeping the local timezone and not using UTC. + This is used to calculate time-of-day information as if the timestamps + were timezone-naive. 
+ """ + if self.tz is None or timezones.is_utc(self.tz): + # Avoid the copy that would be made in tzconversion + return self.asi8 + return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) + + def tz_convert(self, tz) -> Self: + """ + Convert tz-aware Datetime Array/Index from one time zone to another. + + Parameters + ---------- + tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None + Time zone for time. Corresponding timestamps would be converted + to this time zone of the Datetime Array/Index. A `tz` of None will + convert to UTC and remove the timezone information. + + Returns + ------- + Array or Index + + Raises + ------ + TypeError + If Datetime Array/Index is tz-naive. + + See Also + -------- + DatetimeIndex.tz : A timezone that has a variable offset from UTC. + DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a + given time zone, or remove timezone from a tz-aware DatetimeIndex. + + Examples + -------- + With the `tz` parameter, we can change the DatetimeIndex + to other time zones: + + >>> dti = pd.date_range(start='2014-08-01 09:00', + ... freq='h', periods=3, tz='Europe/Berlin') + + >>> dti + DatetimeIndex(['2014-08-01 09:00:00+02:00', + '2014-08-01 10:00:00+02:00', + '2014-08-01 11:00:00+02:00'], + dtype='datetime64[ns, Europe/Berlin]', freq='h') + + >>> dti.tz_convert('US/Central') + DatetimeIndex(['2014-08-01 02:00:00-05:00', + '2014-08-01 03:00:00-05:00', + '2014-08-01 04:00:00-05:00'], + dtype='datetime64[ns, US/Central]', freq='h') + + With the ``tz=None``, we can remove the timezone (after converting + to UTC if necessary): + + >>> dti = pd.date_range(start='2014-08-01 09:00', freq='h', + ... periods=3, tz='Europe/Berlin') + + >>> dti + DatetimeIndex(['2014-08-01 09:00:00+02:00', + '2014-08-01 10:00:00+02:00', + '2014-08-01 11:00:00+02:00'], + dtype='datetime64[ns, Europe/Berlin]', freq='h') + + >>> dti.tz_convert(None) + DatetimeIndex(['2014-08-01 07:00:00', + '2014-08-01 08:00:00', + '2014-08-01 09:00:00'], + dtype='datetime64[ns]', freq='h') + """ + tz = timezones.maybe_get_tz(tz) + + if self.tz is None: + # tz naive, use tz_localize + raise TypeError( + "Cannot convert tz-naive timestamps, use tz_localize to localize" + ) + + # No conversion since timestamps are all UTC to begin with + dtype = tz_to_dtype(tz, unit=self.unit) + return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq) + + @dtl.ravel_compat + def tz_localize( + self, + tz, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + """ + Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index. + + This method takes a time zone (tz) naive Datetime Array/Index object + and makes this time zone aware. It does not move the time to another + time zone. + + This method can also be used to do the inverse -- to create a time + zone unaware object from an aware object. To that end, pass `tz=None`. + + Parameters + ---------- + tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None + Time zone to convert timestamps to. Passing ``None`` will + remove the time zone information preserving local time. + ambiguous : 'infer', 'NaT', bool array, default 'raise' + When clocks moved backward due to DST, ambiguous times may arise. + For example in Central European Time (UTC+01), when going from + 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at + 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the + `ambiguous` parameter dictates how ambiguous times should be + handled. 
+ + - 'infer' will attempt to infer fall dst-transition hours based on + order + - bool-ndarray where True signifies a DST time, False signifies a + non-DST time (note that this flag is only applicable for + ambiguous times) + - 'NaT' will return NaT where there are ambiguous times + - 'raise' will raise an AmbiguousTimeError if there are ambiguous + times. + + nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \ +default 'raise' + A nonexistent time does not exist in a particular timezone + where clocks moved forward due to DST. + + - 'shift_forward' will shift the nonexistent time forward to the + closest existing time + - 'shift_backward' will shift the nonexistent time backward to the + closest existing time + - 'NaT' will return NaT where there are nonexistent times + - timedelta objects will shift nonexistent times by the timedelta + - 'raise' will raise an NonExistentTimeError if there are + nonexistent times. + + Returns + ------- + Same type as self + Array/Index converted to the specified time zone. + + Raises + ------ + TypeError + If the Datetime Array/Index is tz-aware and tz is not None. + + See Also + -------- + DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from + one time zone to another. + + Examples + -------- + >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3) + >>> tz_naive + DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', + '2018-03-03 09:00:00'], + dtype='datetime64[ns]', freq='D') + + Localize DatetimeIndex in US/Eastern time zone: + + >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern') + >>> tz_aware + DatetimeIndex(['2018-03-01 09:00:00-05:00', + '2018-03-02 09:00:00-05:00', + '2018-03-03 09:00:00-05:00'], + dtype='datetime64[ns, US/Eastern]', freq=None) + + With the ``tz=None``, we can remove the time zone information + while keeping the local time (not converted to UTC): + + >>> tz_aware.tz_localize(None) + DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', + '2018-03-03 09:00:00'], + dtype='datetime64[ns]', freq=None) + + Be careful with DST changes. When there is sequential data, pandas can + infer the DST time: + + >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 03:00:00', + ... '2018-10-28 03:30:00'])) + >>> s.dt.tz_localize('CET', ambiguous='infer') + 0 2018-10-28 01:30:00+02:00 + 1 2018-10-28 02:00:00+02:00 + 2 2018-10-28 02:30:00+02:00 + 3 2018-10-28 02:00:00+01:00 + 4 2018-10-28 02:30:00+01:00 + 5 2018-10-28 03:00:00+01:00 + 6 2018-10-28 03:30:00+01:00 + dtype: datetime64[ns, CET] + + In some cases, inferring the DST is impossible. In such cases, you can + pass an ndarray to the ambiguous parameter to set the DST explicitly + + >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00', + ... '2018-10-28 02:36:00', + ... '2018-10-28 03:46:00'])) + >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False])) + 0 2018-10-28 01:20:00+02:00 + 1 2018-10-28 02:36:00+02:00 + 2 2018-10-28 03:46:00+01:00 + dtype: datetime64[ns, CET] + + If the DST transition causes nonexistent times, you can shift these + dates forward or backwards with a timedelta object or `'shift_forward'` + or `'shift_backwards'`. + + >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', + ... 
'2015-03-29 03:30:00'])) + >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') + 0 2015-03-29 03:00:00+02:00 + 1 2015-03-29 03:30:00+02:00 + dtype: datetime64[ns, Europe/Warsaw] + + >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward') + 0 2015-03-29 01:59:59.999999999+01:00 + 1 2015-03-29 03:30:00+02:00 + dtype: datetime64[ns, Europe/Warsaw] + + >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1h')) + 0 2015-03-29 03:30:00+02:00 + 1 2015-03-29 03:30:00+02:00 + dtype: datetime64[ns, Europe/Warsaw] + """ + nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") + if nonexistent not in nonexistent_options and not isinstance( + nonexistent, timedelta + ): + raise ValueError( + "The nonexistent argument must be one of 'raise', " + "'NaT', 'shift_forward', 'shift_backward' or " + "a timedelta object" + ) + + if self.tz is not None: + if tz is None: + new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) + else: + raise TypeError("Already tz-aware, use tz_convert to convert.") + else: + tz = timezones.maybe_get_tz(tz) + # Convert to UTC + + new_dates = tzconversion.tz_localize_to_utc( + self.asi8, + tz, + ambiguous=ambiguous, + nonexistent=nonexistent, + creso=self._creso, + ) + new_dates_dt64 = new_dates.view(f"M8[{self.unit}]") + dtype = tz_to_dtype(tz, unit=self.unit) + + freq = None + if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates_dt64[0])): + # we can preserve freq + # TODO: Also for fixed-offsets + freq = self.freq + elif tz is None and self.tz is None: + # no-op + freq = self.freq + return self._simple_new(new_dates_dt64, dtype=dtype, freq=freq) + + # ---------------------------------------------------------------- + # Conversion Methods - Vectorized analogues of Timestamp methods + + def to_pydatetime(self) -> npt.NDArray[np.object_]: + """ + Return an ndarray of ``datetime.datetime`` objects. + + Returns + ------- + numpy.ndarray + + Examples + -------- + >>> idx = pd.date_range('2018-02-27', periods=3) + >>> idx.to_pydatetime() + array([datetime.datetime(2018, 2, 27, 0, 0), + datetime.datetime(2018, 2, 28, 0, 0), + datetime.datetime(2018, 3, 1, 0, 0)], dtype=object) + """ + return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso) + + def normalize(self) -> Self: + """ + Convert times to midnight. + + The time component of the date-time is converted to midnight i.e. + 00:00:00. This is useful in cases, when the time does not matter. + Length is unaltered. The timezones are unaffected. + + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on Datetime Array/Index. + + Returns + ------- + DatetimeArray, DatetimeIndex or Series + The same type as the original data. Series will have the same + name and index. DatetimeIndex will have the same name. + + See Also + -------- + floor : Floor the datetimes to the specified freq. + ceil : Ceil the datetimes to the specified freq. + round : Round the datetimes to the specified freq. + + Examples + -------- + >>> idx = pd.date_range(start='2014-08-01 10:00', freq='h', + ... 
periods=3, tz='Asia/Calcutta') + >>> idx + DatetimeIndex(['2014-08-01 10:00:00+05:30', + '2014-08-01 11:00:00+05:30', + '2014-08-01 12:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq='h') + >>> idx.normalize() + DatetimeIndex(['2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq=None) + """ + new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso) + dt64_values = new_values.view(self._ndarray.dtype) + + dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype) + dta = dta._with_freq("infer") + if self.tz is not None: + dta = dta.tz_localize(self.tz) + return dta + + def to_period(self, freq=None) -> PeriodArray: + """ + Cast to PeriodArray/PeriodIndex at a particular frequency. + + Converts DatetimeArray/Index to PeriodArray/PeriodIndex. + + Parameters + ---------- + freq : str or Period, optional + One of pandas' :ref:`period aliases ` + or an Period object. Will be inferred by default. + + Returns + ------- + PeriodArray/PeriodIndex + + Raises + ------ + ValueError + When converting a DatetimeArray/Index with non-regular values, + so that a frequency cannot be inferred. + + See Also + -------- + PeriodIndex: Immutable ndarray holding ordinal values. + DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. + + Examples + -------- + >>> df = pd.DataFrame({"y": [1, 2, 3]}, + ... index=pd.to_datetime(["2000-03-31 00:00:00", + ... "2000-05-31 00:00:00", + ... "2000-08-31 00:00:00"])) + >>> df.index.to_period("M") + PeriodIndex(['2000-03', '2000-05', '2000-08'], + dtype='period[M]') + + Infer the daily frequency + + >>> idx = pd.date_range("2017-01-01", periods=2) + >>> idx.to_period() + PeriodIndex(['2017-01-01', '2017-01-02'], + dtype='period[D]') + """ + from pandas.core.arrays import PeriodArray + + if self.tz is not None: + warnings.warn( + "Converting to PeriodArray/Index representation " + "will drop timezone information.", + UserWarning, + stacklevel=find_stack_level(), + ) + + if freq is None: + freq = self.freqstr or self.inferred_freq + if isinstance(self.freq, BaseOffset) and hasattr( + self.freq, "_period_dtype_code" + ): + freq = PeriodDtype(self.freq)._freqstr + + if freq is None: + raise ValueError( + "You must pass a freq argument as current index has none." + ) + + res = get_period_alias(freq) + + # https://github.com/pandas-dev/pandas/issues/33358 + if res is None: + res = freq + + freq = res + return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz) + + # ----------------------------------------------------------------- + # Properties - Vectorized Timestamp Properties/Methods + + def month_name(self, locale=None) -> npt.NDArray[np.object_]: + """ + Return the month names with specified locale. + + Parameters + ---------- + locale : str, optional + Locale determining the language in which to return the month name. + Default is English locale (``'en_US.utf8'``). Use the command + ``locale -a`` on your terminal on Unix systems to find your locale + language code. + + Returns + ------- + Series or Index + Series or Index of month names. 
+ + Examples + -------- + >>> s = pd.Series(pd.date_range(start='2018-01', freq='ME', periods=3)) + >>> s + 0 2018-01-31 + 1 2018-02-28 + 2 2018-03-31 + dtype: datetime64[ns] + >>> s.dt.month_name() + 0 January + 1 February + 2 March + dtype: object + + >>> idx = pd.date_range(start='2018-01', freq='ME', periods=3) + >>> idx + DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], + dtype='datetime64[ns]', freq='ME') + >>> idx.month_name() + Index(['January', 'February', 'March'], dtype='object') + + Using the ``locale`` parameter you can set a different locale language, + for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month + names in Brazilian Portuguese language. + + >>> idx = pd.date_range(start='2018-01', freq='ME', periods=3) + >>> idx + DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], + dtype='datetime64[ns]', freq='ME') + >>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP + Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object') + """ + values = self._local_timestamps() + + result = fields.get_date_name_field( + values, "month_name", locale=locale, reso=self._creso + ) + result = self._maybe_mask_results(result, fill_value=None) + return result + + def day_name(self, locale=None) -> npt.NDArray[np.object_]: + """ + Return the day names with specified locale. + + Parameters + ---------- + locale : str, optional + Locale determining the language in which to return the day name. + Default is English locale (``'en_US.utf8'``). Use the command + ``locale -a`` on your terminal on Unix systems to find your locale + language code. + + Returns + ------- + Series or Index + Series or Index of day names. + + Examples + -------- + >>> s = pd.Series(pd.date_range(start='2018-01-01', freq='D', periods=3)) + >>> s + 0 2018-01-01 + 1 2018-01-02 + 2 2018-01-03 + dtype: datetime64[ns] + >>> s.dt.day_name() + 0 Monday + 1 Tuesday + 2 Wednesday + dtype: object + + >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3) + >>> idx + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.day_name() + Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object') + + Using the ``locale`` parameter you can set a different locale language, + for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day + names in Brazilian Portuguese language. + + >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3) + >>> idx + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP + Index(['Segunda', 'Terça', 'Quarta'], dtype='object') + """ + values = self._local_timestamps() + + result = fields.get_date_name_field( + values, "day_name", locale=locale, reso=self._creso + ) + result = self._maybe_mask_results(result, fill_value=None) + return result + + @property + def time(self) -> npt.NDArray[np.object_]: + """ + Returns numpy array of :class:`datetime.time` objects. + + The time part of the Timestamps. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.time + 0 10:00:00 + 1 11:00:00 + dtype: object + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... 
"2/1/2020 11:00:00+00:00"]) + >>> idx.time + array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object) + """ + # If the Timestamps have a timezone that is not UTC, + # convert them into their i8 representation while + # keeping their timezone and not using UTC + timestamps = self._local_timestamps() + + return ints_to_pydatetime(timestamps, box="time", reso=self._creso) + + @property + def timetz(self) -> npt.NDArray[np.object_]: + """ + Returns numpy array of :class:`datetime.time` objects with timezones. + + The time part of the Timestamps. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.timetz + 0 10:00:00+00:00 + 1 11:00:00+00:00 + dtype: object + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.timetz + array([datetime.time(10, 0, tzinfo=datetime.timezone.utc), + datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object) + """ + return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso) + + @property + def date(self) -> npt.NDArray[np.object_]: + """ + Returns numpy array of python :class:`datetime.date` objects. + + Namely, the date part of Timestamps without time and + timezone information. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.date + 0 2020-01-01 + 1 2020-02-01 + dtype: object + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.date + array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object) + """ + # If the Timestamps have a timezone that is not UTC, + # convert them into their i8 representation while + # keeping their timezone and not using UTC + timestamps = self._local_timestamps() + + return ints_to_pydatetime(timestamps, box="date", reso=self._creso) + + def isocalendar(self) -> DataFrame: + """ + Calculate year, week, and day according to the ISO 8601 standard. + + Returns + ------- + DataFrame + With columns year, week and day. + + See Also + -------- + Timestamp.isocalendar : Function return a 3-tuple containing ISO year, + week number, and weekday for the given Timestamp object. + datetime.date.isocalendar : Return a named tuple object with + three components: year, week and weekday. + + Examples + -------- + >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) + >>> idx.isocalendar() + year week day + 2019-12-29 2019 52 7 + 2019-12-30 2020 1 1 + 2019-12-31 2020 1 2 + 2020-01-01 2020 1 3 + >>> idx.isocalendar().week + 2019-12-29 52 + 2019-12-30 1 + 2019-12-31 1 + 2020-01-01 1 + Freq: D, Name: week, dtype: UInt32 + """ + from pandas import DataFrame + + values = self._local_timestamps() + sarray = fields.build_isocalendar_sarray(values, reso=self._creso) + iso_calendar_df = DataFrame( + sarray, columns=["year", "week", "day"], dtype="UInt32" + ) + if self._hasna: + iso_calendar_df.iloc[self._isnan] = None + return iso_calendar_df + + year = _field_accessor( + "year", + "Y", + """ + The year of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="YE") + ... 
) + >>> datetime_series + 0 2000-12-31 + 1 2001-12-31 + 2 2002-12-31 + dtype: datetime64[ns] + >>> datetime_series.dt.year + 0 2000 + 1 2001 + 2 2002 + dtype: int32 + """, + ) + month = _field_accessor( + "month", + "M", + """ + The month as January=1, December=12. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="ME") + ... ) + >>> datetime_series + 0 2000-01-31 + 1 2000-02-29 + 2 2000-03-31 + dtype: datetime64[ns] + >>> datetime_series.dt.month + 0 1 + 1 2 + 2 3 + dtype: int32 + """, + ) + day = _field_accessor( + "day", + "D", + """ + The day of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="D") + ... ) + >>> datetime_series + 0 2000-01-01 + 1 2000-01-02 + 2 2000-01-03 + dtype: datetime64[ns] + >>> datetime_series.dt.day + 0 1 + 1 2 + 2 3 + dtype: int32 + """, + ) + hour = _field_accessor( + "hour", + "h", + """ + The hours of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="h") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 01:00:00 + 2 2000-01-01 02:00:00 + dtype: datetime64[ns] + >>> datetime_series.dt.hour + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + minute = _field_accessor( + "minute", + "m", + """ + The minutes of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="min") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:01:00 + 2 2000-01-01 00:02:00 + dtype: datetime64[ns] + >>> datetime_series.dt.minute + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + second = _field_accessor( + "second", + "s", + """ + The seconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="s") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:00:01 + 2 2000-01-01 00:00:02 + dtype: datetime64[ns] + >>> datetime_series.dt.second + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + microsecond = _field_accessor( + "microsecond", + "us", + """ + The microseconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="us") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00.000000 + 1 2000-01-01 00:00:00.000001 + 2 2000-01-01 00:00:00.000002 + dtype: datetime64[ns] + >>> datetime_series.dt.microsecond + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + nanosecond = _field_accessor( + "nanosecond", + "ns", + """ + The nanoseconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="ns") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00.000000000 + 1 2000-01-01 00:00:00.000000001 + 2 2000-01-01 00:00:00.000000002 + dtype: datetime64[ns] + >>> datetime_series.dt.nanosecond + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + _dayofweek_doc = """ + The day of the week with Monday=0, Sunday=6. + + Return the day of the week. It is assumed the week starts on + Monday, which is denoted by 0 and ends on Sunday which is denoted + by 6. This method is available on both Series with datetime + values (using the `dt` accessor) or DatetimeIndex. + + Returns + ------- + Series or Index + Containing integers indicating the day number. + + See Also + -------- + Series.dt.dayofweek : Alias. + Series.dt.weekday : Alias. + Series.dt.day_name : Returns the name of the day of the week. 
+ + Examples + -------- + >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series() + >>> s.dt.dayofweek + 2016-12-31 5 + 2017-01-01 6 + 2017-01-02 0 + 2017-01-03 1 + 2017-01-04 2 + 2017-01-05 3 + 2017-01-06 4 + 2017-01-07 5 + 2017-01-08 6 + Freq: D, dtype: int32 + """ + day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc) + dayofweek = day_of_week + weekday = day_of_week + + day_of_year = _field_accessor( + "dayofyear", + "doy", + """ + The ordinal day of the year. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.dayofyear + 0 1 + 1 32 + dtype: int32 + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.dayofyear + Index([1, 32], dtype='int32') + """, + ) + dayofyear = day_of_year + quarter = _field_accessor( + "quarter", + "q", + """ + The quarter of the date. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "4/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-04-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.quarter + 0 1 + 1 2 + dtype: int32 + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.quarter + Index([1, 1], dtype='int32') + """, + ) + days_in_month = _field_accessor( + "days_in_month", + "dim", + """ + The number of days in the month. + + Examples + -------- + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.daysinmonth + 0 31 + 1 29 + dtype: int32 + """, + ) + daysinmonth = days_in_month + _is_month_doc = """ + Indicates whether the date is the {first_or_last} day of the month. + + Returns + ------- + Series or array + For Series, returns a Series with boolean values. + For DatetimeIndex, returns a boolean array. + + See Also + -------- + is_month_start : Return a boolean indicating whether the date + is the first day of the month. + is_month_end : Return a boolean indicating whether the date + is the last day of the month. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> s = pd.Series(pd.date_range("2018-02-27", periods=3)) + >>> s + 0 2018-02-27 + 1 2018-02-28 + 2 2018-03-01 + dtype: datetime64[ns] + >>> s.dt.is_month_start + 0 False + 1 False + 2 True + dtype: bool + >>> s.dt.is_month_end + 0 False + 1 True + 2 False + dtype: bool + + >>> idx = pd.date_range("2018-02-27", periods=3) + >>> idx.is_month_start + array([False, False, True]) + >>> idx.is_month_end + array([False, True, False]) + """ + is_month_start = _field_accessor( + "is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first") + ) + + is_month_end = _field_accessor( + "is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last") + ) + + is_quarter_start = _field_accessor( + "is_quarter_start", + "is_quarter_start", + """ + Indicator for whether the date is the first day of a quarter. + + Returns + ------- + is_quarter_start : Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. 
DatetimeIndex will have the same + name. + + See Also + -------- + quarter : Return the quarter of the date. + is_quarter_end : Similar property for indicating the quarter end. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30", + ... periods=4)}) + >>> df.assign(quarter=df.dates.dt.quarter, + ... is_quarter_start=df.dates.dt.is_quarter_start) + dates quarter is_quarter_start + 0 2017-03-30 1 False + 1 2017-03-31 1 False + 2 2017-04-01 2 True + 3 2017-04-02 2 False + + >>> idx = pd.date_range('2017-03-30', periods=4) + >>> idx + DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_quarter_start + array([False, False, True, False]) + """, + ) + is_quarter_end = _field_accessor( + "is_quarter_end", + "is_quarter_end", + """ + Indicator for whether the date is the last day of a quarter. + + Returns + ------- + is_quarter_end : Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. DatetimeIndex will have the same + name. + + See Also + -------- + quarter : Return the quarter of the date. + is_quarter_start : Similar property indicating the quarter start. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30", + ... periods=4)}) + >>> df.assign(quarter=df.dates.dt.quarter, + ... is_quarter_end=df.dates.dt.is_quarter_end) + dates quarter is_quarter_end + 0 2017-03-30 1 False + 1 2017-03-31 1 True + 2 2017-04-01 2 False + 3 2017-04-02 2 False + + >>> idx = pd.date_range('2017-03-30', periods=4) + >>> idx + DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_quarter_end + array([False, True, False, False]) + """, + ) + is_year_start = _field_accessor( + "is_year_start", + "is_year_start", + """ + Indicate whether the date is the first day of a year. + + Returns + ------- + Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. DatetimeIndex will have the same + name. + + See Also + -------- + is_year_end : Similar property indicating the last day of the year. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3)) + >>> dates + 0 2017-12-30 + 1 2017-12-31 + 2 2018-01-01 + dtype: datetime64[ns] + + >>> dates.dt.is_year_start + 0 False + 1 False + 2 True + dtype: bool + + >>> idx = pd.date_range("2017-12-30", periods=3) + >>> idx + DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_year_start + array([False, False, True]) + """, + ) + is_year_end = _field_accessor( + "is_year_end", + "is_year_end", + """ + Indicate whether the date is the last day of the year. + + Returns + ------- + Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. DatetimeIndex will have the same + name. + + See Also + -------- + is_year_start : Similar property indicating the start of the year. 
+ + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3)) + >>> dates + 0 2017-12-30 + 1 2017-12-31 + 2 2018-01-01 + dtype: datetime64[ns] + + >>> dates.dt.is_year_end + 0 False + 1 True + 2 False + dtype: bool + + >>> idx = pd.date_range("2017-12-30", periods=3) + >>> idx + DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_year_end + array([False, True, False]) + """, + ) + is_leap_year = _field_accessor( + "is_leap_year", + "is_leap_year", + """ + Boolean indicator if the date belongs to a leap year. + + A leap year is a year, which has 366 days (instead of 365) including + 29th of February as an intercalary day. + Leap years are years which are multiples of four with the exception + of years divisible by 100 but not by 400. + + Returns + ------- + Series or ndarray + Booleans indicating if dates belong to a leap year. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="YE") + >>> idx + DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'], + dtype='datetime64[ns]', freq='YE-DEC') + >>> idx.is_leap_year + array([ True, False, False]) + + >>> dates_series = pd.Series(idx) + >>> dates_series + 0 2012-12-31 + 1 2013-12-31 + 2 2014-12-31 + dtype: datetime64[ns] + >>> dates_series.dt.is_leap_year + 0 True + 1 False + 2 False + dtype: bool + """, + ) + + def to_julian_date(self) -> npt.NDArray[np.float64]: + """ + Convert Datetime Array to float64 ndarray of Julian Dates. + 0 Julian date is noon January 1, 4713 BC. + https://en.wikipedia.org/wiki/Julian_day + """ + + # http://mysite.verizon.net/aesir_research/date/jdalg2.htm + year = np.asarray(self.year) + month = np.asarray(self.month) + day = np.asarray(self.day) + testarr = month < 3 + year[testarr] -= 1 + month[testarr] += 12 + return ( + day + + np.fix((153 * month - 457) / 5) + + 365 * year + + np.floor(year / 4) + - np.floor(year / 100) + + np.floor(year / 400) + + 1_721_118.5 + + ( + self.hour + + self.minute / 60 + + self.second / 3600 + + self.microsecond / 3600 / 10**6 + + self.nanosecond / 3600 / 10**9 + ) + / 24 + ) + + # ----------------------------------------------------------------- + # Reductions + + def std( + self, + axis=None, + dtype=None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + """ + Return sample standard deviation over requested axis. + + Normalized by `N-1` by default. This can be changed using ``ddof``. + + Parameters + ---------- + axis : int, optional + Axis for the function to be applied on. For :class:`pandas.Series` + this parameter is unused and defaults to ``None``. + ddof : int, default 1 + Degrees of Freedom. The divisor used in calculations is `N - ddof`, + where `N` represents the number of elements. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is ``NA``, the result + will be ``NA``. + + Returns + ------- + Timedelta + + See Also + -------- + numpy.ndarray.std : Returns the standard deviation of the array elements + along given axis. + Series.std : Return sample standard deviation over requested axis. 
+ + Examples + -------- + For :class:`pandas.DatetimeIndex`: + + >>> idx = pd.date_range('2001-01-01 00:00', periods=3) + >>> idx + DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.std() + Timedelta('1 days 00:00:00') + """ + # Because std is translation-invariant, we can get self.std + # by calculating (self - Timestamp(0)).std, and we can do it + # without creating a copy by using a view on self._ndarray + from pandas.core.arrays import TimedeltaArray + + # Find the td64 dtype with the same resolution as our dt64 dtype + dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64") + dtype = np.dtype(dtype_str) + + tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype) + + return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna) + + +# ------------------------------------------------------------------- +# Constructor Helpers + + +def _sequence_to_dt64( + data: ArrayLike, + *, + copy: bool = False, + tz: tzinfo | None = None, + dayfirst: bool = False, + yearfirst: bool = False, + ambiguous: TimeAmbiguous = "raise", + out_unit: str | None = None, +): + """ + Parameters + ---------- + data : np.ndarray or ExtensionArray + dtl.ensure_arraylike_for_datetimelike has already been called. + copy : bool, default False + tz : tzinfo or None, default None + dayfirst : bool, default False + yearfirst : bool, default False + ambiguous : str, bool, or arraylike, default 'raise' + See pandas._libs.tslibs.tzconversion.tz_localize_to_utc. + out_unit : str or None, default None + Desired output resolution. + + Returns + ------- + result : numpy.ndarray + The sequence converted to a numpy array with dtype ``datetime64[unit]``. + Where `unit` is "ns" unless specified otherwise by `out_unit`. + tz : tzinfo or None + Either the user-provided tzinfo or one inferred from the data. + + Raises + ------ + TypeError : PeriodDType data is passed + """ + + # By this point we are assured to have either a numpy array or Index + data, copy = maybe_convert_dtype(data, copy, tz=tz) + data_dtype = getattr(data, "dtype", None) + + if out_unit is None: + out_unit = "ns" + out_dtype = np.dtype(f"M8[{out_unit}]") + + if data_dtype == object or is_string_dtype(data_dtype): + # TODO: We do not have tests specific to string-dtypes, + # also complex or categorical or other extension + data = cast(np.ndarray, data) + copy = False + if lib.infer_dtype(data, skipna=False) == "integer": + # Much more performant than going through array_to_datetime + data = data.astype(np.int64) + elif tz is not None and ambiguous == "raise": + obj_data = np.asarray(data, dtype=object) + result = tslib.array_to_datetime_with_tz( + obj_data, + tz=tz, + dayfirst=dayfirst, + yearfirst=yearfirst, + creso=abbrev_to_npy_unit(out_unit), + ) + return result, tz + else: + converted, inferred_tz = objects_to_datetime64( + data, + dayfirst=dayfirst, + yearfirst=yearfirst, + allow_object=False, + out_unit=out_unit or "ns", + ) + copy = False + if tz and inferred_tz: + # two timezones: convert to intended from base UTC repr + # GH#42505 by convention, these are _already_ UTC + result = converted + + elif inferred_tz: + tz = inferred_tz + result = converted + + else: + result, _ = _construct_from_dt64_naive( + converted, tz=tz, copy=copy, ambiguous=ambiguous + ) + return result, tz + + data_dtype = data.dtype + + # `data` may have originally been a Categorical[datetime64[ns, tz]], + # so we need to handle these types. 
+ if isinstance(data_dtype, DatetimeTZDtype): + # DatetimeArray -> ndarray + data = cast(DatetimeArray, data) + tz = _maybe_infer_tz(tz, data.tz) + result = data._ndarray + + elif lib.is_np_dtype(data_dtype, "M"): + # tz-naive DatetimeArray or ndarray[datetime64] + if isinstance(data, DatetimeArray): + data = data._ndarray + + data = cast(np.ndarray, data) + result, copy = _construct_from_dt64_naive( + data, tz=tz, copy=copy, ambiguous=ambiguous + ) + + else: + # must be integer dtype otherwise + # assume this data are epoch timestamps + if data.dtype != INT64_DTYPE: + data = data.astype(np.int64, copy=False) + copy = False + data = cast(np.ndarray, data) + result = data.view(out_dtype) + + if copy: + result = result.copy() + + assert isinstance(result, np.ndarray), type(result) + assert result.dtype.kind == "M" + assert result.dtype != "M8" + assert is_supported_dtype(result.dtype) + return result, tz + + +def _construct_from_dt64_naive( + data: np.ndarray, *, tz: tzinfo | None, copy: bool, ambiguous: TimeAmbiguous +) -> tuple[np.ndarray, bool]: + """ + Convert datetime64 data to a supported dtype, localizing if necessary. + """ + # Caller is responsible for ensuring + # lib.is_np_dtype(data.dtype) + + new_dtype = data.dtype + if not is_supported_dtype(new_dtype): + # Cast to the nearest supported unit, generally "s" + new_dtype = get_supported_dtype(new_dtype) + data = astype_overflowsafe(data, dtype=new_dtype, copy=False) + copy = False + + if data.dtype.byteorder == ">": + # TODO: better way to handle this? non-copying alternative? + # without this, test_constructor_datetime64_bigendian fails + data = data.astype(data.dtype.newbyteorder("<")) + new_dtype = data.dtype + copy = False + + if tz is not None: + # Convert tz-naive to UTC + # TODO: if tz is UTC, are there situations where we *don't* want a + # copy? tz_localize_to_utc always makes one. + shape = data.shape + if data.ndim > 1: + data = data.ravel() + + data_unit = get_unit_from_dtype(new_dtype) + data = tzconversion.tz_localize_to_utc( + data.view("i8"), tz, ambiguous=ambiguous, creso=data_unit + ) + data = data.view(new_dtype) + data = data.reshape(shape) + + assert data.dtype == new_dtype, data.dtype + result = data + + return result, copy + + +def objects_to_datetime64( + data: np.ndarray, + dayfirst, + yearfirst, + utc: bool = False, + errors: DateTimeErrorChoices = "raise", + allow_object: bool = False, + out_unit: str = "ns", +): + """ + Convert data to array of timestamps. + + Parameters + ---------- + data : np.ndarray[object] + dayfirst : bool + yearfirst : bool + utc : bool, default False + Whether to convert/localize timestamps to UTC. + errors : {'raise', 'ignore', 'coerce'} + allow_object : bool + Whether to return an object-dtype ndarray instead of raising if the + data contains more than one timezone. + out_unit : str, default "ns" + + Returns + ------- + result : ndarray + np.datetime64[out_unit] if returned values represent wall times or UTC + timestamps. + object if mixed timezones + inferred_tz : tzinfo or None + If not None, then the datetime64 values in `result` denote UTC timestamps. 
+ + Raises + ------ + ValueError : if data cannot be converted to datetimes + TypeError : When a type cannot be converted to datetime + """ + assert errors in ["raise", "ignore", "coerce"] + + # if str-dtype, convert + data = np.asarray(data, dtype=np.object_) + + result, tz_parsed = tslib.array_to_datetime( + data, + errors=errors, + utc=utc, + dayfirst=dayfirst, + yearfirst=yearfirst, + creso=abbrev_to_npy_unit(out_unit), + ) + + if tz_parsed is not None: + # We can take a shortcut since the datetime64 numpy array + # is in UTC + return result, tz_parsed + elif result.dtype.kind == "M": + return result, tz_parsed + elif result.dtype == object: + # GH#23675 when called via `pd.to_datetime`, returning an object-dtype + # array is allowed. When called via `pd.DatetimeIndex`, we can + # only accept datetime64 dtype, so raise TypeError if object-dtype + # is returned, as that indicates the values can be recognized as + # datetimes but they have conflicting timezones/awareness + if allow_object: + return result, tz_parsed + raise TypeError("DatetimeIndex has mixed timezones") + else: # pragma: no cover + # GH#23675 this TypeError should never be hit, whereas the TypeError + # in the object-dtype branch above is reachable. + raise TypeError(result) + + +def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None): + """ + Convert data based on dtype conventions, issuing + errors where appropriate. + + Parameters + ---------- + data : np.ndarray or pd.Index + copy : bool + tz : tzinfo or None, default None + + Returns + ------- + data : np.ndarray or pd.Index + copy : bool + + Raises + ------ + TypeError : PeriodDType data is passed + """ + if not hasattr(data, "dtype"): + # e.g. collections.deque + return data, copy + + if is_float_dtype(data.dtype): + # pre-2.0 we treated these as wall-times, inconsistent with ints + # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes. + # Note: data.astype(np.int64) fails ARM tests, see + # https://github.com/pandas-dev/pandas/issues/49468. + data = data.astype(DT64NS_DTYPE).view("i8") + copy = False + + elif lib.is_np_dtype(data.dtype, "m") or is_bool_dtype(data.dtype): + # GH#29794 enforcing deprecation introduced in GH#23539 + raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]") + elif isinstance(data.dtype, PeriodDtype): + # Note: without explicitly raising here, PeriodIndex + # test_setops.test_join_does_not_recur fails + raise TypeError( + "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead" + ) + + elif isinstance(data.dtype, ExtensionDtype) and not isinstance( + data.dtype, DatetimeTZDtype + ): + # TODO: We have no tests for these + data = np.array(data, dtype=np.object_) + copy = False + + return data, copy + + +# ------------------------------------------------------------------- +# Validation and Inference + + +def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None: + """ + If a timezone is inferred from data, check that it is compatible with + the user-provided timezone, if any. 
+ + Parameters + ---------- + tz : tzinfo or None + inferred_tz : tzinfo or None + + Returns + ------- + tz : tzinfo or None + + Raises + ------ + TypeError : if both timezones are present but do not match + """ + if tz is None: + tz = inferred_tz + elif inferred_tz is None: + pass + elif not timezones.tz_compare(tz, inferred_tz): + raise TypeError( + f"data is already tz-aware {inferred_tz}, unable to " + f"set specified tz: {tz}" + ) + return tz + + +def _validate_dt64_dtype(dtype): + """ + Check that a dtype, if passed, represents either a numpy datetime64[ns] + dtype or a pandas DatetimeTZDtype. + + Parameters + ---------- + dtype : object + + Returns + ------- + dtype : None, numpy.dtype, or DatetimeTZDtype + + Raises + ------ + ValueError : invalid dtype + + Notes + ----- + Unlike _validate_tz_from_dtype, this does _not_ allow non-existent + tz errors to go through + """ + if dtype is not None: + dtype = pandas_dtype(dtype) + if dtype == np.dtype("M8"): + # no precision, disallowed GH#24806 + msg = ( + "Passing in 'datetime64' dtype with no precision is not allowed. " + "Please pass in 'datetime64[ns]' instead." + ) + raise ValueError(msg) + + if ( + isinstance(dtype, np.dtype) + and (dtype.kind != "M" or not is_supported_dtype(dtype)) + ) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)): + raise ValueError( + f"Unexpected value for 'dtype': '{dtype}'. " + "Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', " + "'datetime64[ns]' or DatetimeTZDtype'." + ) + + if getattr(dtype, "tz", None): + # https://github.com/pandas-dev/pandas/issues/18595 + # Ensure that we have a standard timezone for pytz objects. + # Without this, things like adding an array of timedeltas and + # a tz-aware Timestamp (with a tz specific to its datetime) will + # be incorrect(ish?) for the array as a whole + dtype = cast(DatetimeTZDtype, dtype) + dtype = DatetimeTZDtype( + unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz) + ) + + return dtype + + +def _validate_tz_from_dtype( + dtype, tz: tzinfo | None, explicit_tz_none: bool = False +) -> tzinfo | None: + """ + If the given dtype is a DatetimeTZDtype, extract the implied + tzinfo object from it and check that it does not conflict with the given + tz. + + Parameters + ---------- + dtype : dtype, str + tz : None, tzinfo + explicit_tz_none : bool, default False + Whether tz=None was passed explicitly, as opposed to lib.no_default. + + Returns + ------- + tz : consensus tzinfo + + Raises + ------ + ValueError : on tzinfo mismatch + """ + if dtype is not None: + if isinstance(dtype, str): + try: + dtype = DatetimeTZDtype.construct_from_string(dtype) + except TypeError: + # Things like `datetime64[ns]`, which is OK for the + # constructors, but also nonsense, which should be validated + # but not by us. We *do* allow non-existent tz errors to + # go through + pass + dtz = getattr(dtype, "tz", None) + if dtz is not None: + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError("cannot supply both a tz and a dtype with a tz") + if explicit_tz_none: + raise ValueError("Cannot pass both a timezone-aware dtype and tz=None") + tz = dtz + + if tz is not None and lib.is_np_dtype(dtype, "M"): + # We also need to check for the case where the user passed a + # tz-naive dtype (i.e. datetime64[ns]) + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError( + "cannot supply both a tz and a " + "timezone-naive dtype (i.e. 
datetime64[ns])" + ) + + return tz + + +def _infer_tz_from_endpoints( + start: Timestamp, end: Timestamp, tz: tzinfo | None +) -> tzinfo | None: + """ + If a timezone is not explicitly given via `tz`, see if one can + be inferred from the `start` and `end` endpoints. If more than one + of these inputs provides a timezone, require that they all agree. + + Parameters + ---------- + start : Timestamp + end : Timestamp + tz : tzinfo or None + + Returns + ------- + tz : tzinfo or None + + Raises + ------ + TypeError : if start and end timezones do not agree + """ + try: + inferred_tz = timezones.infer_tzinfo(start, end) + except AssertionError as err: + # infer_tzinfo raises AssertionError if passed mismatched timezones + raise TypeError( + "Start and end cannot both be tz-aware with different timezones" + ) from err + + inferred_tz = timezones.maybe_get_tz(inferred_tz) + tz = timezones.maybe_get_tz(tz) + + if tz is not None and inferred_tz is not None: + if not timezones.tz_compare(inferred_tz, tz): + raise AssertionError("Inferred time zone not equal to passed time zone") + + elif inferred_tz is not None: + tz = inferred_tz + + return tz + + +def _maybe_normalize_endpoints( + start: Timestamp | None, end: Timestamp | None, normalize: bool +): + if normalize: + if start is not None: + start = start.normalize() + + if end is not None: + end = end.normalize() + + return start, end + + +def _maybe_localize_point( + ts: Timestamp | None, freq, tz, ambiguous, nonexistent +) -> Timestamp | None: + """ + Localize a start or end Timestamp to the timezone of the corresponding + start or end Timestamp + + Parameters + ---------- + ts : start or end Timestamp to potentially localize + freq : Tick, DateOffset, or None + tz : str, timezone object or None + ambiguous: str, localization behavior for ambiguous times + nonexistent: str, localization behavior for nonexistent times + + Returns + ------- + ts : Timestamp + """ + # Make sure start and end are timezone localized if: + # 1) freq = a Timedelta-like frequency (Tick) + # 2) freq = None i.e. generating a linspaced range + if ts is not None and ts.tzinfo is None: + # Note: We can't ambiguous='infer' a singular ambiguous time; however, + # we have historically defaulted ambiguous=False + ambiguous = ambiguous if ambiguous != "infer" else False + localize_args = {"ambiguous": ambiguous, "nonexistent": nonexistent, "tz": None} + if isinstance(freq, Tick) or freq is None: + localize_args["tz"] = tz + ts = ts.tz_localize(**localize_args) + return ts + + +def _generate_range( + start: Timestamp | None, + end: Timestamp | None, + periods: int | None, + offset: BaseOffset, + *, + unit: str, +): + """ + Generates a sequence of dates corresponding to the specified time + offset. Similar to dateutil.rrule except uses pandas DateOffset + objects to represent time increments. + + Parameters + ---------- + start : Timestamp or None + end : Timestamp or None + periods : int or None + offset : DateOffset + unit : str + + Notes + ----- + * This method is faster for generating weekdays than dateutil.rrule + * At least two of (start, end, periods) must be specified. + * If both start and end are specified, the returned dates will + satisfy start <= date <= end. 
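These endpoint helpers back pd.date_range: a timezone given on only one endpoint is propagated to the result, while conflicting timezones are rejected. A small sketch of the observable behaviour, assuming pandas 2.x (the mismatch currently surfaces as a TypeError):

import pandas as pd

# tz is inferred from a tz-aware endpoint
idx = pd.date_range(start=pd.Timestamp("2024-01-01", tz="UTC"), periods=3, freq="D")
print(idx.tz)  # UTC

# endpoints carrying different time zones are rejected
try:
    pd.date_range(
        start=pd.Timestamp("2024-01-01", tz="UTC"),
        end=pd.Timestamp("2024-01-05", tz="US/Eastern"),
        freq="D",
    )
except (TypeError, ValueError) as err:
    print(err)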
+ + Returns + ------- + dates : generator object + """ + offset = to_offset(offset) + + # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]"; + # expected "Union[integer[Any], float, str, date, datetime64]" + start = Timestamp(start) # type: ignore[arg-type] + if start is not NaT: + start = start.as_unit(unit) + else: + start = None + + # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]"; + # expected "Union[integer[Any], float, str, date, datetime64]" + end = Timestamp(end) # type: ignore[arg-type] + if end is not NaT: + end = end.as_unit(unit) + else: + end = None + + if start and not offset.is_on_offset(start): + # Incompatible types in assignment (expression has type "datetime", + # variable has type "Optional[Timestamp]") + start = offset.rollforward(start) # type: ignore[assignment] + + elif end and not offset.is_on_offset(end): + # Incompatible types in assignment (expression has type "datetime", + # variable has type "Optional[Timestamp]") + end = offset.rollback(end) # type: ignore[assignment] + + # Unsupported operand types for < ("Timestamp" and "None") + if periods is None and end < start and offset.n >= 0: # type: ignore[operator] + end = None + periods = 0 + + if end is None: + # error: No overload variant of "__radd__" of "BaseOffset" matches + # argument type "None" + end = start + (periods - 1) * offset # type: ignore[operator] + + if start is None: + # error: No overload variant of "__radd__" of "BaseOffset" matches + # argument type "None" + start = end - (periods - 1) * offset # type: ignore[operator] + + start = cast(Timestamp, start) + end = cast(Timestamp, end) + + cur = start + if offset.n >= 0: + while cur <= end: + yield cur + + if cur == end: + # GH#24252 avoid overflows by not performing the addition + # in offset.apply unless we have to + break + + # faster than cur + offset + next_date = offset._apply(cur) + next_date = next_date.as_unit(unit) + if next_date <= cur: + raise ValueError(f"Offset {offset} did not increment date") + cur = next_date + else: + while cur >= end: + yield cur + + if cur == end: + # GH#24252 avoid overflows by not performing the addition + # in offset.apply unless we have to + break + + # faster than cur + offset + next_date = offset._apply(cur) + next_date = next_date.as_unit(unit) + if next_date >= cur: + raise ValueError(f"Offset {offset} did not decrement date") + cur = next_date diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/floating.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/floating.py new file mode 100644 index 0000000000000000000000000000000000000000..74b8cfb65cbc7887b7d2a164121c90eda0833121 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/floating.py @@ -0,0 +1,173 @@ +from __future__ import annotations + +from typing import ClassVar + +import numpy as np + +from pandas.core.dtypes.base import register_extension_dtype +from pandas.core.dtypes.common import is_float_dtype + +from pandas.core.arrays.numeric import ( + NumericArray, + NumericDtype, +) + + +class FloatingDtype(NumericDtype): + """ + An ExtensionDtype to hold a single size of floating dtype. + + These specific implementations are subclasses of the non-public + FloatingDtype. For example we have Float32Dtype to represent float32. + + The attributes name & type are set when these subclasses are created. 
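The _generate_range generator above is what pd.date_range falls back to for non-fixed frequencies: off-offset endpoints are rolled onto the offset before stepping. A minimal sketch of that rolling behaviour:

import pandas as pd

# 2024-01-03 is a Wednesday; the start is rolled forward to the first Sunday,
# and the end is rolled back to the last Sunday on or before it
idx = pd.date_range("2024-01-03", "2024-01-31", freq="W-SUN")
print(idx[0])   # 2024-01-07
print(idx[-1])  # 2024-01-28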
+ """ + + _default_np_dtype = np.dtype(np.float64) + _checker = is_float_dtype + + @classmethod + def construct_array_type(cls) -> type[FloatingArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return FloatingArray + + @classmethod + def _get_dtype_mapping(cls) -> dict[np.dtype, FloatingDtype]: + return NUMPY_FLOAT_TO_DTYPE + + @classmethod + def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: + """ + Safely cast the values to the given dtype. + + "safe" in this context means the casting is lossless. + """ + # This is really only here for compatibility with IntegerDtype + # Here for compat with IntegerDtype + return values.astype(dtype, copy=copy) + + +class FloatingArray(NumericArray): + """ + Array of floating (optional missing) values. + + .. warning:: + + FloatingArray is currently experimental, and its API or internal + implementation may change without warning. Especially the behaviour + regarding NaN (distinct from NA missing values) is subject to change. + + We represent a FloatingArray with 2 numpy arrays: + + - data: contains a numpy float array of the appropriate dtype + - mask: a boolean array holding a mask on the data, True is missing + + To construct an FloatingArray from generic array-like input, use + :func:`pandas.array` with one of the float dtypes (see examples). + + See :ref:`integer_na` for more. + + Parameters + ---------- + values : numpy.ndarray + A 1-d float-dtype array. + mask : numpy.ndarray + A 1-d boolean-dtype array indicating missing values. + copy : bool, default False + Whether to copy the `values` and `mask`. + + Attributes + ---------- + None + + Methods + ------- + None + + Returns + ------- + FloatingArray + + Examples + -------- + Create an FloatingArray with :func:`pandas.array`: + + >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype()) + + [0.1, , 0.3] + Length: 3, dtype: Float32 + + String aliases for the dtypes are also available. They are capitalized. + + >>> pd.array([0.1, None, 0.3], dtype="Float32") + + [0.1, , 0.3] + Length: 3, dtype: Float32 + """ + + _dtype_cls = FloatingDtype + + # The value used to fill '_data' to avoid upcasting + _internal_fill_value = np.nan + # Fill values used for any/all + # Incompatible types in assignment (expression has type "float", base class + # "BaseMaskedArray" defined the type as "") + _truthy_value = 1.0 # type: ignore[assignment] + _falsey_value = 0.0 # type: ignore[assignment] + + +_dtype_docstring = """ +An ExtensionDtype for {dtype} data. + +This dtype uses ``pd.NA`` as missing value indicator. 
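FloatingArray keeps the data buffer and the missing-value mask separate, so pd.NA (and, in current releases, np.nan supplied at construction) ends up in the mask rather than in the float data; as the warning above notes, the NaN-vs-NA behaviour is still subject to change, so treat this as a hedged sketch only:

import numpy as np
import pandas as pd

arr = pd.array([0.1, None, np.nan], dtype="Float64")
print(arr.isna())  # [False  True  True] in current releases
print(arr.sum())   # 0.1; masked values are skipped by default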
+ +Attributes +---------- +None + +Methods +------- +None + +Examples +-------- +For Float32Dtype: + +>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype()) +>>> ser.dtype +Float32Dtype() + +For Float64Dtype: + +>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype()) +>>> ser.dtype +Float64Dtype() +""" + +# create the Dtype + + +@register_extension_dtype +class Float32Dtype(FloatingDtype): + type = np.float32 + name: ClassVar[str] = "Float32" + __doc__ = _dtype_docstring.format(dtype="float32") + + +@register_extension_dtype +class Float64Dtype(FloatingDtype): + type = np.float64 + name: ClassVar[str] = "Float64" + __doc__ = _dtype_docstring.format(dtype="float64") + + +NUMPY_FLOAT_TO_DTYPE: dict[np.dtype, FloatingDtype] = { + np.dtype(np.float32): Float32Dtype(), + np.dtype(np.float64): Float64Dtype(), +} diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/integer.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/integer.py new file mode 100644 index 0000000000000000000000000000000000000000..f9384e25ba9d9f32caf826efc01b4eb58a454d65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/integer.py @@ -0,0 +1,272 @@ +from __future__ import annotations + +from typing import ClassVar + +import numpy as np + +from pandas.core.dtypes.base import register_extension_dtype +from pandas.core.dtypes.common import is_integer_dtype + +from pandas.core.arrays.numeric import ( + NumericArray, + NumericDtype, +) + + +class IntegerDtype(NumericDtype): + """ + An ExtensionDtype to hold a single size & kind of integer dtype. + + These specific implementations are subclasses of the non-public + IntegerDtype. For example, we have Int8Dtype to represent signed int 8s. + + The attributes name & type are set when these subclasses are created. + """ + + _default_np_dtype = np.dtype(np.int64) + _checker = is_integer_dtype + + @classmethod + def construct_array_type(cls) -> type[IntegerArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return IntegerArray + + @classmethod + def _get_dtype_mapping(cls) -> dict[np.dtype, IntegerDtype]: + return NUMPY_INT_TO_DTYPE + + @classmethod + def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: + """ + Safely cast the values to the given dtype. + + "safe" in this context means the casting is lossless. e.g. if 'values' + has a floating dtype, each value must be an integer. + """ + try: + return values.astype(dtype, casting="safe", copy=copy) + except TypeError as err: + casted = values.astype(dtype, copy=copy) + if (casted == values).all(): + return casted + + raise TypeError( + f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}" + ) from err + + +class IntegerArray(NumericArray): + """ + Array of integer (optional missing) values. + + Uses :attr:`pandas.NA` as the missing value. + + .. warning:: + + IntegerArray is currently experimental, and its API or internal + implementation may change without warning. + + We represent an IntegerArray with 2 numpy arrays: + + - data: contains a numpy integer array of the appropriate dtype + - mask: a boolean array holding a mask on the data, True is missing + + To construct an IntegerArray from generic array-like input, use + :func:`pandas.array` with one of the integer dtypes (see examples). + + See :ref:`integer_na` for more. + + Parameters + ---------- + values : numpy.ndarray + A 1-d integer-dtype array. 
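_safe_cast above only accepts lossless conversions, which is what makes constructing masked integer arrays from float data strict. A minimal sketch:

import pandas as pd

# exactly-integral floats cast losslessly
print(pd.array([1.0, 2.0, None], dtype="Int64"))

# non-integral floats are rejected by the safe cast
try:
    pd.array([1.5, 2.0], dtype="Int64")
except TypeError as err:
    print(err)  # cannot safely cast non-equivalent float64 to int64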
+ mask : numpy.ndarray + A 1-d boolean-dtype array indicating missing values. + copy : bool, default False + Whether to copy the `values` and `mask`. + + Attributes + ---------- + None + + Methods + ------- + None + + Returns + ------- + IntegerArray + + Examples + -------- + Create an IntegerArray with :func:`pandas.array`. + + >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype()) + >>> int_array + + [1, , 3] + Length: 3, dtype: Int32 + + String aliases for the dtypes are also available. They are capitalized. + + >>> pd.array([1, None, 3], dtype='Int32') + + [1, , 3] + Length: 3, dtype: Int32 + + >>> pd.array([1, None, 3], dtype='UInt16') + + [1, , 3] + Length: 3, dtype: UInt16 + """ + + _dtype_cls = IntegerDtype + + # The value used to fill '_data' to avoid upcasting + _internal_fill_value = 1 + # Fill values used for any/all + # Incompatible types in assignment (expression has type "int", base class + # "BaseMaskedArray" defined the type as "") + _truthy_value = 1 # type: ignore[assignment] + _falsey_value = 0 # type: ignore[assignment] + + +_dtype_docstring = """ +An ExtensionDtype for {dtype} integer data. + +Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`. + +Attributes +---------- +None + +Methods +------- +None + +Examples +-------- +For Int8Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype()) +>>> ser.dtype +Int8Dtype() + +For Int16Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype()) +>>> ser.dtype +Int16Dtype() + +For Int32Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype()) +>>> ser.dtype +Int32Dtype() + +For Int64Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype()) +>>> ser.dtype +Int64Dtype() + +For UInt8Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype()) +>>> ser.dtype +UInt8Dtype() + +For UInt16Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype()) +>>> ser.dtype +UInt16Dtype() + +For UInt32Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype()) +>>> ser.dtype +UInt32Dtype() + +For UInt64Dtype: + +>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype()) +>>> ser.dtype +UInt64Dtype() +""" + +# create the Dtype + + +@register_extension_dtype +class Int8Dtype(IntegerDtype): + type = np.int8 + name: ClassVar[str] = "Int8" + __doc__ = _dtype_docstring.format(dtype="int8") + + +@register_extension_dtype +class Int16Dtype(IntegerDtype): + type = np.int16 + name: ClassVar[str] = "Int16" + __doc__ = _dtype_docstring.format(dtype="int16") + + +@register_extension_dtype +class Int32Dtype(IntegerDtype): + type = np.int32 + name: ClassVar[str] = "Int32" + __doc__ = _dtype_docstring.format(dtype="int32") + + +@register_extension_dtype +class Int64Dtype(IntegerDtype): + type = np.int64 + name: ClassVar[str] = "Int64" + __doc__ = _dtype_docstring.format(dtype="int64") + + +@register_extension_dtype +class UInt8Dtype(IntegerDtype): + type = np.uint8 + name: ClassVar[str] = "UInt8" + __doc__ = _dtype_docstring.format(dtype="uint8") + + +@register_extension_dtype +class UInt16Dtype(IntegerDtype): + type = np.uint16 + name: ClassVar[str] = "UInt16" + __doc__ = _dtype_docstring.format(dtype="uint16") + + +@register_extension_dtype +class UInt32Dtype(IntegerDtype): + type = np.uint32 + name: ClassVar[str] = "UInt32" + __doc__ = _dtype_docstring.format(dtype="uint32") + + +@register_extension_dtype +class UInt64Dtype(IntegerDtype): + type = np.uint64 + name: ClassVar[str] = "UInt64" + __doc__ = _dtype_docstring.format(dtype="uint64") + + +NUMPY_INT_TO_DTYPE: dict[np.dtype, 
IntegerDtype] = { + np.dtype(np.int8): Int8Dtype(), + np.dtype(np.int16): Int16Dtype(), + np.dtype(np.int32): Int32Dtype(), + np.dtype(np.int64): Int64Dtype(), + np.dtype(np.uint8): UInt8Dtype(), + np.dtype(np.uint16): UInt16Dtype(), + np.dtype(np.uint32): UInt32Dtype(), + np.dtype(np.uint64): UInt64Dtype(), +} diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/interval.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/interval.py new file mode 100644 index 0000000000000000000000000000000000000000..91db7f11bcbe025045318c70451f1c9e75fd779a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/interval.py @@ -0,0 +1,1917 @@ +from __future__ import annotations + +import operator +from operator import ( + le, + lt, +) +import textwrap +from typing import ( + TYPE_CHECKING, + Literal, + Union, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.interval import ( + VALID_CLOSED, + Interval, + IntervalMixin, + intervals_to_interval_bounds, +) +from pandas._libs.missing import NA +from pandas._typing import ( + ArrayLike, + AxisInt, + Dtype, + FillnaOptions, + IntervalClosedType, + NpDtype, + PositionalIndexer, + ScalarIndexer, + Self, + SequenceIndexer, + SortKind, + TimeArrayLike, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import IntCastingNaNError +from pandas.util._decorators import Appender + +from pandas.core.dtypes.cast import ( + LossySetitemError, + maybe_upcast_numeric_to_64bit, +) +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_scalar, + is_string_dtype, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCDatetimeIndex, + ABCIntervalIndex, + ABCPeriodIndex, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + notna, +) + +from pandas.core.algorithms import ( + isin, + take, + unique, + value_counts_internal as value_counts, +) +from pandas.core.arrays import ArrowExtensionArray +from pandas.core.arrays.base import ( + ExtensionArray, + _extension_array_shared_docs, +) +from pandas.core.arrays.datetimes import DatetimeArray +from pandas.core.arrays.timedeltas import TimedeltaArray +import pandas.core.common as com +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import check_array_indexer +from pandas.core.ops import ( + invalid_comparison, + unpack_zerodim_and_defer, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Sequence, + ) + + from pandas import ( + Index, + Series, + ) + + +IntervalSide = Union[TimeArrayLike, np.ndarray] +IntervalOrNA = Union[Interval, float] + +_interval_shared_docs: dict[str, str] = {} + +_shared_docs_kwargs = { + "klass": "IntervalArray", + "qualname": "arrays.IntervalArray", + "name": "", +} + + +_interval_shared_docs[ + "class" +] = """ +%(summary)s + +Parameters +---------- +data : array-like (1-dimensional) + Array-like (ndarray, :class:`DateTimeArray`, :class:`TimeDeltaArray`) containing + Interval objects from which to build the %(klass)s. +closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both or + neither. +dtype : dtype or None, default None + If None, dtype will be inferred. 
+copy : bool, default False + Copy the input data. +%(name)s\ +verify_integrity : bool, default True + Verify that the %(klass)s is valid. + +Attributes +---------- +left +right +closed +mid +length +is_empty +is_non_overlapping_monotonic +%(extra_attributes)s\ + +Methods +------- +from_arrays +from_tuples +from_breaks +contains +overlaps +set_closed +to_tuples +%(extra_methods)s\ + +See Also +-------- +Index : The base pandas Index type. +Interval : A bounded slice-like interval; the elements of an %(klass)s. +interval_range : Function to create a fixed frequency IntervalIndex. +cut : Bin values into discrete Intervals. +qcut : Bin values into equal-sized Intervals based on rank or sample quantiles. + +Notes +----- +See the `user guide +`__ +for more. + +%(examples)s\ +""" + + +@Appender( + _interval_shared_docs["class"] + % { + "klass": "IntervalArray", + "summary": "Pandas array for interval data that are closed on the same side.", + "name": "", + "extra_attributes": "", + "extra_methods": "", + "examples": textwrap.dedent( + """\ + Examples + -------- + A new ``IntervalArray`` can be constructed directly from an array-like of + ``Interval`` objects: + + >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) + + [(0, 1], (1, 5]] + Length: 2, dtype: interval[int64, right] + + It may also be constructed using one of the constructor + methods: :meth:`IntervalArray.from_arrays`, + :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`. + """ + ), + } +) +class IntervalArray(IntervalMixin, ExtensionArray): + can_hold_na = True + _na_value = _fill_value = np.nan + + @property + def ndim(self) -> Literal[1]: + return 1 + + # To make mypy recognize the fields + _left: IntervalSide + _right: IntervalSide + _dtype: IntervalDtype + + # --------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + data, + closed: IntervalClosedType | None = None, + dtype: Dtype | None = None, + copy: bool = False, + verify_integrity: bool = True, + ): + data = extract_array(data, extract_numpy=True) + + if isinstance(data, cls): + left: IntervalSide = data._left + right: IntervalSide = data._right + closed = closed or data.closed + dtype = IntervalDtype(left.dtype, closed=closed) + else: + # don't allow scalars + if is_scalar(data): + msg = ( + f"{cls.__name__}(...) 
must be called with a collection " + f"of some kind, {data} was passed" + ) + raise TypeError(msg) + + # might need to convert empty or purely na data + data = _maybe_convert_platform_interval(data) + left, right, infer_closed = intervals_to_interval_bounds( + data, validate_closed=closed is None + ) + if left.dtype == object: + left = lib.maybe_convert_objects(left) + right = lib.maybe_convert_objects(right) + closed = closed or infer_closed + + left, right, dtype = cls._ensure_simple_new_inputs( + left, + right, + closed=closed, + copy=copy, + dtype=dtype, + ) + + if verify_integrity: + cls._validate(left, right, dtype=dtype) + + return cls._simple_new( + left, + right, + dtype=dtype, + ) + + @classmethod + def _simple_new( + cls, + left: IntervalSide, + right: IntervalSide, + dtype: IntervalDtype, + ) -> Self: + result = IntervalMixin.__new__(cls) + result._left = left + result._right = right + result._dtype = dtype + + return result + + @classmethod + def _ensure_simple_new_inputs( + cls, + left, + right, + closed: IntervalClosedType | None = None, + copy: bool = False, + dtype: Dtype | None = None, + ) -> tuple[IntervalSide, IntervalSide, IntervalDtype]: + """Ensure correctness of input parameters for cls._simple_new.""" + from pandas.core.indexes.base import ensure_index + + left = ensure_index(left, copy=copy) + left = maybe_upcast_numeric_to_64bit(left) + + right = ensure_index(right, copy=copy) + right = maybe_upcast_numeric_to_64bit(right) + + if closed is None and isinstance(dtype, IntervalDtype): + closed = dtype.closed + + closed = closed or "right" + + if dtype is not None: + # GH 19262: dtype must be an IntervalDtype to override inferred + dtype = pandas_dtype(dtype) + if isinstance(dtype, IntervalDtype): + if dtype.subtype is not None: + left = left.astype(dtype.subtype) + right = right.astype(dtype.subtype) + else: + msg = f"dtype must be an IntervalDtype, got {dtype}" + raise TypeError(msg) + + if dtype.closed is None: + # possibly loading an old pickle + dtype = IntervalDtype(dtype.subtype, closed) + elif closed != dtype.closed: + raise ValueError("closed keyword does not match dtype.closed") + + # coerce dtypes to match if needed + if is_float_dtype(left.dtype) and is_integer_dtype(right.dtype): + right = right.astype(left.dtype) + elif is_float_dtype(right.dtype) and is_integer_dtype(left.dtype): + left = left.astype(right.dtype) + + if type(left) != type(right): + msg = ( + f"must not have differing left [{type(left).__name__}] and " + f"right [{type(right).__name__}] types" + ) + raise ValueError(msg) + if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype): + # GH 19016 + msg = ( + "category, object, and string subtypes are not supported " + "for IntervalArray" + ) + raise TypeError(msg) + if isinstance(left, ABCPeriodIndex): + msg = "Period dtypes are not supported, use a PeriodIndex instead" + raise ValueError(msg) + if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz): + msg = ( + "left and right must have the same time zone, got " + f"'{left.tz}' and '{right.tz}'" + ) + raise ValueError(msg) + elif needs_i8_conversion(left.dtype) and left.unit != right.unit: + # e.g. 
m8[s] vs m8[ms], try to cast to a common dtype GH#55714 + left_arr, right_arr = left._data._ensure_matching_resos(right._data) + left = ensure_index(left_arr) + right = ensure_index(right_arr) + + # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray + left = ensure_wrapped_if_datetimelike(left) + left = extract_array(left, extract_numpy=True) + right = ensure_wrapped_if_datetimelike(right) + right = extract_array(right, extract_numpy=True) + + if isinstance(left, ArrowExtensionArray) or isinstance( + right, ArrowExtensionArray + ): + pass + else: + lbase = getattr(left, "_ndarray", left) + lbase = getattr(lbase, "_data", lbase).base + rbase = getattr(right, "_ndarray", right) + rbase = getattr(rbase, "_data", rbase).base + if lbase is not None and lbase is rbase: + # If these share data, then setitem could corrupt our IA + right = right.copy() + + dtype = IntervalDtype(left.dtype, closed=closed) + + return left, right, dtype + + @classmethod + def _from_sequence( + cls, + scalars, + *, + dtype: Dtype | None = None, + copy: bool = False, + ) -> Self: + return cls(scalars, dtype=dtype, copy=copy) + + @classmethod + def _from_factorized(cls, values: np.ndarray, original: IntervalArray) -> Self: + return cls._from_sequence(values, dtype=original.dtype) + + _interval_shared_docs["from_breaks"] = textwrap.dedent( + """ + Construct an %(klass)s from an array of splits. + + Parameters + ---------- + breaks : array-like (1-dimensional) + Left and right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither.\ + %(name)s + copy : bool, default False + Copy the data. + dtype : dtype or None, default None + If None, dtype will be inferred. + + Returns + ------- + %(klass)s + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex. + %(klass)s.from_arrays : Construct from a left and right array. + %(klass)s.from_tuples : Construct from a sequence of tuples. + + %(examples)s\ + """ + ) + + @classmethod + @Appender( + _interval_shared_docs["from_breaks"] + % { + "klass": "IntervalArray", + "name": "", + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3]) + + [(0, 1], (1, 2], (2, 3]] + Length: 3, dtype: interval[int64, right] + """ + ), + } + ) + def from_breaks( + cls, + breaks, + closed: IntervalClosedType | None = "right", + copy: bool = False, + dtype: Dtype | None = None, + ) -> Self: + breaks = _maybe_convert_platform_interval(breaks) + + return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype) + + _interval_shared_docs["from_arrays"] = textwrap.dedent( + """ + Construct from two arrays defining the left and right bounds. + + Parameters + ---------- + left : array-like (1-dimensional) + Left bounds for each interval. + right : array-like (1-dimensional) + Right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither.\ + %(name)s + copy : bool, default False + Copy the data. + dtype : dtype, optional + If None, dtype will be inferred. + + Returns + ------- + %(klass)s + + Raises + ------ + ValueError + When a value is missing in only one of `left` or `right`. + When a value in `left` is greater than the corresponding value + in `right`. + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex. 
+ %(klass)s.from_breaks : Construct an %(klass)s from an array of + splits. + %(klass)s.from_tuples : Construct an %(klass)s from an + array-like of tuples. + + Notes + ----- + Each element of `left` must be less than or equal to the `right` + element at the same position. If an element is missing, it must be + missing in both `left` and `right`. A TypeError is raised when + using an unsupported type for `left` or `right`. At the moment, + 'category', 'object', and 'string' subtypes are not supported. + + %(examples)s\ + """ + ) + + @classmethod + @Appender( + _interval_shared_docs["from_arrays"] + % { + "klass": "IntervalArray", + "name": "", + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3]) + + [(0, 1], (1, 2], (2, 3]] + Length: 3, dtype: interval[int64, right] + """ + ), + } + ) + def from_arrays( + cls, + left, + right, + closed: IntervalClosedType | None = "right", + copy: bool = False, + dtype: Dtype | None = None, + ) -> Self: + left = _maybe_convert_platform_interval(left) + right = _maybe_convert_platform_interval(right) + + left, right, dtype = cls._ensure_simple_new_inputs( + left, + right, + closed=closed, + copy=copy, + dtype=dtype, + ) + cls._validate(left, right, dtype=dtype) + + return cls._simple_new(left, right, dtype=dtype) + + _interval_shared_docs["from_tuples"] = textwrap.dedent( + """ + Construct an %(klass)s from an array-like of tuples. + + Parameters + ---------- + data : array-like (1-dimensional) + Array of tuples. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither.\ + %(name)s + copy : bool, default False + By-default copy the data, this is compat only and ignored. + dtype : dtype or None, default None + If None, dtype will be inferred. + + Returns + ------- + %(klass)s + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex. + %(klass)s.from_arrays : Construct an %(klass)s from a left and + right array. + %(klass)s.from_breaks : Construct an %(klass)s from an array of + splits. + + %(examples)s\ + """ + ) + + @classmethod + @Appender( + _interval_shared_docs["from_tuples"] + % { + "klass": "IntervalArray", + "name": "", + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)]) + + [(0, 1], (1, 2]] + Length: 2, dtype: interval[int64, right] + """ + ), + } + ) + def from_tuples( + cls, + data, + closed: IntervalClosedType | None = "right", + copy: bool = False, + dtype: Dtype | None = None, + ) -> Self: + if len(data): + left, right = [], [] + else: + # ensure that empty data keeps input dtype + left = right = data + + for d in data: + if not isinstance(d, tuple) and isna(d): + lhs = rhs = np.nan + else: + name = cls.__name__ + try: + # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...] + lhs, rhs = d + except ValueError as err: + msg = f"{name}.from_tuples requires tuples of length 2, got {d}" + raise ValueError(msg) from err + except TypeError as err: + msg = f"{name}.from_tuples received an invalid item, {d}" + raise TypeError(msg) from err + left.append(lhs) + right.append(rhs) + + return cls.from_arrays(left, right, closed, copy=False, dtype=dtype) + + @classmethod + def _validate(cls, left, right, dtype: IntervalDtype) -> None: + """ + Verify that the IntervalArray is valid. 
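The alternate constructors and _validate above enforce matching lengths and ordered bounds. A minimal sketch of the public constructors:

import pandas as pd

arr = pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3], closed="left")
print(arr.closed)  # left

print(pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)]))

# a left bound greater than its right bound is rejected
try:
    pd.arrays.IntervalArray.from_arrays([0, 2], [1, 1])
except ValueError as err:
    print(err)  # left side of interval must be <= right side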
+ + Checks that + + * dtype is correct + * left and right match lengths + * left and right have the same missing values + * left is always below right + """ + if not isinstance(dtype, IntervalDtype): + msg = f"invalid dtype: {dtype}" + raise ValueError(msg) + if len(left) != len(right): + msg = "left and right must have the same length" + raise ValueError(msg) + left_mask = notna(left) + right_mask = notna(right) + if not (left_mask == right_mask).all(): + msg = ( + "missing values must be missing in the same " + "location both left and right sides" + ) + raise ValueError(msg) + if not (left[left_mask] <= right[left_mask]).all(): + msg = "left side of interval must be <= right side" + raise ValueError(msg) + + def _shallow_copy(self, left, right) -> Self: + """ + Return a new IntervalArray with the replacement attributes + + Parameters + ---------- + left : Index + Values to be used for the left-side of the intervals. + right : Index + Values to be used for the right-side of the intervals. + """ + dtype = IntervalDtype(left.dtype, closed=self.closed) + left, right, dtype = self._ensure_simple_new_inputs(left, right, dtype=dtype) + + return self._simple_new(left, right, dtype=dtype) + + # --------------------------------------------------------------------- + # Descriptive + + @property + def dtype(self) -> IntervalDtype: + return self._dtype + + @property + def nbytes(self) -> int: + return self.left.nbytes + self.right.nbytes + + @property + def size(self) -> int: + # Avoid materializing self.values + return self.left.size + + # --------------------------------------------------------------------- + # EA Interface + + def __iter__(self) -> Iterator: + return iter(np.asarray(self)) + + def __len__(self) -> int: + return len(self._left) + + @overload + def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: + ... + + @overload + def __getitem__(self, key: SequenceIndexer) -> Self: + ... 
+ + def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA: + key = check_array_indexer(self, key) + left = self._left[key] + right = self._right[key] + + if not isinstance(left, (np.ndarray, ExtensionArray)): + # scalar + if is_scalar(left) and isna(left): + return self._fill_value + return Interval(left, right, self.closed) + if np.ndim(left) > 1: + # GH#30588 multi-dimensional indexer disallowed + raise ValueError("multi-dimensional indexing not allowed") + # Argument 2 to "_simple_new" of "IntervalArray" has incompatible type + # "Union[Period, Timestamp, Timedelta, NaTType, DatetimeArray, TimedeltaArray, + # ndarray[Any, Any]]"; expected "Union[Union[DatetimeArray, TimedeltaArray], + # ndarray[Any, Any]]" + return self._simple_new(left, right, dtype=self.dtype) # type: ignore[arg-type] + + def __setitem__(self, key, value) -> None: + value_left, value_right = self._validate_setitem_value(value) + key = check_array_indexer(self, key) + + self._left[key] = value_left + self._right[key] = value_right + + def _cmp_method(self, other, op): + # ensure pandas array for list-like and eliminate non-interval scalars + if is_list_like(other): + if len(self) != len(other): + raise ValueError("Lengths must match to compare") + other = pd_array(other) + elif not isinstance(other, Interval): + # non-interval scalar -> no matches + if other is NA: + # GH#31882 + from pandas.core.arrays import BooleanArray + + arr = np.empty(self.shape, dtype=bool) + mask = np.ones(self.shape, dtype=bool) + return BooleanArray(arr, mask) + return invalid_comparison(self, other, op) + + # determine the dtype of the elements we want to compare + if isinstance(other, Interval): + other_dtype = pandas_dtype("interval") + elif not isinstance(other.dtype, CategoricalDtype): + other_dtype = other.dtype + else: + # for categorical defer to categories for dtype + other_dtype = other.categories.dtype + + # extract intervals if we have interval categories with matching closed + if isinstance(other_dtype, IntervalDtype): + if self.closed != other.categories.closed: + return invalid_comparison(self, other, op) + + other = other.categories._values.take( + other.codes, allow_fill=True, fill_value=other.categories._na_value + ) + + # interval-like -> need same closed and matching endpoints + if isinstance(other_dtype, IntervalDtype): + if self.closed != other.closed: + return invalid_comparison(self, other, op) + elif not isinstance(other, Interval): + other = type(self)(other) + + if op is operator.eq: + return (self._left == other.left) & (self._right == other.right) + elif op is operator.ne: + return (self._left != other.left) | (self._right != other.right) + elif op is operator.gt: + return (self._left > other.left) | ( + (self._left == other.left) & (self._right > other.right) + ) + elif op is operator.ge: + return (self == other) | (self > other) + elif op is operator.lt: + return (self._left < other.left) | ( + (self._left == other.left) & (self._right < other.right) + ) + else: + # operator.lt + return (self == other) | (self < other) + + # non-interval/non-object dtype -> no matches + if not is_object_dtype(other_dtype): + return invalid_comparison(self, other, op) + + # object dtype -> iteratively check for intervals + result = np.zeros(len(self), dtype=bool) + for i, obj in enumerate(other): + try: + result[i] = op(self[i], obj) + except TypeError: + if obj is NA: + # comparison with np.nan returns NA + # github.com/pandas-dev/pandas/pull/37124#discussion_r509095092 + result = result.astype(object) + 
result[i] = NA + else: + raise + return result + + @unpack_zerodim_and_defer("__eq__") + def __eq__(self, other): + return self._cmp_method(other, operator.eq) + + @unpack_zerodim_and_defer("__ne__") + def __ne__(self, other): + return self._cmp_method(other, operator.ne) + + @unpack_zerodim_and_defer("__gt__") + def __gt__(self, other): + return self._cmp_method(other, operator.gt) + + @unpack_zerodim_and_defer("__ge__") + def __ge__(self, other): + return self._cmp_method(other, operator.ge) + + @unpack_zerodim_and_defer("__lt__") + def __lt__(self, other): + return self._cmp_method(other, operator.lt) + + @unpack_zerodim_and_defer("__le__") + def __le__(self, other): + return self._cmp_method(other, operator.le) + + def argsort( + self, + *, + ascending: bool = True, + kind: SortKind = "quicksort", + na_position: str = "last", + **kwargs, + ) -> np.ndarray: + ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs) + + if ascending and kind == "quicksort" and na_position == "last": + # TODO: in an IntervalIndex we can reuse the cached + # IntervalTree.left_sorter + return np.lexsort((self.right, self.left)) + + # TODO: other cases we can use lexsort for? much more performant. + return super().argsort( + ascending=ascending, kind=kind, na_position=na_position, **kwargs + ) + + def min(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA: + nv.validate_minmax_axis(axis, self.ndim) + + if not len(self): + return self._na_value + + mask = self.isna() + if mask.any(): + if not skipna: + return self._na_value + obj = self[~mask] + else: + obj = self + + indexer = obj.argsort()[0] + return obj[indexer] + + def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA: + nv.validate_minmax_axis(axis, self.ndim) + + if not len(self): + return self._na_value + + mask = self.isna() + if mask.any(): + if not skipna: + return self._na_value + obj = self[~mask] + else: + obj = self + + indexer = obj.argsort()[-1] + return obj[indexer] + + def _pad_or_backfill( # pylint: disable=useless-parent-delegation + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove + # this method entirely. + return super()._pad_or_backfill( + method=method, limit=limit, limit_area=limit_area, copy=copy + ) + + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + """ + Fill NA/NaN values using the specified method. + + Parameters + ---------- + value : scalar, dict, Series + If a scalar value is passed it is used to fill all missing values. + Alternatively, a Series or dict can be used to fill in different + values for each index. The value should not be a list. The + value(s) passed should be either Interval objects or NA/NaN. + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + (Not implemented yet for IntervalArray) + Method to use for filling holes in reindexed Series + limit : int, default None + (Not implemented yet for IntervalArray) + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. 
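Comparisons are defined endpoint-wise against Interval scalars (and element-wise against object sequences), while min/max order by (left, right). A minimal sketch:

import pandas as pd

arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (0, 1)])
print(arr == pd.Interval(0, 1))  # [ True False  True]
print(arr.min())                 # (0, 1]
print(arr.max())                 # (1, 3]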
+ copy : bool, default True + Whether to make a copy of the data before filling. If False, then + the original should be modified and no new memory should be allocated. + For ExtensionArray subclasses that cannot do this, it is at the + author's discretion whether to ignore "copy=False" or to raise. + + Returns + ------- + filled : IntervalArray with NA/NaN filled + """ + if copy is False: + raise NotImplementedError + if method is not None: + return super().fillna(value=value, method=method, limit=limit) + + value_left, value_right = self._validate_scalar(value) + + left = self.left.fillna(value=value_left) + right = self.right.fillna(value=value_right) + return self._shallow_copy(left, right) + + def astype(self, dtype, copy: bool = True): + """ + Cast to an ExtensionArray or NumPy array with dtype 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + array : ExtensionArray or ndarray + ExtensionArray or NumPy ndarray with 'dtype' for its dtype. + """ + from pandas import Index + + if dtype is not None: + dtype = pandas_dtype(dtype) + + if isinstance(dtype, IntervalDtype): + if dtype == self.dtype: + return self.copy() if copy else self + + if is_float_dtype(self.dtype.subtype) and needs_i8_conversion( + dtype.subtype + ): + # This is allowed on the Index.astype but we disallow it here + msg = ( + f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" + ) + raise TypeError(msg) + + # need to cast to different subtype + try: + # We need to use Index rules for astype to prevent casting + # np.nan entries to int subtypes + new_left = Index(self._left, copy=False).astype(dtype.subtype) + new_right = Index(self._right, copy=False).astype(dtype.subtype) + except IntCastingNaNError: + # e.g test_subtype_integer + raise + except (TypeError, ValueError) as err: + # e.g. test_subtype_integer_errors f8->u8 can be lossy + # and raises ValueError + msg = ( + f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" + ) + raise TypeError(msg) from err + return self._shallow_copy(new_left, new_right) + else: + try: + return super().astype(dtype, copy=copy) + except (TypeError, ValueError) as err: + msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" + raise TypeError(msg) from err + + def equals(self, other) -> bool: + if type(self) != type(other): + return False + + return bool( + self.closed == other.closed + and self.left.equals(other.left) + and self.right.equals(other.right) + ) + + @classmethod + def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self: + """ + Concatenate multiple IntervalArray + + Parameters + ---------- + to_concat : sequence of IntervalArray + + Returns + ------- + IntervalArray + """ + closed_set = {interval.closed for interval in to_concat} + if len(closed_set) != 1: + raise ValueError("Intervals must all be closed on the same side.") + closed = closed_set.pop() + + left: IntervalSide = np.concatenate([interval.left for interval in to_concat]) + right: IntervalSide = np.concatenate([interval.right for interval in to_concat]) + + left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed) + + return cls._simple_new(left, right, dtype=dtype) + + def copy(self) -> Self: + """ + Return a copy of the array. 
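astype above reuses Index casting rules for the subtype, so an integer-backed IntervalArray can be cast to a float subtype, while casts that would force NaN into an integer subtype raise. A minimal sketch:

import pandas as pd

arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2])
print(arr.dtype)                                      # interval[int64, right]
print(arr.astype("interval[float64, right]").dtype)   # interval[float64, right]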
+ + Returns + ------- + IntervalArray + """ + left = self._left.copy() + right = self._right.copy() + dtype = self.dtype + return self._simple_new(left, right, dtype=dtype) + + def isna(self) -> np.ndarray: + return isna(self._left) + + def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray: + if not len(self) or periods == 0: + return self.copy() + + self._validate_scalar(fill_value) + + # ExtensionArray.shift doesn't work for two reasons + # 1. IntervalArray.dtype.na_value may not be correct for the dtype. + # 2. IntervalArray._from_sequence only accepts NaN for missing values, + # not other values like NaT + + empty_len = min(abs(periods), len(self)) + if isna(fill_value): + from pandas import Index + + fill_value = Index(self._left, copy=False)._na_value + empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1)) + else: + empty = self._from_sequence([fill_value] * empty_len, dtype=self.dtype) + + if periods > 0: + a = empty + b = self[:-periods] + else: + a = self[abs(periods) :] + b = empty + return self._concat_same_type([a, b]) + + def take( + self, + indices, + *, + allow_fill: bool = False, + fill_value=None, + axis=None, + **kwargs, + ) -> Self: + """ + Take elements from the IntervalArray. + + Parameters + ---------- + indices : sequence of integers + Indices to be taken. + + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to + :func:`numpy.take`. + + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + other negative values raise a ``ValueError``. + + fill_value : Interval or NA, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type, ``self.dtype.na_value``, is used. + + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + axis : any, default None + Present for compat with IntervalIndex; does nothing. + + Returns + ------- + IntervalArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. + ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. + """ + nv.validate_take((), kwargs) + + fill_left = fill_right = fill_value + if allow_fill: + fill_left, fill_right = self._validate_scalar(fill_value) + + left_take = take( + self._left, indices, allow_fill=allow_fill, fill_value=fill_left + ) + right_take = take( + self._right, indices, allow_fill=allow_fill, fill_value=fill_right + ) + + return self._shallow_copy(left_take, right_take) + + def _validate_listlike(self, value): + # list-like of intervals + try: + array = IntervalArray(value) + self._check_closed_matches(array, name="value") + value_left, value_right = array.left, array.right + except TypeError as err: + # wrong type: not interval or NA + msg = f"'value' should be an interval type, got {type(value)} instead." + raise TypeError(msg) from err + + try: + self.left._validate_fill_value(value_left) + except (LossySetitemError, TypeError) as err: + msg = ( + "'value' should be a compatible interval type, " + f"got {type(value)} instead." 
+ ) + raise TypeError(msg) from err + + return value_left, value_right + + def _validate_scalar(self, value): + if isinstance(value, Interval): + self._check_closed_matches(value, name="value") + left, right = value.left, value.right + # TODO: check subdtype match like _validate_setitem_value? + elif is_valid_na_for_dtype(value, self.left.dtype): + # GH#18295 + left = right = self.left._na_value + else: + raise TypeError( + "can only insert Interval objects and NA into an IntervalArray" + ) + return left, right + + def _validate_setitem_value(self, value): + if is_valid_na_for_dtype(value, self.left.dtype): + # na value: need special casing to set directly on numpy arrays + value = self.left._na_value + if is_integer_dtype(self.dtype.subtype): + # can't set NaN on a numpy integer array + # GH#45484 TypeError, not ValueError, matches what we get with + # non-NA un-holdable value. + raise TypeError("Cannot set float NaN to integer-backed IntervalArray") + value_left, value_right = value, value + + elif isinstance(value, Interval): + # scalar interval + self._check_closed_matches(value, name="value") + value_left, value_right = value.left, value.right + self.left._validate_fill_value(value_left) + self.left._validate_fill_value(value_right) + + else: + return self._validate_listlike(value) + + return value_left, value_right + + def value_counts(self, dropna: bool = True) -> Series: + """ + Returns a Series containing counts of each interval. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NaN. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + # TODO: implement this is a non-naive way! + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "The behavior of value_counts with object-dtype is deprecated", + category=FutureWarning, + ) + result = value_counts(np.asarray(self), dropna=dropna) + # Once the deprecation is enforced, we will need to do + # `result.index = result.index.astype(self.dtype)` + return result + + # --------------------------------------------------------------------- + # Rendering Methods + + def _formatter(self, boxed: bool = False): + # returning 'str' here causes us to render as e.g. "(0, 1]" instead of + # "Interval(0, 1, closed='right')" + return str + + # --------------------------------------------------------------------- + # Vectorized Interval Properties/Attributes + + @property + def left(self) -> Index: + """ + Return the left endpoints of each Interval in the IntervalArray as an Index. + + Examples + -------- + + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)]) + >>> interv_arr + + [(0, 1], (2, 5]] + Length: 2, dtype: interval[int64, right] + >>> interv_arr.left + Index([0, 2], dtype='int64') + """ + from pandas import Index + + return Index(self._left, copy=False) + + @property + def right(self) -> Index: + """ + Return the right endpoints of each Interval in the IntervalArray as an Index. + + Examples + -------- + + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)]) + >>> interv_arr + + [(0, 1], (2, 5]] + Length: 2, dtype: interval[int64, right] + >>> interv_arr.right + Index([1, 5], dtype='int64') + """ + from pandas import Index + + return Index(self._right, copy=False) + + @property + def length(self) -> Index: + """ + Return an Index with entries denoting the length of each Interval. 
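__setitem__ routes through the validators above, so only Interval objects with a matching closed side, or NA, can be assigned. A minimal sketch:

import pandas as pd

arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
arr[0] = pd.Interval(-1, 0)
print(arr.left)   # Index([-1, 1, 2], dtype='int64')
print(arr.right)  # Index([0, 2, 3], dtype='int64')

# anything that is not an Interval or NA is rejected
try:
    arr[0] = 5
except TypeError as err:
    print(err)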
+ + Examples + -------- + + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) + >>> interv_arr + + [(0, 1], (1, 5]] + Length: 2, dtype: interval[int64, right] + >>> interv_arr.length + Index([1, 4], dtype='int64') + """ + return self.right - self.left + + @property + def mid(self) -> Index: + """ + Return the midpoint of each Interval in the IntervalArray as an Index. + + Examples + -------- + + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) + >>> interv_arr + + [(0, 1], (1, 5]] + Length: 2, dtype: interval[int64, right] + >>> interv_arr.mid + Index([0.5, 3.0], dtype='float64') + """ + try: + return 0.5 * (self.left + self.right) + except TypeError: + # datetime safe version + return self.left + 0.5 * self.length + + _interval_shared_docs["overlaps"] = textwrap.dedent( + """ + Check elementwise if an Interval overlaps the values in the %(klass)s. + + Two intervals overlap if they share a common point, including closed + endpoints. Intervals that only have an open endpoint in common do not + overlap. + + Parameters + ---------- + other : %(klass)s + Interval to check against for an overlap. + + Returns + ------- + ndarray + Boolean array positionally indicating where an overlap occurs. + + See Also + -------- + Interval.overlaps : Check whether two Interval objects overlap. + + Examples + -------- + %(examples)s + >>> intervals.overlaps(pd.Interval(0.5, 1.5)) + array([ True, True, False]) + + Intervals that share closed endpoints overlap: + + >>> intervals.overlaps(pd.Interval(1, 3, closed='left')) + array([ True, True, True]) + + Intervals that only have an open endpoint in common do not overlap: + + >>> intervals.overlaps(pd.Interval(1, 2, closed='right')) + array([False, True, False]) + """ + ) + + @Appender( + _interval_shared_docs["overlaps"] + % { + "klass": "IntervalArray", + "examples": textwrap.dedent( + """\ + >>> data = [(0, 1), (1, 3), (2, 4)] + >>> intervals = pd.arrays.IntervalArray.from_tuples(data) + >>> intervals + + [(0, 1], (1, 3], (2, 4]] + Length: 3, dtype: interval[int64, right] + """ + ), + } + ) + def overlaps(self, other): + if isinstance(other, (IntervalArray, ABCIntervalIndex)): + raise NotImplementedError + if not isinstance(other, Interval): + msg = f"`other` must be Interval-like, got {type(other).__name__}" + raise TypeError(msg) + + # equality is okay if both endpoints are closed (overlap at a point) + op1 = le if (self.closed_left and other.closed_right) else lt + op2 = le if (other.closed_left and self.closed_right) else lt + + # overlaps is equivalent negation of two interval being disjoint: + # disjoint = (A.left > B.right) or (B.left > A.right) + # (simplifying the negation allows this to be done in less operations) + return op1(self.left, other.right) & op2(other.left, self.right) + + # --------------------------------------------------------------------- + + @property + def closed(self) -> IntervalClosedType: + """ + String describing the inclusive side the intervals. + + Either ``left``, ``right``, ``both`` or ``neither``. 
+ + Examples + -------- + + For arrays: + + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) + >>> interv_arr + + [(0, 1], (1, 5]] + Length: 2, dtype: interval[int64, right] + >>> interv_arr.closed + 'right' + + For Interval Index: + + >>> interv_idx = pd.interval_range(start=0, end=2) + >>> interv_idx + IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') + >>> interv_idx.closed + 'right' + """ + return self.dtype.closed + + _interval_shared_docs["set_closed"] = textwrap.dedent( + """ + Return an identical %(klass)s closed on the specified side. + + Parameters + ---------- + closed : {'left', 'right', 'both', 'neither'} + Whether the intervals are closed on the left-side, right-side, both + or neither. + + Returns + ------- + %(klass)s + + %(examples)s\ + """ + ) + + @Appender( + _interval_shared_docs["set_closed"] + % { + "klass": "IntervalArray", + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> index = pd.arrays.IntervalArray.from_breaks(range(4)) + >>> index + + [(0, 1], (1, 2], (2, 3]] + Length: 3, dtype: interval[int64, right] + >>> index.set_closed('both') + + [[0, 1], [1, 2], [2, 3]] + Length: 3, dtype: interval[int64, both] + """ + ), + } + ) + def set_closed(self, closed: IntervalClosedType) -> Self: + if closed not in VALID_CLOSED: + msg = f"invalid option for 'closed': {closed}" + raise ValueError(msg) + + left, right = self._left, self._right + dtype = IntervalDtype(left.dtype, closed=closed) + return self._simple_new(left, right, dtype=dtype) + + _interval_shared_docs[ + "is_non_overlapping_monotonic" + ] = """ + Return a boolean whether the %(klass)s is non-overlapping and monotonic. + + Non-overlapping means (no Intervals share points), and monotonic means + either monotonic increasing or monotonic decreasing. + + Examples + -------- + For arrays: + + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) + >>> interv_arr + + [(0, 1], (1, 5]] + Length: 2, dtype: interval[int64, right] + >>> interv_arr.is_non_overlapping_monotonic + True + + >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), + ... pd.Interval(-1, 0.1)]) + >>> interv_arr + + [(0.0, 1.0], (-1.0, 0.1]] + Length: 2, dtype: interval[float64, right] + >>> interv_arr.is_non_overlapping_monotonic + False + + For Interval Index: + + >>> interv_idx = pd.interval_range(start=0, end=2) + >>> interv_idx + IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') + >>> interv_idx.is_non_overlapping_monotonic + True + + >>> interv_idx = pd.interval_range(start=0, end=2, closed='both') + >>> interv_idx + IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]') + >>> interv_idx.is_non_overlapping_monotonic + False + """ + + @property + @Appender( + _interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs + ) + def is_non_overlapping_monotonic(self) -> bool: + # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... ) + # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) 
+ # we already require left <= right + + # strict inequality for closed == 'both'; equality implies overlapping + # at a point when both sides of intervals are included + if self.closed == "both": + return bool( + (self._right[:-1] < self._left[1:]).all() + or (self._left[:-1] > self._right[1:]).all() + ) + + # non-strict inequality when closed != 'both'; at least one side is + # not included in the intervals, so equality does not imply overlapping + return bool( + (self._right[:-1] <= self._left[1:]).all() + or (self._left[:-1] >= self._right[1:]).all() + ) + + # --------------------------------------------------------------------- + # Conversion + + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + """ + Return the IntervalArray's data as a numpy array of Interval + objects (with dtype='object') + """ + left = self._left + right = self._right + mask = self.isna() + closed = self.closed + + result = np.empty(len(left), dtype=object) + for i, left_value in enumerate(left): + if mask[i]: + result[i] = np.nan + else: + result[i] = Interval(left_value, right[i], closed) + return result + + def __arrow_array__(self, type=None): + """ + Convert myself into a pyarrow Array. + """ + import pyarrow + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + try: + subtype = pyarrow.from_numpy_dtype(self.dtype.subtype) + except TypeError as err: + raise TypeError( + f"Conversion to arrow with subtype '{self.dtype.subtype}' " + "is not supported" + ) from err + interval_type = ArrowIntervalType(subtype, self.closed) + storage_array = pyarrow.StructArray.from_arrays( + [ + pyarrow.array(self._left, type=subtype, from_pandas=True), + pyarrow.array(self._right, type=subtype, from_pandas=True), + ], + names=["left", "right"], + ) + mask = self.isna() + if mask.any(): + # if there are missing values, set validity bitmap also on the array level + null_bitmap = pyarrow.array(~mask).buffers()[1] + storage_array = pyarrow.StructArray.from_buffers( + storage_array.type, + len(storage_array), + [null_bitmap], + children=[storage_array.field(0), storage_array.field(1)], + ) + + if type is not None: + if type.equals(interval_type.storage_type): + return storage_array + elif isinstance(type, ArrowIntervalType): + # ensure we have the same subtype and closed attributes + if not type.equals(interval_type): + raise TypeError( + "Not supported to convert IntervalArray to type with " + f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) " + f"and 'closed' ({self.closed} vs {type.closed}) attributes" + ) + else: + raise TypeError( + f"Not supported to convert IntervalArray to '{type}' type" + ) + + return pyarrow.ExtensionArray.from_storage(interval_type, storage_array) + + _interval_shared_docs["to_tuples"] = textwrap.dedent( + """ + Return an %(return_type)s of tuples of the form (left, right). + + Parameters + ---------- + na_tuple : bool, default True + If ``True``, return ``NA`` as a tuple ``(nan, nan)``. If ``False``, + just return ``NA`` as ``nan``. 
+ + Returns + ------- + tuples: %(return_type)s + %(examples)s\ + """ + ) + + @Appender( + _interval_shared_docs["to_tuples"] + % { + "return_type": ( + "ndarray (if self is IntervalArray) or Index (if self is IntervalIndex)" + ), + "examples": textwrap.dedent( + """\ + + Examples + -------- + For :class:`pandas.IntervalArray`: + + >>> idx = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)]) + >>> idx + + [(0, 1], (1, 2]] + Length: 2, dtype: interval[int64, right] + >>> idx.to_tuples() + array([(0, 1), (1, 2)], dtype=object) + + For :class:`pandas.IntervalIndex`: + + >>> idx = pd.interval_range(start=0, end=2) + >>> idx + IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') + >>> idx.to_tuples() + Index([(0, 1), (1, 2)], dtype='object') + """ + ), + } + ) + def to_tuples(self, na_tuple: bool = True) -> np.ndarray: + tuples = com.asarray_tuplesafe(zip(self._left, self._right)) + if not na_tuple: + # GH 18756 + tuples = np.where(~self.isna(), tuples, np.nan) + return tuples + + # --------------------------------------------------------------------- + + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + value_left, value_right = self._validate_setitem_value(value) + + if isinstance(self._left, np.ndarray): + np.putmask(self._left, mask, value_left) + assert isinstance(self._right, np.ndarray) + np.putmask(self._right, mask, value_right) + else: + self._left._putmask(mask, value_left) + assert not isinstance(self._right, np.ndarray) + self._right._putmask(mask, value_right) + + def insert(self, loc: int, item: Interval) -> Self: + """ + Return a new IntervalArray inserting new item at location. Follows + Python numpy.insert semantics for negative values. Only Interval + objects and NA can be inserted into an IntervalIndex + + Parameters + ---------- + loc : int + item : Interval + + Returns + ------- + IntervalArray + """ + left_insert, right_insert = self._validate_scalar(item) + + new_left = self.left.insert(loc, left_insert) + new_right = self.right.insert(loc, right_insert) + + return self._shallow_copy(new_left, new_right) + + def delete(self, loc) -> Self: + if isinstance(self._left, np.ndarray): + new_left = np.delete(self._left, loc) + assert isinstance(self._right, np.ndarray) + new_right = np.delete(self._right, loc) + else: + new_left = self._left.delete(loc) + assert not isinstance(self._right, np.ndarray) + new_right = self._right.delete(loc) + return self._shallow_copy(left=new_left, right=new_right) + + @Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs) + def repeat( + self, + repeats: int | Sequence[int], + axis: AxisInt | None = None, + ) -> Self: + nv.validate_repeat((), {"axis": axis}) + left_repeat = self.left.repeat(repeats) + right_repeat = self.right.repeat(repeats) + return self._shallow_copy(left=left_repeat, right=right_repeat) + + _interval_shared_docs["contains"] = textwrap.dedent( + """ + Check elementwise if the Intervals contain the value. + + Return a boolean mask whether the value is contained in the Intervals + of the %(klass)s. + + Parameters + ---------- + other : scalar + The value to check whether it is contained in the Intervals. + + Returns + ------- + boolean array + + See Also + -------- + Interval.contains : Check whether Interval object contains value. + %(klass)s.overlaps : Check if an Interval overlaps the values in the + %(klass)s. 
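A brief sketch (outside the patch) of the tuple conversion and of the endpoint-wise mutation helpers defined above; the comments describe expected behaviour and are not verified doctest output.

import pandas as pd

arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), None])

arr.to_tuples()                   # object ndarray of (left, right) tuples; the missing slot is (nan, nan)
arr.to_tuples(na_tuple=False)     # the missing interval collapses to a single nan instead

# insert/delete/repeat all operate on the left and right endpoint arrays in lockstep.
arr.repeat(2)                     # each interval repeated twice, same closed side and dtype
arr.insert(0, pd.Interval(5, 6))  # new IntervalArray with (5.0, 6.0] prepended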
+ + Examples + -------- + %(examples)s + >>> intervals.contains(0.5) + array([ True, False, False]) + """ + ) + + @Appender( + _interval_shared_docs["contains"] + % { + "klass": "IntervalArray", + "examples": textwrap.dedent( + """\ + >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)]) + >>> intervals + + [(0, 1], (1, 3], (2, 4]] + Length: 3, dtype: interval[int64, right] + """ + ), + } + ) + def contains(self, other): + if isinstance(other, Interval): + raise NotImplementedError("contains not implemented for two intervals") + + return (self._left < other if self.open_left else self._left <= other) & ( + other < self._right if self.open_right else other <= self._right + ) + + def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: + if isinstance(values, IntervalArray): + if self.closed != values.closed: + # not comparable -> no overlap + return np.zeros(self.shape, dtype=bool) + + if self.dtype == values.dtype: + # GH#38353 instead of casting to object, operating on a + # complex128 ndarray is much more performant. + left = self._combined.view("complex128") + right = values._combined.view("complex128") + # error: Argument 1 to "isin" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any], + # ndarray[Any, dtype[Any]]]"; expected + # "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]], bool, + # int, float, complex, str, bytes, _NestedSequence[ + # Union[bool, int, float, complex, str, bytes]]]" + return np.isin(left, right).ravel() # type: ignore[arg-type] + + elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion( + values.left.dtype + ): + # not comparable -> no overlap + return np.zeros(self.shape, dtype=bool) + + return isin(self.astype(object), values.astype(object)) + + @property + def _combined(self) -> IntervalSide: + # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]" + # has no attribute "reshape" [union-attr] + left = self.left._values.reshape(-1, 1) # type: ignore[union-attr] + right = self.right._values.reshape(-1, 1) # type: ignore[union-attr] + if needs_i8_conversion(left.dtype): + # error: Item "ndarray[Any, Any]" of "Any | ndarray[Any, Any]" has + # no attribute "_concat_same_type" + comb = left._concat_same_type( # type: ignore[union-attr] + [left, right], axis=1 + ) + else: + comb = np.concatenate([left, right], axis=1) + return comb + + def _from_combined(self, combined: np.ndarray) -> IntervalArray: + """ + Create a new IntervalArray with our dtype from a 1D complex128 ndarray. + """ + nc = combined.view("i8").reshape(-1, 2) + + dtype = self._left.dtype + if needs_i8_conversion(dtype): + assert isinstance(self._left, (DatetimeArray, TimedeltaArray)) + new_left = type(self._left)._from_sequence(nc[:, 0], dtype=dtype) + assert isinstance(self._right, (DatetimeArray, TimedeltaArray)) + new_right = type(self._right)._from_sequence(nc[:, 1], dtype=dtype) + else: + assert isinstance(dtype, np.dtype) + new_left = nc[:, 0].view(dtype) + new_right = nc[:, 1].view(dtype) + return self._shallow_copy(left=new_left, right=new_right) + + def unique(self) -> IntervalArray: + # No overload variant of "__getitem__" of "ExtensionArray" matches argument + # type "Tuple[slice, int]" + nc = unique( + self._combined.view("complex128")[:, 0] # type: ignore[call-overload] + ) + nc = nc[:, None] + return self._from_combined(nc) + + +def _maybe_convert_platform_interval(values) -> ArrayLike: + """ + Try to do platform conversion, with special casing for IntervalArray. 
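The ``isin``/``unique`` fast path above relies on viewing paired int64 endpoints as complex128 scalars. Below is a standalone NumPy sketch of that packing trick; it mirrors, rather than calls, the private ``_combined`` helper.

import numpy as np

left_right = np.array([[0, 1], [1, 3], [2, 4]], dtype=np.int64)

# Each (left, right) row is 16 contiguous bytes, so a complex128 view turns
# every interval into a single scalar that numpy set operations can compare.
packed = left_right.view("complex128")                           # shape (3, 1)
other = np.array([[1, 3], [5, 6]], dtype=np.int64).view("complex128")

np.isin(packed, other).ravel()                                   # array([False,  True, False])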
+ Wrapper around maybe_convert_platform that alters the default return + dtype in certain cases to be compatible with IntervalArray. For example, + empty lists return with integer dtype instead of object dtype, which is + prohibited for IntervalArray. + + Parameters + ---------- + values : array-like + + Returns + ------- + array + """ + if isinstance(values, (list, tuple)) and len(values) == 0: + # GH 19016 + # empty lists/tuples get object dtype by default, but this is + # prohibited for IntervalArray, so coerce to integer instead + return np.array([], dtype=np.int64) + elif not is_list_like(values) or isinstance(values, ABCDataFrame): + # This will raise later, but we avoid passing to maybe_convert_platform + return values + elif isinstance(getattr(values, "dtype", None), CategoricalDtype): + values = np.asarray(values) + elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)): + # TODO: should we just cast these to list? + return values + else: + values = extract_array(values, extract_numpy=True) + + if not hasattr(values, "dtype"): + values = np.asarray(values) + if values.dtype.kind in "iu" and values.dtype != np.int64: + values = values.astype(np.int64) + return values diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/masked.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/masked.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e816b9d37814e40019d94305a1732ebc8b691c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/masked.py @@ -0,0 +1,1650 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas._libs.tslibs import is_supported_dtype +from pandas._typing import ( + ArrayLike, + AstypeArg, + AxisInt, + DtypeObj, + FillnaOptions, + InterpolateOptions, + NpDtype, + PositionalIndexer, + Scalar, + ScalarIndexer, + Self, + SequenceIndexer, + Shape, + npt, +) +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc +from pandas.util._validators import validate_fillna_kwargs + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.common import ( + is_bool, + is_integer_dtype, + is_list_like, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import BaseMaskedDtype +from pandas.core.dtypes.missing import ( + array_equivalent, + is_valid_na_for_dtype, + isna, + notna, +) + +from pandas.core import ( + algorithms as algos, + arraylike, + missing, + nanops, + ops, +) +from pandas.core.algorithms import ( + factorize_array, + isin, + map_array, + mode, + take, +) +from pandas.core.array_algos import ( + masked_accumulations, + masked_reductions, +) +from pandas.core.array_algos.quantile import quantile_with_mask +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays._utils import to_numpy_dtype_inference +from pandas.core.arrays.base import ExtensionArray +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import check_array_indexer +from pandas.core.ops import invalid_comparison +from pandas.core.util.hashing import hash_array + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Sequence, + ) + from pandas import Series + from 
pandas.core.arrays import BooleanArray + from pandas._typing import ( + NumpySorter, + NumpyValueArrayLike, + ) + from pandas.core.arrays import FloatingArray + +from pandas.compat.numpy import function as nv + + +class BaseMaskedArray(OpsMixin, ExtensionArray): + """ + Base class for masked arrays (which use _data and _mask to store the data). + + numpy based + """ + + # The value used to fill '_data' to avoid upcasting + _internal_fill_value: Scalar + # our underlying data and mask are each ndarrays + _data: np.ndarray + _mask: npt.NDArray[np.bool_] + + # Fill values used for any/all + _truthy_value = Scalar # bool(_truthy_value) = True + _falsey_value = Scalar # bool(_falsey_value) = False + + @classmethod + def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self: + result = BaseMaskedArray.__new__(cls) + result._data = values + result._mask = mask + return result + + def __init__( + self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False + ) -> None: + # values is supposed to already be validated in the subclass + if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_): + raise TypeError( + "mask should be boolean numpy array. Use " + "the 'pd.array' function instead" + ) + if values.shape != mask.shape: + raise ValueError("values.shape must match mask.shape") + + if copy: + values = values.copy() + mask = mask.copy() + + self._data = values + self._mask = mask + + @classmethod + def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self: + values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy) + return cls(values, mask) + + @classmethod + @doc(ExtensionArray._empty) + def _empty(cls, shape: Shape, dtype: ExtensionDtype): + values = np.empty(shape, dtype=dtype.type) + values.fill(cls._internal_fill_value) + mask = np.ones(shape, dtype=bool) + result = cls(values, mask) + if not isinstance(result, cls) or dtype != result.dtype: + raise NotImplementedError( + f"Default 'empty' implementation is invalid for dtype='{dtype}'" + ) + return result + + def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]: + # NEP 51: https://github.com/numpy/numpy/pull/22449 + return str + + @property + def dtype(self) -> BaseMaskedDtype: + raise AbstractMethodError(self) + + @overload + def __getitem__(self, item: ScalarIndexer) -> Any: + ... + + @overload + def __getitem__(self, item: SequenceIndexer) -> Self: + ... 
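To make the ``_data``/``_mask`` layout concrete, a minimal construction sketch (not part of the module); it uses only the public ``pd.arrays.IntegerArray`` constructor and ``pd.array``.

import numpy as np
import pandas as pd

data = np.array([1, 2, 3], dtype="int64")
mask = np.array([False, True, False])        # True marks a missing slot

arr = pd.arrays.IntegerArray(data, mask)     # [1, <NA>, 3]
arr[1]                                       # scalar indexing consults the mask -> pd.NA

# The usual entry point builds the same values/mask pair internally:
pd.array([1, None, 3], dtype="Int64")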
+ + def __getitem__(self, item: PositionalIndexer) -> Self | Any: + item = check_array_indexer(self, item) + + newmask = self._mask[item] + if is_bool(newmask): + # This is a scalar indexing + if newmask: + return self.dtype.na_value + return self._data[item] + + return self._simple_new(self._data[item], newmask) + + def _pad_or_backfill( + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + mask = self._mask + + if mask.any(): + func = missing.get_fill_func(method, ndim=self.ndim) + + npvalues = self._data.T + new_mask = mask.T + if copy: + npvalues = npvalues.copy() + new_mask = new_mask.copy() + elif limit_area is not None: + mask = mask.copy() + func(npvalues, limit=limit, mask=new_mask) + + if limit_area is not None and not mask.all(): + mask = mask.T + neg_mask = ~mask + first = neg_mask.argmax() + last = len(neg_mask) - neg_mask[::-1].argmax() - 1 + if limit_area == "inside": + new_mask[:first] |= mask[:first] + new_mask[last + 1 :] |= mask[last + 1 :] + elif limit_area == "outside": + new_mask[first + 1 : last] |= mask[first + 1 : last] + + if copy: + return self._simple_new(npvalues.T, new_mask.T) + else: + return self + else: + if copy: + new_values = self.copy() + else: + new_values = self + return new_values + + @doc(ExtensionArray.fillna) + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + value, method = validate_fillna_kwargs(value, method) + + mask = self._mask + + value = missing.check_value_size(value, mask, len(self)) + + if mask.any(): + if method is not None: + func = missing.get_fill_func(method, ndim=self.ndim) + npvalues = self._data.T + new_mask = mask.T + if copy: + npvalues = npvalues.copy() + new_mask = new_mask.copy() + func(npvalues, limit=limit, mask=new_mask) + return self._simple_new(npvalues.T, new_mask.T) + else: + # fill with value + if copy: + new_values = self.copy() + else: + new_values = self[:] + new_values[mask] = value + else: + if copy: + new_values = self.copy() + else: + new_values = self[:] + return new_values + + @classmethod + def _coerce_to_array( + cls, values, *, dtype: DtypeObj, copy: bool = False + ) -> tuple[np.ndarray, np.ndarray]: + raise AbstractMethodError(cls) + + def _validate_setitem_value(self, value): + """ + Check if we have a scalar that we can cast losslessly. + + Raises + ------ + TypeError + """ + kind = self.dtype.kind + # TODO: get this all from np_can_hold_element? + if kind == "b": + if lib.is_bool(value): + return value + + elif kind == "f": + if lib.is_integer(value) or lib.is_float(value): + return value + + else: + if lib.is_integer(value) or (lib.is_float(value) and value.is_integer()): + return value + # TODO: unsigned checks + + # Note: without the "str" here, the f-string rendering raises in + # py38 builds. 
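A small sketch (separate from the patch) of the fill paths above: value fills clear the mask on a copy, while forward fills via Series are routed through ``_pad_or_backfill`` and run directly on the data/mask pair. Expected results in comments.

import pandas as pd

arr = pd.array([1, None, None, 4], dtype="Int64")

arr.fillna(0)            # [1, 0, 0, 4]; the mask is cleared wherever a value was written
pd.Series(arr).ffill()   # [1, 1, 1, 4]; pad logic operates on _data/_mask in one pass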
+ raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}") + + def __setitem__(self, key, value) -> None: + key = check_array_indexer(self, key) + + if is_scalar(value): + if is_valid_na_for_dtype(value, self.dtype): + self._mask[key] = True + else: + value = self._validate_setitem_value(value) + self._data[key] = value + self._mask[key] = False + return + + value, mask = self._coerce_to_array(value, dtype=self.dtype) + + self._data[key] = value + self._mask[key] = mask + + def __contains__(self, key) -> bool: + if isna(key) and key is not self.dtype.na_value: + # GH#52840 + if self._data.dtype.kind == "f" and lib.is_float(key): + return bool((np.isnan(self._data) & ~self._mask).any()) + + return bool(super().__contains__(key)) + + def __iter__(self) -> Iterator: + if self.ndim == 1: + if not self._hasna: + for val in self._data: + yield val + else: + na_value = self.dtype.na_value + for isna_, val in zip(self._mask, self._data): + if isna_: + yield na_value + else: + yield val + else: + for i in range(len(self)): + yield self[i] + + def __len__(self) -> int: + return len(self._data) + + @property + def shape(self) -> Shape: + return self._data.shape + + @property + def ndim(self) -> int: + return self._data.ndim + + def swapaxes(self, axis1, axis2) -> Self: + data = self._data.swapaxes(axis1, axis2) + mask = self._mask.swapaxes(axis1, axis2) + return self._simple_new(data, mask) + + def delete(self, loc, axis: AxisInt = 0) -> Self: + data = np.delete(self._data, loc, axis=axis) + mask = np.delete(self._mask, loc, axis=axis) + return self._simple_new(data, mask) + + def reshape(self, *args, **kwargs) -> Self: + data = self._data.reshape(*args, **kwargs) + mask = self._mask.reshape(*args, **kwargs) + return self._simple_new(data, mask) + + def ravel(self, *args, **kwargs) -> Self: + # TODO: need to make sure we have the same order for data/mask + data = self._data.ravel(*args, **kwargs) + mask = self._mask.ravel(*args, **kwargs) + return type(self)(data, mask) + + @property + def T(self) -> Self: + return self._simple_new(self._data.T, self._mask.T) + + def round(self, decimals: int = 0, *args, **kwargs): + """ + Round each value in the array a to the given number of decimals. + + Parameters + ---------- + decimals : int, default 0 + Number of decimal places to round to. If decimals is negative, + it specifies the number of positions to the left of the decimal point. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + NumericArray + Rounded values of the NumericArray. + + See Also + -------- + numpy.around : Round values of an np.array. + DataFrame.round : Round values of a DataFrame. + Series.round : Round values of a Series. 
+ """ + if self.dtype.kind == "b": + return self + nv.validate_round(args, kwargs) + values = np.round(self._data, decimals=decimals, **kwargs) + + # Usually we'll get same type as self, but ndarray[bool] casts to float + return self._maybe_mask_result(values, self._mask.copy()) + + # ------------------------------------------------------------------ + # Unary Methods + + def __invert__(self) -> Self: + return self._simple_new(~self._data, self._mask.copy()) + + def __neg__(self) -> Self: + return self._simple_new(-self._data, self._mask.copy()) + + def __pos__(self) -> Self: + return self.copy() + + def __abs__(self) -> Self: + return self._simple_new(abs(self._data), self._mask.copy()) + + # ------------------------------------------------------------------ + + def _values_for_json(self) -> np.ndarray: + return np.asarray(self, dtype=object) + + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Convert to a NumPy Array. + + By default converts to an object-dtype NumPy array. Specify the `dtype` and + `na_value` keywords to customize the conversion. + + Parameters + ---------- + dtype : dtype, default object + The numpy dtype to convert to. + copy : bool, default False + Whether to ensure that the returned value is a not a view on + the array. Note that ``copy=False`` does not *ensure* that + ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that + a copy is made, even if not strictly necessary. This is typically + only possible when no missing values are present and `dtype` + is the equivalent numpy dtype. + na_value : scalar, optional + Scalar missing value indicator to use in numpy array. Defaults + to the native missing value indicator of this array (pd.NA). + + Returns + ------- + numpy.ndarray + + Examples + -------- + An object-dtype is the default result + + >>> a = pd.array([True, False, pd.NA], dtype="boolean") + >>> a.to_numpy() + array([True, False, ], dtype=object) + + When no missing values are present, an equivalent dtype can be used. + + >>> pd.array([True, False], dtype="boolean").to_numpy(dtype="bool") + array([ True, False]) + >>> pd.array([1, 2], dtype="Int64").to_numpy("int64") + array([1, 2]) + + However, requesting such dtype will raise a ValueError if + missing values are present and the default missing value :attr:`NA` + is used. + + >>> a = pd.array([True, False, pd.NA], dtype="boolean") + >>> a + + [True, False, ] + Length: 3, dtype: boolean + + >>> a.to_numpy(dtype="bool") + Traceback (most recent call last): + ... + ValueError: cannot convert to bool numpy array in presence of missing values + + Specify a valid `na_value` instead + + >>> a.to_numpy(dtype="bool", na_value=False) + array([ True, False, False]) + """ + hasna = self._hasna + dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, hasna) + if dtype is None: + dtype = object + + if hasna: + if ( + dtype != object + and not is_string_dtype(dtype) + and na_value is libmissing.NA + ): + raise ValueError( + f"cannot convert to '{dtype}'-dtype NumPy array " + "with missing values. Specify an appropriate 'na_value' " + "for this dtype." 
+ ) + # don't pass copy to astype -> always need a copy since we are mutating + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + data = self._data.astype(dtype) + data[self._mask] = na_value + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + data = self._data.astype(dtype, copy=copy) + return data + + @doc(ExtensionArray.tolist) + def tolist(self): + if self.ndim > 1: + return [x.tolist() for x in self] + dtype = None if self._hasna else self._data.dtype + return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist() + + @overload + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: + ... + + @overload + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: + ... + + @overload + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: + ... + + def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + # if we are astyping to another nullable masked dtype, we can fastpath + if isinstance(dtype, BaseMaskedDtype): + # TODO deal with NaNs for FloatingArray case + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + # TODO: Is rounding what we want long term? + data = self._data.astype(dtype.numpy_dtype, copy=copy) + # mask is copied depending on whether the data was copied, and + # not directly depending on the `copy` keyword + mask = self._mask if data is self._data else self._mask.copy() + cls = dtype.construct_array_type() + return cls(data, mask, copy=False) + + if isinstance(dtype, ExtensionDtype): + eacls = dtype.construct_array_type() + return eacls._from_sequence(self, dtype=dtype, copy=copy) + + na_value: float | np.datetime64 | lib.NoDefault + + # coerce + if dtype.kind == "f": + # In astype, we consider dtype=float to also mean na_value=np.nan + na_value = np.nan + elif dtype.kind == "M": + na_value = np.datetime64("NaT") + else: + na_value = lib.no_default + + # to_numpy will also raise, but we get somewhat nicer exception messages here + if dtype.kind in "iu" and self._hasna: + raise ValueError("cannot convert NA to integer") + if dtype.kind == "b" and self._hasna: + # careful: astype_nansafe converts np.nan to True + raise ValueError("cannot convert float NaN to bool") + + data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy) + return data + + __array_priority__ = 1000 # higher than ndarray so ops dispatch to us + + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + """ + the array interface, return my values + We return an object array here to preserve our scalar values + """ + return self.to_numpy(dtype=dtype) + + _HANDLED_TYPES: tuple[type, ...] + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + # For MaskedArray inputs, we apply the ufunc to ._data + # and mask the result. + + out = kwargs.get("out", ()) + + for x in inputs + out: + if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)): + return NotImplemented + + # for binary ops, use our custom dunder methods + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. 
test_ufunc_with_out + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + mask = np.zeros(len(self), dtype=bool) + inputs2 = [] + for x in inputs: + if isinstance(x, BaseMaskedArray): + mask |= x._mask + inputs2.append(x._data) + else: + inputs2.append(x) + + def reconstruct(x: np.ndarray): + # we don't worry about scalar `x` here, since we + # raise for reduce up above. + from pandas.core.arrays import ( + BooleanArray, + FloatingArray, + IntegerArray, + ) + + if x.dtype.kind == "b": + m = mask.copy() + return BooleanArray(x, m) + elif x.dtype.kind in "iu": + m = mask.copy() + return IntegerArray(x, m) + elif x.dtype.kind == "f": + m = mask.copy() + if x.dtype == np.float16: + # reached in e.g. np.sqrt on BooleanArray + # we don't support float16 + x = x.astype(np.float32) + return FloatingArray(x, m) + else: + x[mask] = np.nan + return x + + result = getattr(ufunc, method)(*inputs2, **kwargs) + if ufunc.nout > 1: + # e.g. np.divmod + return tuple(reconstruct(x) for x in result) + elif method == "reduce": + # e.g. np.add.reduce; test_ufunc_reduce_raises + if self._mask.any(): + return self._na_value + return result + else: + return reconstruct(result) + + def __arrow_array__(self, type=None): + """ + Convert myself into a pyarrow Array. + """ + import pyarrow as pa + + return pa.array(self._data, mask=self._mask, type=type) + + @property + def _hasna(self) -> bool: + # Note: this is expensive right now! The hope is that we can + # make this faster by having an optional mask, but not have to change + # source code using it.. + + # error: Incompatible return value type (got "bool_", expected "bool") + return self._mask.any() # type: ignore[return-value] + + def _propagate_mask( + self, mask: npt.NDArray[np.bool_] | None, other + ) -> npt.NDArray[np.bool_]: + if mask is None: + mask = self._mask.copy() # TODO: need test for BooleanArray needing a copy + if other is libmissing.NA: + # GH#45421 don't alter inplace + mask = mask | True + elif is_list_like(other) and len(other) == len(mask): + mask = mask | isna(other) + else: + mask = self._mask | mask + # Incompatible return value type (got "Optional[ndarray[Any, dtype[bool_]]]", + # expected "ndarray[Any, dtype[bool_]]") + return mask # type: ignore[return-value] + + def _arith_method(self, other, op): + op_name = op.__name__ + omask = None + + if ( + not hasattr(other, "dtype") + and is_list_like(other) + and len(other) == len(self) + ): + # Try inferring masked dtype instead of casting to object + other = pd_array(other) + other = extract_array(other, extract_numpy=True) + + if isinstance(other, BaseMaskedArray): + other, omask = other._data, other._mask + + elif is_list_like(other): + if not isinstance(other, ExtensionArray): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + + # We wrap the non-masked arithmetic logic used for numpy dtypes + # in Series/Index arithmetic ops. + other = ops.maybe_prepare_scalar_for_op(other, (len(self),)) + pd_op = ops.get_array_op(op) + other = ensure_wrapped_if_datetimelike(other) + + if op_name in {"pow", "rpow"} and isinstance(other, np.bool_): + # Avoid DeprecationWarning: In future, it will be an error + # for 'np.bool_' scalars to be interpreted as an index + # e.g. 
test_array_scalar_like_equivalence + other = bool(other) + + mask = self._propagate_mask(omask, other) + + if other is libmissing.NA: + result = np.ones_like(self._data) + if self.dtype.kind == "b": + if op_name in { + "floordiv", + "rfloordiv", + "pow", + "rpow", + "truediv", + "rtruediv", + }: + # GH#41165 Try to match non-masked Series behavior + # This is still imperfect GH#46043 + raise NotImplementedError( + f"operator '{op_name}' not implemented for bool dtypes" + ) + if op_name in {"mod", "rmod"}: + dtype = "int8" + else: + dtype = "bool" + result = result.astype(dtype) + elif "truediv" in op_name and self.dtype.kind != "f": + # The actual data here doesn't matter since the mask + # will be all-True, but since this is division, we want + # to end up with floating dtype. + result = result.astype(np.float64) + else: + # Make sure we do this before the "pow" mask checks + # to get an expected exception message on shape mismatch. + if self.dtype.kind in "iu" and op_name in ["floordiv", "mod"]: + # TODO(GH#30188) ATM we don't match the behavior of non-masked + # types with respect to floordiv-by-zero + pd_op = op + + with np.errstate(all="ignore"): + result = pd_op(self._data, other) + + if op_name == "pow": + # 1 ** x is 1. + mask = np.where((self._data == 1) & ~self._mask, False, mask) + # x ** 0 is 1. + if omask is not None: + mask = np.where((other == 0) & ~omask, False, mask) + elif other is not libmissing.NA: + mask = np.where(other == 0, False, mask) + + elif op_name == "rpow": + # 1 ** x is 1. + if omask is not None: + mask = np.where((other == 1) & ~omask, False, mask) + elif other is not libmissing.NA: + mask = np.where(other == 1, False, mask) + # x ** 0 is 1. + mask = np.where((self._data == 0) & ~self._mask, False, mask) + + return self._maybe_mask_result(result, mask) + + _logical_method = _arith_method + + def _cmp_method(self, other, op) -> BooleanArray: + from pandas.core.arrays import BooleanArray + + mask = None + + if isinstance(other, BaseMaskedArray): + other, mask = other._data, other._mask + + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + if len(self) != len(other): + raise ValueError("Lengths must match to compare") + + if other is libmissing.NA: + # numpy does not handle pd.NA well as "other" scalar (it returns + # a scalar False instead of an array) + # This may be fixed by NA.__array_ufunc__. Revisit this check + # once that's implemented. + result = np.zeros(self._data.shape, dtype="bool") + mask = np.ones(self._data.shape, dtype="bool") + else: + with warnings.catch_warnings(): + # numpy may show a FutureWarning or DeprecationWarning: + # elementwise comparison failed; returning scalar instead, + # but in the future will perform elementwise comparison + # before returning NotImplemented. We fall back to the correct + # behavior today, so that should be fine to ignore. 
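The ``pow``/``rpow`` mask adjustments above encode the identities ``1 ** x == 1`` and ``x ** 0 == 1``, so those slots stay non-missing even when the other operand is NA. A hedged sketch of the expected behaviour:

import pandas as pd

base = pd.array([1, 2, None], dtype="Int64")

base ** 0                                     # expected [1, 1, 1], no <NA>: exponent 0 clears the mask
pd.array([1, 1, 1], dtype="Int64") ** pd.NA   # expected [1, 1, 1]: a base of ones clears it as well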
+ warnings.filterwarnings("ignore", "elementwise", FutureWarning) + warnings.filterwarnings("ignore", "elementwise", DeprecationWarning) + method = getattr(self._data, f"__{op.__name__}__") + result = method(other) + + if result is NotImplemented: + result = invalid_comparison(self._data, other, op) + + mask = self._propagate_mask(mask, other) + return BooleanArray(result, mask, copy=False) + + def _maybe_mask_result( + self, result: np.ndarray | tuple[np.ndarray, np.ndarray], mask: np.ndarray + ): + """ + Parameters + ---------- + result : array-like or tuple[array-like] + mask : array-like bool + """ + if isinstance(result, tuple): + # i.e. divmod + div, mod = result + return ( + self._maybe_mask_result(div, mask), + self._maybe_mask_result(mod, mask), + ) + + if result.dtype.kind == "f": + from pandas.core.arrays import FloatingArray + + return FloatingArray(result, mask, copy=False) + + elif result.dtype.kind == "b": + from pandas.core.arrays import BooleanArray + + return BooleanArray(result, mask, copy=False) + + elif lib.is_np_dtype(result.dtype, "m") and is_supported_dtype(result.dtype): + # e.g. test_numeric_arr_mul_tdscalar_numexpr_path + from pandas.core.arrays import TimedeltaArray + + result[mask] = result.dtype.type("NaT") + + if not isinstance(result, TimedeltaArray): + return TimedeltaArray._simple_new(result, dtype=result.dtype) + + return result + + elif result.dtype.kind in "iu": + from pandas.core.arrays import IntegerArray + + return IntegerArray(result, mask, copy=False) + + else: + result[mask] = np.nan + return result + + def isna(self) -> np.ndarray: + return self._mask.copy() + + @property + def _na_value(self): + return self.dtype.na_value + + @property + def nbytes(self) -> int: + return self._data.nbytes + self._mask.nbytes + + @classmethod + def _concat_same_type( + cls, + to_concat: Sequence[Self], + axis: AxisInt = 0, + ) -> Self: + data = np.concatenate([x._data for x in to_concat], axis=axis) + mask = np.concatenate([x._mask for x in to_concat], axis=axis) + return cls(data, mask) + + def _hash_pandas_object( + self, *, encoding: str, hash_key: str, categorize: bool + ) -> npt.NDArray[np.uint64]: + hashed_array = hash_array( + self._data, encoding=encoding, hash_key=hash_key, categorize=categorize + ) + hashed_array[self.isna()] = hash(self.dtype.na_value) + return hashed_array + + def take( + self, + indexer, + *, + allow_fill: bool = False, + fill_value: Scalar | None = None, + axis: AxisInt = 0, + ) -> Self: + # we always fill with 1 internally + # to avoid upcasting + data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value + result = take( + self._data, + indexer, + fill_value=data_fill_value, + allow_fill=allow_fill, + axis=axis, + ) + + mask = take( + self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis + ) + + # if we are filling + # we only fill where the indexer is null + # not existing missing values + # TODO(jreback) what if we have a non-na float as a fill value? 
+ if allow_fill and notna(fill_value): + fill_mask = np.asarray(indexer) == -1 + result[fill_mask] = fill_value + mask = mask ^ fill_mask + + return self._simple_new(result, mask) + + # error: Return type "BooleanArray" of "isin" incompatible with return type + # "ndarray" in supertype "ExtensionArray" + def isin(self, values: ArrayLike) -> BooleanArray: # type: ignore[override] + from pandas.core.arrays import BooleanArray + + # algorithms.isin will eventually convert values to an ndarray, so no extra + # cost to doing it here first + values_arr = np.asarray(values) + result = isin(self._data, values_arr) + + if self._hasna: + values_have_NA = values_arr.dtype == object and any( + val is self.dtype.na_value for val in values_arr + ) + + # For now, NA does not propagate so set result according to presence of NA, + # see https://github.com/pandas-dev/pandas/pull/38379 for some discussion + result[self._mask] = values_have_NA + + mask = np.zeros(self._data.shape, dtype=bool) + return BooleanArray(result, mask, copy=False) + + def copy(self) -> Self: + data = self._data.copy() + mask = self._mask.copy() + return self._simple_new(data, mask) + + @doc(ExtensionArray.duplicated) + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + values = self._data + mask = self._mask + return algos.duplicated(values, keep=keep, mask=mask) + + def unique(self) -> Self: + """ + Compute the BaseMaskedArray of unique values. + + Returns + ------- + uniques : BaseMaskedArray + """ + uniques, mask = algos.unique_with_mask(self._data, self._mask) + return self._simple_new(uniques, mask) + + @doc(ExtensionArray.searchsorted) + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + if self._hasna: + raise ValueError( + "searchsorted requires array to be sorted, which is impossible " + "with NAs present." + ) + if isinstance(value, ExtensionArray): + value = value.astype(object) + # Base class searchsorted would cast to object, which is *much* slower. + return self._data.searchsorted(value, side=side, sorter=sorter) + + @doc(ExtensionArray.factorize) + def factorize( + self, + use_na_sentinel: bool = True, + ) -> tuple[np.ndarray, ExtensionArray]: + arr = self._data + mask = self._mask + + # Use a sentinel for na; recode and add NA to uniques if necessary below + codes, uniques = factorize_array(arr, use_na_sentinel=True, mask=mask) + + # check that factorize_array correctly preserves dtype. 
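As the isin comment above notes (GH#38379), NA does not propagate through ``isin``; masked slots are reported present only when the probe values themselves contain NA. A small sketch of the expected results:

import pandas as pd

arr = pd.array([1, 2, None], dtype="Int64")

arr.isin([1])          # [True, False, False]
arr.isin([1, pd.NA])   # [True, False, True]; the masked slot matches because NA is among the values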
+ assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype) + + has_na = mask.any() + if use_na_sentinel or not has_na: + size = len(uniques) + else: + # Make room for an NA value + size = len(uniques) + 1 + uniques_mask = np.zeros(size, dtype=bool) + if not use_na_sentinel and has_na: + na_index = mask.argmax() + # Insert na with the proper code + if na_index == 0: + na_code = np.intp(0) + else: + na_code = codes[:na_index].max() + 1 + codes[codes >= na_code] += 1 + codes[codes == -1] = na_code + # dummy value for uniques; not used since uniques_mask will be True + uniques = np.insert(uniques, na_code, 0) + uniques_mask[na_code] = True + uniques_ea = self._simple_new(uniques, uniques_mask) + + return codes, uniques_ea + + @doc(ExtensionArray._values_for_argsort) + def _values_for_argsort(self) -> np.ndarray: + return self._data + + def value_counts(self, dropna: bool = True) -> Series: + """ + Returns a Series containing counts of each unique value. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of missing values. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + from pandas import ( + Index, + Series, + ) + from pandas.arrays import IntegerArray + + keys, value_counts, na_counter = algos.value_counts_arraylike( + self._data, dropna=dropna, mask=self._mask + ) + mask_index = np.zeros((len(value_counts),), dtype=np.bool_) + mask = mask_index.copy() + + if na_counter > 0: + mask_index[-1] = True + + arr = IntegerArray(value_counts, mask) + index = Index( + self.dtype.construct_array_type()( + keys, mask_index # type: ignore[arg-type] + ) + ) + return Series(arr, index=index, name="count", copy=False) + + def _mode(self, dropna: bool = True) -> Self: + if dropna: + result = mode(self._data, dropna=dropna, mask=self._mask) + res_mask = np.zeros(result.shape, dtype=np.bool_) + else: + result, res_mask = mode(self._data, dropna=dropna, mask=self._mask) + result = type(self)(result, res_mask) # type: ignore[arg-type] + return result[result.argsort()] + + @doc(ExtensionArray.equals) + def equals(self, other) -> bool: + if type(self) != type(other): + return False + if other.dtype != self.dtype: + return False + + # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT + # equal. + if not np.array_equal(self._mask, other._mask): + return False + + left = self._data[~self._mask] + right = other._data[~other._mask] + return array_equivalent(left, right, strict_nan=True, dtype_equal=True) + + def _quantile( + self, qs: npt.NDArray[np.float64], interpolation: str + ) -> BaseMaskedArray: + """ + Dispatch to quantile_with_mask, needed because we do not have + _from_factorized. + + Notes + ----- + We assume that all impacted cases are 1D-only. + """ + res = quantile_with_mask( + self._data, + mask=self._mask, + # TODO(GH#40932): na_value_for_dtype(self.dtype.numpy_dtype) + # instead of np.nan + fill_value=np.nan, + qs=qs, + interpolation=interpolation, + ) + + if self._hasna: + # Our result mask is all-False unless we are all-NA, in which + # case it is all-True. 
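A brief sketch (outside the patch) of ``value_counts``: both the counts and the index are themselves masked arrays, and with ``dropna=False`` the NA bucket is appended as the final row.

import pandas as pd

arr = pd.array([1, 1, 2, None], dtype="Int64")

arr.value_counts()              # counts for 1 and 2 only
arr.value_counts(dropna=False)  # adds a trailing <NA> row; counts are Int64, the index is a masked array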
+ if self.ndim == 2: + # I think this should be out_mask=self.isna().all(axis=1) + # but am holding off until we have tests + raise NotImplementedError + if self.isna().all(): + out_mask = np.ones(res.shape, dtype=bool) + + if is_integer_dtype(self.dtype): + # We try to maintain int dtype if possible for not all-na case + # as well + res = np.zeros(res.shape, dtype=self.dtype.numpy_dtype) + else: + out_mask = np.zeros(res.shape, dtype=bool) + else: + out_mask = np.zeros(res.shape, dtype=bool) + return self._maybe_mask_result(res, mask=out_mask) + + # ------------------------------------------------------------------ + # Reductions + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + if name in {"any", "all", "min", "max", "sum", "prod", "mean", "var", "std"}: + result = getattr(self, name)(skipna=skipna, **kwargs) + else: + # median, skew, kurt, sem + data = self._data + mask = self._mask + op = getattr(nanops, f"nan{name}") + axis = kwargs.pop("axis", None) + result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs) + + if keepdims: + if isna(result): + return self._wrap_na_result(name=name, axis=0, mask_size=(1,)) + else: + result = result.reshape(1) + mask = np.zeros(1, dtype=bool) + return self._maybe_mask_result(result, mask) + + if isna(result): + return libmissing.NA + else: + return result + + def _wrap_reduction_result(self, name: str, result, *, skipna, axis): + if isinstance(result, np.ndarray): + if skipna: + # we only retain mask for all-NA rows/columns + mask = self._mask.all(axis=axis) + else: + mask = self._mask.any(axis=axis) + + return self._maybe_mask_result(result, mask) + return result + + def _wrap_na_result(self, *, name, axis, mask_size): + mask = np.ones(mask_size, dtype=bool) + + float_dtyp = "float32" if self.dtype == "Float32" else "float64" + if name in ["mean", "median", "var", "std", "skew", "kurt"]: + np_dtype = float_dtyp + elif name in ["min", "max"] or self.dtype.itemsize == 8: + np_dtype = self.dtype.numpy_dtype.name + else: + is_windows_or_32bit = is_platform_windows() or not IS64 + int_dtyp = "int32" if is_windows_or_32bit else "int64" + uint_dtyp = "uint32" if is_windows_or_32bit else "uint64" + np_dtype = {"b": int_dtyp, "i": int_dtyp, "u": uint_dtyp, "f": float_dtyp}[ + self.dtype.kind + ] + + value = np.array([1], dtype=np_dtype) + return self._maybe_mask_result(value, mask=mask) + + def _wrap_min_count_reduction_result( + self, name: str, result, *, skipna, min_count, axis + ): + if min_count == 0 and isinstance(result, np.ndarray): + return self._maybe_mask_result(result, np.zeros(result.shape, dtype=bool)) + return self._wrap_reduction_result(name, result, skipna=skipna, axis=axis) + + def sum( + self, + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = 0, + **kwargs, + ): + nv.validate_sum((), kwargs) + + result = masked_reductions.sum( + self._data, + self._mask, + skipna=skipna, + min_count=min_count, + axis=axis, + ) + return self._wrap_min_count_reduction_result( + "sum", result, skipna=skipna, min_count=min_count, axis=axis + ) + + def prod( + self, + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = 0, + **kwargs, + ): + nv.validate_prod((), kwargs) + + result = masked_reductions.prod( + self._data, + self._mask, + skipna=skipna, + min_count=min_count, + axis=axis, + ) + return self._wrap_min_count_reduction_result( + "prod", result, skipna=skipna, min_count=min_count, axis=axis + ) + + def mean(self, *, skipna: bool = True, axis: AxisInt | 
None = 0, **kwargs): + nv.validate_mean((), kwargs) + result = masked_reductions.mean( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ) + return self._wrap_reduction_result("mean", result, skipna=skipna, axis=axis) + + def var( + self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs + ): + nv.validate_stat_ddof_func((), kwargs, fname="var") + result = masked_reductions.var( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ddof=ddof, + ) + return self._wrap_reduction_result("var", result, skipna=skipna, axis=axis) + + def std( + self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs + ): + nv.validate_stat_ddof_func((), kwargs, fname="std") + result = masked_reductions.std( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ddof=ddof, + ) + return self._wrap_reduction_result("std", result, skipna=skipna, axis=axis) + + def min(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs): + nv.validate_min((), kwargs) + result = masked_reductions.min( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ) + return self._wrap_reduction_result("min", result, skipna=skipna, axis=axis) + + def max(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs): + nv.validate_max((), kwargs) + result = masked_reductions.max( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ) + return self._wrap_reduction_result("max", result, skipna=skipna, axis=axis) + + def map(self, mapper, na_action=None): + return map_array(self.to_numpy(), mapper, na_action=na_action) + + def any(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs): + """ + Return whether any element is truthy. + + Returns False unless there is at least one element that is truthy. + By default, NAs are skipped. If ``skipna=False`` is specified and + missing values are present, similar :ref:`Kleene logic ` + is used as for logical operations. + + .. versionchanged:: 1.4.0 + + Parameters + ---------- + skipna : bool, default True + Exclude NA values. If the entire array is NA and `skipna` is + True, then the result will be False, as for an empty array. + If `skipna` is False, the result will still be True if there is + at least one element that is truthy, otherwise NA will be returned + if there are NA's present. + axis : int, optional, default 0 + **kwargs : any, default None + Additional keywords have no effect but might be accepted for + compatibility with NumPy. + + Returns + ------- + bool or :attr:`pandas.NA` + + See Also + -------- + numpy.any : Numpy version of this method. + BaseMaskedArray.all : Return whether all elements are truthy. 
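A hedged sketch of the reduction wrappers defined above: ``skipna`` controls whether masked slots poison the result, and ``min_count`` turns an under-populated reduction into NA.

import pandas as pd

arr = pd.array([1, None, 3], dtype="Int64")

arr.sum()                # 4: masked slots are skipped by default
arr.sum(skipna=False)    # <NA>: any masked slot poisons the reduction
arr.sum(min_count=3)     # <NA>: only two valid values remain, below min_count
arr.mean()               # 2.0, returned as a plain scalar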
+
+        Examples
+        --------
+        The result indicates whether any element is truthy (and by default
+        skips NAs):
+
+        >>> pd.array([True, False, True]).any()
+        True
+        >>> pd.array([True, False, pd.NA]).any()
+        True
+        >>> pd.array([False, False, pd.NA]).any()
+        False
+        >>> pd.array([], dtype="boolean").any()
+        False
+        >>> pd.array([pd.NA], dtype="boolean").any()
+        False
+        >>> pd.array([pd.NA], dtype="Float64").any()
+        False
+
+        With ``skipna=False``, the result can be NA if this is logically
+        required (whether ``pd.NA`` is True or False influences the result):
+
+        >>> pd.array([True, False, pd.NA]).any(skipna=False)
+        True
+        >>> pd.array([1, 0, pd.NA]).any(skipna=False)
+        True
+        >>> pd.array([False, False, pd.NA]).any(skipna=False)
+        <NA>
+        >>> pd.array([0, 0, pd.NA]).any(skipna=False)
+        <NA>
+        """
+        nv.validate_any((), kwargs)
+
+        values = self._data.copy()
+        # error: Argument 3 to "putmask" has incompatible type "object";
+        # expected "Union[_SupportsArray[dtype[Any]],
+        # _NestedSequence[_SupportsArray[dtype[Any]]],
+        # bool, int, float, complex, str, bytes,
+        # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
+        np.putmask(values, self._mask, self._falsey_value)  # type: ignore[arg-type]
+        result = values.any()
+        if skipna:
+            return result
+        else:
+            if result or len(self) == 0 or not self._mask.any():
+                return result
+            else:
+                return self.dtype.na_value
+
+    def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
+        """
+        Return whether all elements are truthy.
+
+        Returns True unless there is at least one element that is falsey.
+        By default, NAs are skipped. If ``skipna=False`` is specified and
+        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+        is used as for logical operations.
+
+        .. versionchanged:: 1.4.0
+
+        Parameters
+        ----------
+        skipna : bool, default True
+            Exclude NA values. If the entire array is NA and `skipna` is
+            True, then the result will be True, as for an empty array.
+            If `skipna` is False, the result will still be False if there is
+            at least one element that is falsey, otherwise NA will be returned
+            if there are NA's present.
+        axis : int, optional, default 0
+        **kwargs : any, default None
+            Additional keywords have no effect but might be accepted for
+            compatibility with NumPy.
+
+        Returns
+        -------
+        bool or :attr:`pandas.NA`
+
+        See Also
+        --------
+        numpy.all : Numpy version of this method.
+        BooleanArray.any : Return whether any element is truthy.
+ + Examples + -------- + The result indicates whether all elements are truthy (and by default + skips NAs): + + >>> pd.array([True, True, pd.NA]).all() + True + >>> pd.array([1, 1, pd.NA]).all() + True + >>> pd.array([True, False, pd.NA]).all() + False + >>> pd.array([], dtype="boolean").all() + True + >>> pd.array([pd.NA], dtype="boolean").all() + True + >>> pd.array([pd.NA], dtype="Float64").all() + True + + With ``skipna=False``, the result can be NA if this is logically + required (whether ``pd.NA`` is True or False influences the result): + + >>> pd.array([True, True, pd.NA]).all(skipna=False) + + >>> pd.array([1, 1, pd.NA]).all(skipna=False) + + >>> pd.array([True, False, pd.NA]).all(skipna=False) + False + >>> pd.array([1, 0, pd.NA]).all(skipna=False) + False + """ + nv.validate_all((), kwargs) + + values = self._data.copy() + # error: Argument 3 to "putmask" has incompatible type "object"; + # expected "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]], + # bool, int, float, complex, str, bytes, + # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]" + np.putmask(values, self._mask, self._truthy_value) # type: ignore[arg-type] + result = values.all(axis=axis) + + if skipna: + return result + else: + if not result or len(self) == 0 or not self._mask.any(): + return result + else: + return self.dtype.na_value + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> FloatingArray: + """ + See NDFrame.interpolate.__doc__. + """ + # NB: we return type(self) even if copy=False + if self.dtype.kind == "f": + if copy: + data = self._data.copy() + mask = self._mask.copy() + else: + data = self._data + mask = self._mask + elif self.dtype.kind in "iu": + copy = True + data = self._data.astype("f8") + mask = self._mask.copy() + else: + raise NotImplementedError( + f"interpolate is not implemented for dtype={self.dtype}" + ) + + missing.interpolate_2d_inplace( + data, + method=method, + axis=0, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + mask=mask, + **kwargs, + ) + if not copy: + return self # type: ignore[return-value] + if self.dtype.kind == "f": + return type(self)._simple_new(data, mask) # type: ignore[return-value] + else: + from pandas.core.arrays import FloatingArray + + return FloatingArray._simple_new(data, mask) + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> BaseMaskedArray: + data = self._data + mask = self._mask + + op = getattr(masked_accumulations, name) + data, mask = op(data, mask, skipna=skipna, **kwargs) + + return self._simple_new(data, mask) + + # ------------------------------------------------------------------ + # GroupBy Methods + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ): + from pandas.core.groupby.ops import WrappedCythonOp + + kind = WrappedCythonOp.get_kind_from_how(how) + op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) + + # libgroupby functions are responsible for NOT altering mask + mask = self._mask + if op.kind != "aggregate": + result_mask = mask.copy() + else: + result_mask = np.zeros(ngroups, dtype=bool) + + if how == "rank" and kwargs.get("na_option") in ["top", "bottom"]: + result_mask[:] = False + + res_values = op._cython_op_ndim_compat( + self._data, + min_count=min_count, + ngroups=ngroups, + 
comp_ids=ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + + if op.how == "ohlc": + arity = op._cython_arity.get(op.how, 1) + result_mask = np.tile(result_mask, (arity, 1)).T + + if op.how in ["idxmin", "idxmax"]: + # Result values are indexes to take, keep as ndarray + return res_values + else: + # res_values should already have the correct dtype, we just need to + # wrap in a MaskedArray + return self._maybe_mask_result(res_values, result_mask) + + +def transpose_homogeneous_masked_arrays( + masked_arrays: Sequence[BaseMaskedArray], +) -> list[BaseMaskedArray]: + """Transpose masked arrays in a list, but faster. + + Input should be a list of 1-dim masked arrays of equal length and all have the + same dtype. The caller is responsible for ensuring validity of input data. + """ + masked_arrays = list(masked_arrays) + dtype = masked_arrays[0].dtype + + values = [arr._data.reshape(1, -1) for arr in masked_arrays] + transposed_values = np.concatenate( + values, + axis=0, + out=np.empty( + (len(masked_arrays), len(masked_arrays[0])), + order="F", + dtype=dtype.numpy_dtype, + ), + ) + + masks = [arr._mask.reshape(1, -1) for arr in masked_arrays] + transposed_masks = np.concatenate( + masks, axis=0, out=np.empty_like(transposed_values, dtype=bool) + ) + + arr_type = dtype.construct_array_type() + transposed_arrays: list[BaseMaskedArray] = [] + for i in range(transposed_values.shape[1]): + transposed_arr = arr_type(transposed_values[:, i], mask=transposed_masks[:, i]) + transposed_arrays.append(transposed_arr) + + return transposed_arrays diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/numeric.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..68fa7fcb6573c6b5ec754ca65263f8ddd6a6ba74 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/numeric.py @@ -0,0 +1,286 @@ +from __future__ import annotations + +import numbers +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_string_dtype, + pandas_dtype, +) + +from pandas.core.arrays.masked import ( + BaseMaskedArray, + BaseMaskedDtype, +) + +if TYPE_CHECKING: + from collections.abc import Mapping + + import pyarrow + + from pandas._typing import ( + Dtype, + DtypeObj, + Self, + npt, + ) + + +class NumericDtype(BaseMaskedDtype): + _default_np_dtype: np.dtype + _checker: Callable[[Any], bool] # is_foo_dtype + + def __repr__(self) -> str: + return f"{self.name}Dtype()" + + @cache_readonly + def is_signed_integer(self) -> bool: + return self.kind == "i" + + @cache_readonly + def is_unsigned_integer(self) -> bool: + return self.kind == "u" + + @property + def _is_numeric(self) -> bool: + return True + + def __from_arrow__( + self, array: pyarrow.Array | pyarrow.ChunkedArray + ) -> BaseMaskedArray: + """ + Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray. 
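A sketch of the private ``transpose_homogeneous_masked_arrays`` helper defined above (illustrative only; it is internal API): equal-length, same-dtype columns come back as row arrays with values and masks transposed together through one Fortran-ordered buffer.

import pandas as pd
from pandas.core.arrays.masked import transpose_homogeneous_masked_arrays

cols = [
    pd.array([1, 2], dtype="Int64"),
    pd.array([3, None], dtype="Int64"),
    pd.array([5, 6], dtype="Int64"),
]

rows = transpose_homogeneous_masked_arrays(cols)
rows[0]   # [1, 3, 5]
rows[1]   # [2, <NA>, 6]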
+ """ + import pyarrow + + from pandas.core.arrays.arrow._arrow_utils import ( + pyarrow_array_to_numpy_and_mask, + ) + + array_class = self.construct_array_type() + + pyarrow_type = pyarrow.from_numpy_dtype(self.type) + if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null( + array.type + ): + # test_from_arrow_type_error raise for string, but allow + # through itemsize conversion GH#31896 + rt_dtype = pandas_dtype(array.type.to_pandas_dtype()) + if rt_dtype.kind not in "iuf": + # Could allow "c" or potentially disallow float<->int conversion, + # but at the moment we specifically test that uint<->int works + raise TypeError( + f"Expected array of {self} type, got {array.type} instead" + ) + + array = array.cast(pyarrow_type) + + if isinstance(array, pyarrow.ChunkedArray): + # TODO this "if" can be removed when requiring pyarrow >= 10.0, which fixed + # combine_chunks for empty arrays https://github.com/apache/arrow/pull/13757 + if array.num_chunks == 0: + array = pyarrow.array([], type=array.type) + else: + array = array.combine_chunks() + + data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype) + return array_class(data.copy(), ~mask, copy=False) + + @classmethod + def _get_dtype_mapping(cls) -> Mapping[np.dtype, NumericDtype]: + raise AbstractMethodError(cls) + + @classmethod + def _standardize_dtype(cls, dtype: NumericDtype | str | np.dtype) -> NumericDtype: + """ + Convert a string representation or a numpy dtype to NumericDtype. + """ + if isinstance(dtype, str) and (dtype.startswith(("Int", "UInt", "Float"))): + # Avoid DeprecationWarning from NumPy about np.dtype("Int64") + # https://github.com/numpy/numpy/pull/7476 + dtype = dtype.lower() + + if not isinstance(dtype, NumericDtype): + mapping = cls._get_dtype_mapping() + try: + dtype = mapping[np.dtype(dtype)] + except KeyError as err: + raise ValueError(f"invalid dtype specified {dtype}") from err + return dtype + + @classmethod + def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: + """ + Safely cast the values to the given dtype. + + "safe" in this context means the casting is lossless. 
+ """ + raise AbstractMethodError(cls) + + +def _coerce_to_data_and_mask( + values, dtype, copy: bool, dtype_cls: type[NumericDtype], default_dtype: np.dtype +): + checker = dtype_cls._checker + + mask = None + inferred_type = None + + if dtype is None and hasattr(values, "dtype"): + if checker(values.dtype): + dtype = values.dtype + + if dtype is not None: + dtype = dtype_cls._standardize_dtype(dtype) + + cls = dtype_cls.construct_array_type() + if isinstance(values, cls): + values, mask = values._data, values._mask + if dtype is not None: + values = values.astype(dtype.numpy_dtype, copy=False) + + if copy: + values = values.copy() + mask = mask.copy() + return values, mask, dtype, inferred_type + + original = values + if not copy: + values = np.asarray(values) + else: + values = np.array(values, copy=copy) + inferred_type = None + if values.dtype == object or is_string_dtype(values.dtype): + inferred_type = lib.infer_dtype(values, skipna=True) + if inferred_type == "boolean" and dtype is None: + name = dtype_cls.__name__.strip("_") + raise TypeError(f"{values.dtype} cannot be converted to {name}") + + elif values.dtype.kind == "b" and checker(dtype): + if not copy: + values = np.asarray(values, dtype=default_dtype) + else: + values = np.array(values, dtype=default_dtype, copy=copy) + + elif values.dtype.kind not in "iuf": + name = dtype_cls.__name__.strip("_") + raise TypeError(f"{values.dtype} cannot be converted to {name}") + + if values.ndim != 1: + raise TypeError("values must be a 1D list-like") + + if mask is None: + if values.dtype.kind in "iu": + # fastpath + mask = np.zeros(len(values), dtype=np.bool_) + else: + mask = libmissing.is_numeric_na(values) + else: + assert len(mask) == len(values) + + if mask.ndim != 1: + raise TypeError("mask must be a 1D list-like") + + # infer dtype if needed + if dtype is None: + dtype = default_dtype + else: + dtype = dtype.numpy_dtype + + if is_integer_dtype(dtype) and values.dtype.kind == "f" and len(values) > 0: + if mask.all(): + values = np.ones(values.shape, dtype=dtype) + else: + idx = np.nanargmax(values) + if int(values[idx]) != original[idx]: + # We have ints that lost precision during the cast. + inferred_type = lib.infer_dtype(original, skipna=True) + if ( + inferred_type not in ["floating", "mixed-integer-float"] + and not mask.any() + ): + values = np.asarray(original, dtype=dtype) + else: + values = np.asarray(original, dtype="object") + + # we copy as need to coerce here + if mask.any(): + values = values.copy() + values[mask] = cls._internal_fill_value + if inferred_type in ("string", "unicode"): + # casts from str are always safe since they raise + # a ValueError if the str cannot be parsed into a float + values = values.astype(dtype, copy=copy) + else: + values = dtype_cls._safe_cast(values, dtype, copy=False) + + return values, mask, dtype, inferred_type + + +class NumericArray(BaseMaskedArray): + """ + Base class for IntegerArray and FloatingArray. + """ + + _dtype_cls: type[NumericDtype] + + def __init__( + self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False + ) -> None: + checker = self._dtype_cls._checker + if not (isinstance(values, np.ndarray) and checker(values.dtype)): + descr = ( + "floating" + if self._dtype_cls.kind == "f" # type: ignore[comparison-overlap] + else "integer" + ) + raise TypeError( + f"values should be {descr} numpy array. 
Use " + "the 'pd.array' function instead" + ) + if values.dtype == np.float16: + # If we don't raise here, then accessing self.dtype would raise + raise TypeError("FloatingArray does not support np.float16 dtype.") + + super().__init__(values, mask, copy=copy) + + @cache_readonly + def dtype(self) -> NumericDtype: + mapping = self._dtype_cls._get_dtype_mapping() + return mapping[self._data.dtype] + + @classmethod + def _coerce_to_array( + cls, value, *, dtype: DtypeObj, copy: bool = False + ) -> tuple[np.ndarray, np.ndarray]: + dtype_cls = cls._dtype_cls + default_dtype = dtype_cls._default_np_dtype + values, mask, _, _ = _coerce_to_data_and_mask( + value, dtype, copy, dtype_cls, default_dtype + ) + return values, mask + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ) -> Self: + from pandas.core.tools.numeric import to_numeric + + scalars = to_numeric(strings, errors="raise", dtype_backend="numpy_nullable") + return cls._from_sequence(scalars, dtype=dtype, copy=copy) + + _HANDLED_TYPES = (np.ndarray, numbers.Number) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py new file mode 100644 index 0000000000000000000000000000000000000000..07eb91e0cb13bc307086480e352ae76a66e7a7d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py @@ -0,0 +1,563 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import is_supported_dtype +from pandas.compat.numpy import function as nv + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import NumpyEADtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + arraylike, + missing, + nanops, + ops, +) +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.strings.object_array import ObjectStringArrayMixin + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + Dtype, + FillnaOptions, + InterpolateOptions, + NpDtype, + Scalar, + Self, + npt, + ) + + from pandas import Index + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class NumpyExtensionArray( # type: ignore[misc] + OpsMixin, + NDArrayBackedExtensionArray, + ObjectStringArrayMixin, +): + """ + A pandas ExtensionArray for NumPy data. + + This is mostly for internal compatibility, and is not especially + useful on its own. + + Parameters + ---------- + values : ndarray + The NumPy ndarray to wrap. Must be 1-dimensional. + copy : bool, default False + Whether to copy `values`. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.arrays.NumpyExtensionArray(np.array([0, 1, 2, 3])) + + [0, 1, 2, 3] + Length: 4, dtype: int64 + """ + + # If you're wondering why pd.Series(cls) doesn't put the array in an + # ExtensionBlock, search for `ABCNumpyExtensionArray`. We check for + # that _typ to ensure that users don't unnecessarily use EAs inside + # pandas internals, which turns off things like block consolidation. 
+ _typ = "npy_extension" + __array_priority__ = 1000 + _ndarray: np.ndarray + _dtype: NumpyEADtype + _internal_fill_value = np.nan + + # ------------------------------------------------------------------------ + # Constructors + + def __init__( + self, values: np.ndarray | NumpyExtensionArray, copy: bool = False + ) -> None: + if isinstance(values, type(self)): + values = values._ndarray + if not isinstance(values, np.ndarray): + raise ValueError( + f"'values' must be a NumPy array, not {type(values).__name__}" + ) + + if values.ndim == 0: + # Technically we support 2, but do not advertise that fact. + raise ValueError("NumpyExtensionArray must be 1-dimensional.") + + if copy: + values = values.copy() + + dtype = NumpyEADtype(values.dtype) + super().__init__(values, dtype) + + @classmethod + def _from_sequence( + cls, scalars, *, dtype: Dtype | None = None, copy: bool = False + ) -> NumpyExtensionArray: + if isinstance(dtype, NumpyEADtype): + dtype = dtype._dtype + + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object], + # None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, + # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], + # _DTypeDict, Tuple[Any, Any]]]" + result = np.asarray(scalars, dtype=dtype) # type: ignore[arg-type] + if ( + result.ndim > 1 + and not hasattr(scalars, "dtype") + and (dtype is None or dtype == object) + ): + # e.g. list-of-tuples + result = construct_1d_object_array_from_listlike(scalars) + + if copy and result is scalars: + result = result.copy() + return cls(result) + + def _from_backing_data(self, arr: np.ndarray) -> NumpyExtensionArray: + return type(self)(arr) + + # ------------------------------------------------------------------------ + # Data + + @property + def dtype(self) -> NumpyEADtype: + return self._dtype + + # ------------------------------------------------------------------------ + # NumPy Array Interface + + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + return np.asarray(self._ndarray, dtype=dtype) + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + # Lightly modified version of + # https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html + # The primary modification is not boxing scalar return values + # in NumpyExtensionArray, since pandas' ExtensionArrays are 1-d. + out = kwargs.get("out", ()) + + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. test_ufunc_unary + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + # e.g. tests.series.test_ufunc.TestNumpyReductions + return result + + # Defer to the implementation of the ufunc on unwrapped values. 
+ inputs = tuple( + x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in inputs + ) + if out: + kwargs["out"] = tuple( + x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in out + ) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if ufunc.nout > 1: + # multiple return values; re-box array-like results + return tuple(type(self)(x) for x in result) + elif method == "at": + # no return value + return None + elif method == "reduce": + if isinstance(result, np.ndarray): + # e.g. test_np_reduce_2d + return type(self)(result) + + # e.g. test_np_max_nested_tuples + return result + else: + # one return value; re-box array-like results + return type(self)(result) + + # ------------------------------------------------------------------------ + # Pandas ExtensionArray Interface + + def astype(self, dtype, copy: bool = True): + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + result = astype_array(self._ndarray, dtype=dtype, copy=copy) + return result + + def isna(self) -> np.ndarray: + return isna(self._ndarray) + + def _validate_scalar(self, fill_value): + if fill_value is None: + # Primarily for subclasses + fill_value = self.dtype.na_value + return fill_value + + def _values_for_factorize(self) -> tuple[np.ndarray, float | None]: + if self.dtype.kind in "iub": + fv = None + else: + fv = np.nan + return self._ndarray, fv + + # Base EA class (and all other EA classes) don't have limit_area keyword + # This can be removed here as well when the interpolate ffill/bfill method + # deprecation is enforced + def _pad_or_backfill( + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + """ + ffill or bfill along axis=0. + """ + if copy: + out_data = self._ndarray.copy() + else: + out_data = self._ndarray + + meth = missing.clean_fill_method(method) + missing.pad_or_backfill_inplace( + out_data.T, + method=meth, + axis=0, + limit=limit, + limit_area=limit_area, + ) + + if not copy: + return self + return type(self)._simple_new(out_data, dtype=self.dtype) + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index: Index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> Self: + """ + See NDFrame.interpolate.__doc__. + """ + # NB: we return type(self) even if copy=False + if not copy: + out_data = self._ndarray + else: + out_data = self._ndarray.copy() + + # TODO: assert we have floating dtype? 
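+        # Editor's note -- illustrative sketch, not part of pandas. The call to
+        # missing.interpolate_2d_inplace below is the same core routine that backs
+        # Series.interpolate; the default linear method behaves like:
+        #
+        #     pd.Series([1.0, np.nan, 3.0]).interpolate()   # 1.0, 2.0, 3.0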
+ missing.interpolate_2d_inplace( + out_data, + method=method, + axis=axis, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + **kwargs, + ) + if not copy: + return self + return type(self)._simple_new(out_data, dtype=self.dtype) + + # ------------------------------------------------------------------------ + # Reductions + + def any( + self, + *, + axis: AxisInt | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_any((), {"out": out, "keepdims": keepdims}) + result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def all( + self, + *, + axis: AxisInt | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_all((), {"out": out, "keepdims": keepdims}) + result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def min( + self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs + ) -> Scalar: + nv.validate_min((), kwargs) + result = nanops.nanmin( + values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def max( + self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs + ) -> Scalar: + nv.validate_max((), kwargs) + result = nanops.nanmax( + values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def sum( + self, + *, + axis: AxisInt | None = None, + skipna: bool = True, + min_count: int = 0, + **kwargs, + ) -> Scalar: + nv.validate_sum((), kwargs) + result = nanops.nansum( + self._ndarray, axis=axis, skipna=skipna, min_count=min_count + ) + return self._wrap_reduction_result(axis, result) + + def prod( + self, + *, + axis: AxisInt | None = None, + skipna: bool = True, + min_count: int = 0, + **kwargs, + ) -> Scalar: + nv.validate_prod((), kwargs) + result = nanops.nanprod( + self._ndarray, axis=axis, skipna=skipna, min_count=min_count + ) + return self._wrap_reduction_result(axis, result) + + def mean( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims}) + result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def median( + self, + *, + axis: AxisInt | None = None, + out=None, + overwrite_input: bool = False, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_median( + (), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims} + ) + result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def std( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std" + ) + result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + return self._wrap_reduction_result(axis, result) + + def var( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var" + ) + result = 
nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + return self._wrap_reduction_result(axis, result) + + def sem( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem" + ) + result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + return self._wrap_reduction_result(axis, result) + + def kurt( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt" + ) + result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def skew( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew" + ) + result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + # ------------------------------------------------------------------------ + # Additional Methods + + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + mask = self.isna() + if na_value is not lib.no_default and mask.any(): + result = self._ndarray.copy() + result[mask] = na_value + else: + result = self._ndarray + + result = np.asarray(result, dtype=dtype) + + if copy and result is self._ndarray: + result = result.copy() + + return result + + # ------------------------------------------------------------------------ + # Ops + + def __invert__(self) -> NumpyExtensionArray: + return type(self)(~self._ndarray) + + def __neg__(self) -> NumpyExtensionArray: + return type(self)(-self._ndarray) + + def __pos__(self) -> NumpyExtensionArray: + return type(self)(+self._ndarray) + + def __abs__(self) -> NumpyExtensionArray: + return type(self)(abs(self._ndarray)) + + def _cmp_method(self, other, op): + if isinstance(other, NumpyExtensionArray): + other = other._ndarray + + other = ops.maybe_prepare_scalar_for_op(other, (len(self),)) + pd_op = ops.get_array_op(op) + other = ensure_wrapped_if_datetimelike(other) + result = pd_op(self._ndarray, other) + + if op is divmod or op is ops.rdivmod: + a, b = result + if isinstance(a, np.ndarray): + # for e.g. op vs TimedeltaArray, we may already + # have an ExtensionArray, in which case we do not wrap + return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b) + return a, b + + if isinstance(result, np.ndarray): + # for e.g. 
multiplication vs TimedeltaArray, we may already + # have an ExtensionArray, in which case we do not wrap + return self._wrap_ndarray_result(result) + return result + + _arith_method = _cmp_method + + def _wrap_ndarray_result(self, result: np.ndarray): + # If we have timedelta64[ns] result, return a TimedeltaArray instead + # of a NumpyExtensionArray + if result.dtype.kind == "m" and is_supported_dtype(result.dtype): + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray._simple_new(result, dtype=result.dtype) + return type(self)(result) + + # ------------------------------------------------------------------------ + # String methods interface + _str_na_value = np.nan diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/period.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/period.py new file mode 100644 index 0000000000000000000000000000000000000000..c1229e27ab51a70d40329eb33713a92281c9d479 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/period.py @@ -0,0 +1,1313 @@ +from __future__ import annotations + +from datetime import timedelta +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + TypeVar, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + algos as libalgos, + lib, +) +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import ( + BaseOffset, + NaT, + NaTType, + Timedelta, + add_overflowsafe, + astype_overflowsafe, + dt64arr_to_periodarr as c_dt64arr_to_periodarr, + get_unit_from_dtype, + iNaT, + parsing, + period as libperiod, + to_offset, +) +from pandas._libs.tslibs.dtypes import ( + FreqGroup, + PeriodDtypeBase, + freq_to_period_freqstr, +) +from pandas._libs.tslibs.fields import isleapyear_arr +from pandas._libs.tslibs.offsets import ( + Tick, + delta_to_tick, +) +from pandas._libs.tslibs.period import ( + DIFFERENT_FREQ, + IncompatibleFrequency, + Period, + get_period_field_arr, + period_asfreq_arr, +) +from pandas.util._decorators import ( + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_object, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCPeriodIndex, + ABCSeries, + ABCTimedeltaArray, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.arrays import datetimelike as dtl +import pandas.core.common as com + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + AnyArrayLike, + Dtype, + FillnaOptions, + NpDtype, + NumpySorter, + NumpyValueArrayLike, + Self, + npt, + ) + + from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, + ) + from pandas.core.arrays.base import ExtensionArray + + +BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset) + + +_shared_doc_kwargs = { + "klass": "PeriodArray", +} + + +def _field_accessor(name: str, docstring: str | None = None): + def f(self): + base = self.dtype._dtype_code + result = get_period_field_arr(name, self.asi8, base) + return result + + f.__name__ = name + f.__doc__ = docstring + return property(f) + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): # type: ignore[misc] + """ + Pandas ExtensionArray for storing Period data. 
+ + Users should use :func:`~pandas.array` to create new instances. + + Parameters + ---------- + values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex] + The data to store. These should be arrays that can be directly + converted to ordinals without inference or copy (PeriodArray, + ndarray[int64]), or a box around such an array (Series[period], + PeriodIndex). + dtype : PeriodDtype, optional + A PeriodDtype instance from which to extract a `freq`. If both + `freq` and `dtype` are specified, then the frequencies must match. + freq : str or DateOffset + The `freq` to use for the array. Mostly applicable when `values` + is an ndarray of integers, when `freq` is required. When `values` + is a PeriodArray (or box around), it's checked that ``values.freq`` + matches `freq`. + copy : bool, default False + Whether to copy the ordinals before storing. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + Period: Represents a period of time. + PeriodIndex : Immutable Index for period data. + period_range: Create a fixed-frequency PeriodArray. + array: Construct a pandas array. + + Notes + ----- + There are two components to a PeriodArray + + - ordinals : integer ndarray + - freq : pd.tseries.offsets.Offset + + The values are physically stored as a 1-D ndarray of integers. These are + called "ordinals" and represent some kind of offset from a base. + + The `freq` indicates the span covered by each element of the array. + All elements in the PeriodArray have the same `freq`. + + Examples + -------- + >>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01', + ... '2023-01-02'], freq='D')) + + ['2023-01-01', '2023-01-02'] + Length: 2, dtype: period[D] + """ + + # array priority higher than numpy scalars + __array_priority__ = 1000 + _typ = "periodarray" # ABCPeriodArray + _internal_fill_value = np.int64(iNaT) + _recognized_scalars = (Period,) + _is_recognized_dtype = lambda x: isinstance( + x, PeriodDtype + ) # check_compatible_with checks freq match + _infer_matches = ("period",) + + @property + def _scalar_type(self) -> type[Period]: + return Period + + # Names others delegate to us + _other_ops: list[str] = [] + _bool_ops: list[str] = ["is_leap_year"] + _object_ops: list[str] = ["start_time", "end_time", "freq"] + _field_ops: list[str] = [ + "year", + "month", + "day", + "hour", + "minute", + "second", + "weekofyear", + "weekday", + "week", + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "qyear", + "days_in_month", + "daysinmonth", + ] + _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"] + + _dtype: PeriodDtype + + # -------------------------------------------------------------------- + # Constructors + + def __init__( + self, values, dtype: Dtype | None = None, freq=None, copy: bool = False + ) -> None: + if freq is not None: + # GH#52462 + warnings.warn( + "The 'freq' keyword in the PeriodArray constructor is deprecated " + "and will be removed in a future version. 
Pass 'dtype' instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + freq = validate_dtype_freq(dtype, freq) + dtype = PeriodDtype(freq) + + if dtype is not None: + dtype = pandas_dtype(dtype) + if not isinstance(dtype, PeriodDtype): + raise ValueError(f"Invalid dtype {dtype} for PeriodArray") + + if isinstance(values, ABCSeries): + values = values._values + if not isinstance(values, type(self)): + raise TypeError("Incorrect dtype") + + elif isinstance(values, ABCPeriodIndex): + values = values._values + + if isinstance(values, type(self)): + if dtype is not None and dtype != values.dtype: + raise raise_on_incompatible(values, dtype.freq) + values, dtype = values._ndarray, values.dtype + + if not copy: + values = np.asarray(values, dtype="int64") + else: + values = np.array(values, dtype="int64", copy=copy) + if dtype is None: + raise ValueError("dtype is not specified and cannot be inferred") + dtype = cast(PeriodDtype, dtype) + NDArrayBacked.__init__(self, values, dtype) + + # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" + @classmethod + def _simple_new( # type: ignore[override] + cls, + values: npt.NDArray[np.int64], + dtype: PeriodDtype, + ) -> Self: + # alias for PeriodArray.__init__ + assertion_msg = "Should be numpy array of type i8" + assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg + return cls(values, dtype=dtype) + + @classmethod + def _from_sequence( + cls, + scalars, + *, + dtype: Dtype | None = None, + copy: bool = False, + ) -> Self: + if dtype is not None: + dtype = pandas_dtype(dtype) + if dtype and isinstance(dtype, PeriodDtype): + freq = dtype.freq + else: + freq = None + + if isinstance(scalars, cls): + validate_dtype_freq(scalars.dtype, freq) + if copy: + scalars = scalars.copy() + return scalars + + periods = np.asarray(scalars, dtype=object) + + freq = freq or libperiod.extract_freq(periods) + ordinals = libperiod.extract_ordinals(periods, freq) + dtype = PeriodDtype(freq) + return cls(ordinals, dtype=dtype) + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ) -> Self: + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + @classmethod + def _from_datetime64(cls, data, freq, tz=None) -> Self: + """ + Construct a PeriodArray from a datetime64 array + + Parameters + ---------- + data : ndarray[datetime64[ns], datetime64[ns, tz]] + freq : str or Tick + tz : tzinfo, optional + + Returns + ------- + PeriodArray[freq] + """ + if isinstance(freq, BaseOffset): + freq = freq_to_period_freqstr(freq.n, freq.name) + data, freq = dt64arr_to_periodarr(data, freq, tz) + dtype = PeriodDtype(freq) + return cls(data, dtype=dtype) + + @classmethod + def _generate_range(cls, start, end, periods, freq): + periods = dtl.validate_periods(periods) + + if freq is not None: + freq = Period._maybe_convert_freq(freq) + + if start is not None or end is not None: + subarr, freq = _get_ordinal_range(start, end, periods, freq) + else: + raise ValueError("Not enough parameters to construct Period range") + + return subarr, freq + + @classmethod + def _from_fields(cls, *, fields: dict, freq) -> Self: + subarr, freq = _range_from_fields(freq=freq, **fields) + dtype = PeriodDtype(freq) + return cls._simple_new(subarr, dtype=dtype) + + # ----------------------------------------------------------------- + # DatetimeLike Interface + + # error: Argument 1 of "_unbox_scalar" is incompatible with supertype + # "DatetimeLikeArrayMixin"; supertype defines the 
argument type as + # "Union[Union[Period, Any, Timedelta], NaTType]" + def _unbox_scalar( # type: ignore[override] + self, + value: Period | NaTType, + ) -> np.int64: + if value is NaT: + # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value" + return np.int64(value._value) # type: ignore[union-attr] + elif isinstance(value, self._scalar_type): + self._check_compatible_with(value) + return np.int64(value.ordinal) + else: + raise ValueError(f"'value' should be a Period. Got '{value}' instead.") + + def _scalar_from_string(self, value: str) -> Period: + return Period(value, freq=self.freq) + + # error: Argument 1 of "_check_compatible_with" is incompatible with + # supertype "DatetimeLikeArrayMixin"; supertype defines the argument type + # as "Period | Timestamp | Timedelta | NaTType" + def _check_compatible_with(self, other: Period | NaTType | PeriodArray) -> None: # type: ignore[override] + if other is NaT: + return + # error: Item "NaTType" of "Period | NaTType | PeriodArray" has no + # attribute "freq" + self._require_matching_freq(other.freq) # type: ignore[union-attr] + + # -------------------------------------------------------------------- + # Data / Attributes + + @cache_readonly + def dtype(self) -> PeriodDtype: + return self._dtype + + # error: Cannot override writeable attribute with read-only property + @property # type: ignore[override] + def freq(self) -> BaseOffset: + """ + Return the frequency object for this PeriodArray. + """ + return self.dtype.freq + + @property + def freqstr(self) -> str: + return freq_to_period_freqstr(self.freq.n, self.freq.name) + + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + if dtype == "i8": + return self.asi8 + elif dtype == bool: + return ~self._isnan + + # This will raise TypeError for non-object dtypes + return np.array(list(self), dtype=object) + + def __arrow_array__(self, type=None): + """ + Convert myself into a pyarrow Array. + """ + import pyarrow + + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + if type is not None: + if pyarrow.types.is_integer(type): + return pyarrow.array(self._ndarray, mask=self.isna(), type=type) + elif isinstance(type, ArrowPeriodType): + # ensure we have the same freq + if self.freqstr != type.freq: + raise TypeError( + "Not supported to convert PeriodArray to array with different " + f"'freq' ({self.freqstr} vs {type.freq})" + ) + else: + raise TypeError( + f"Not supported to convert PeriodArray to '{type}' type" + ) + + period_type = ArrowPeriodType(self.freqstr) + storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64") + return pyarrow.ExtensionArray.from_storage(period_type, storage_array) + + # -------------------------------------------------------------------- + # Vectorized analogues of Period properties + + year = _field_accessor( + "year", + """ + The year of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y") + >>> idx.year + Index([2023, 2024, 2025], dtype='int64') + """, + ) + month = _field_accessor( + "month", + """ + The month as January=1, December=12. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.month + Index([1, 2, 3], dtype='int64') + """, + ) + day = _field_accessor( + "day", + """ + The days of the period. 
+ + Examples + -------- + >>> idx = pd.PeriodIndex(['2020-01-31', '2020-02-28'], freq='D') + >>> idx.day + Index([31, 28], dtype='int64') + """, + ) + hour = _field_accessor( + "hour", + """ + The hour of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01 10:00", "2023-01-01 11:00"], freq='h') + >>> idx.hour + Index([10, 11], dtype='int64') + """, + ) + minute = _field_accessor( + "minute", + """ + The minute of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01 10:30:00", + ... "2023-01-01 11:50:00"], freq='min') + >>> idx.minute + Index([30, 50], dtype='int64') + """, + ) + second = _field_accessor( + "second", + """ + The second of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01 10:00:30", + ... "2023-01-01 10:00:31"], freq='s') + >>> idx.second + Index([30, 31], dtype='int64') + """, + ) + weekofyear = _field_accessor( + "week", + """ + The week ordinal of the year. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.week # It can be written `weekofyear` + Index([5, 9, 13], dtype='int64') + """, + ) + week = weekofyear + day_of_week = _field_accessor( + "day_of_week", + """ + The day of the week with Monday=0, Sunday=6. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D") + >>> idx.weekday + Index([6, 0, 1], dtype='int64') + """, + ) + dayofweek = day_of_week + weekday = dayofweek + dayofyear = day_of_year = _field_accessor( + "day_of_year", + """ + The ordinal day of the year. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-10", "2023-02-01", "2023-03-01"], freq="D") + >>> idx.dayofyear + Index([10, 32, 60], dtype='int64') + + >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y") + >>> idx + PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]') + >>> idx.dayofyear + Index([365, 366, 365], dtype='int64') + """, + ) + quarter = _field_accessor( + "quarter", + """ + The quarter of the date. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.quarter + Index([1, 1, 1], dtype='int64') + """, + ) + qyear = _field_accessor("qyear") + days_in_month = _field_accessor( + "days_in_month", + """ + The number of days in the month. + + Examples + -------- + For Series: + + >>> period = pd.period_range('2020-1-1 00:00', '2020-3-1 00:00', freq='M') + >>> s = pd.Series(period) + >>> s + 0 2020-01 + 1 2020-02 + 2 2020-03 + dtype: period[M] + >>> s.dt.days_in_month + 0 31 + 1 29 + 2 31 + dtype: int64 + + For PeriodIndex: + + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.days_in_month # It can be also entered as `daysinmonth` + Index([31, 28, 31], dtype='int64') + """, + ) + daysinmonth = days_in_month + + @property + def is_leap_year(self) -> npt.NDArray[np.bool_]: + """ + Logical indicating if the date belongs to a leap year. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y") + >>> idx.is_leap_year + array([False, True, False]) + """ + return isleapyear_arr(np.asarray(self.year)) + + def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: + """ + Cast to DatetimeArray/Index. + + Parameters + ---------- + freq : str or DateOffset, optional + Target frequency. The default is 'D' for week or longer, + 's' otherwise. + how : {'s', 'e', 'start', 'end'} + Whether to use the start or end of the time period being converted. 
+ + Returns + ------- + DatetimeArray/Index + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.to_timestamp() + DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'], + dtype='datetime64[ns]', freq='MS') + """ + from pandas.core.arrays import DatetimeArray + + how = libperiod.validate_end_alias(how) + + end = how == "E" + if end: + if freq == "B" or self.freq == "B": + # roll forward to ensure we land on B date + adjust = Timedelta(1, "D") - Timedelta(1, "ns") + return self.to_timestamp(how="start") + adjust + else: + adjust = Timedelta(1, "ns") + return (self + self.freq).to_timestamp(how="start") - adjust + + if freq is None: + freq_code = self._dtype._get_to_timestamp_base() + dtype = PeriodDtypeBase(freq_code, 1) + freq = dtype._freqstr + base = freq_code + else: + freq = Period._maybe_convert_freq(freq) + base = freq._period_dtype_code + + new_parr = self.asfreq(freq, how=how) + + new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) + dta = DatetimeArray._from_sequence(new_data) + + if self.freq.name == "B": + # See if we can retain BDay instead of Day in cases where + # len(self) is too small for infer_freq to distinguish between them + diffs = libalgos.unique_deltas(self.asi8) + if len(diffs) == 1: + diff = diffs[0] + if diff == self.dtype._n: + dta._freq = self.freq + elif diff == 1: + dta._freq = self.freq.base + # TODO: other cases? + return dta + else: + return dta._with_freq("infer") + + # -------------------------------------------------------------------- + + def _box_func(self, x) -> Period | NaTType: + return Period._from_ordinal(ordinal=x, freq=self.freq) + + @doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex") + def asfreq(self, freq=None, how: str = "E") -> Self: + """ + Convert the {klass} to the specified frequency `freq`. + + Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments + to each :class:`~pandas.Period` in this {klass}. + + Parameters + ---------- + freq : str + A frequency. + how : str {{'E', 'S'}}, default 'E' + Whether the elements should be aligned to the end + or start within pa period. + + * 'E', 'END', or 'FINISH' for end, + * 'S', 'START', or 'BEGIN' for start. + + January 31st ('END') vs. January 1st ('START') for example. + + Returns + ------- + {klass} + The transformed {klass} with the new frequency. + + See Also + -------- + {other}.asfreq: Convert each Period in a {other_name} to the given frequency. + Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency. 
+ + Examples + -------- + >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='Y') + >>> pidx + PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], + dtype='period[Y-DEC]') + + >>> pidx.asfreq('M') + PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', + '2015-12'], dtype='period[M]') + + >>> pidx.asfreq('M', how='S') + PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', + '2015-01'], dtype='period[M]') + """ + how = libperiod.validate_end_alias(how) + if isinstance(freq, BaseOffset) and hasattr(freq, "_period_dtype_code"): + freq = PeriodDtype(freq)._freqstr + freq = Period._maybe_convert_freq(freq) + + base1 = self._dtype._dtype_code + base2 = freq._period_dtype_code + + asi8 = self.asi8 + # self.freq.n can't be negative or 0 + end = how == "E" + if end: + ordinal = asi8 + self.dtype._n - 1 + else: + ordinal = asi8 + + new_data = period_asfreq_arr(ordinal, base1, base2, end) + + if self._hasna: + new_data[self._isnan] = iNaT + + dtype = PeriodDtype(freq) + return type(self)(new_data, dtype=dtype) + + # ------------------------------------------------------------------ + # Rendering Methods + + def _formatter(self, boxed: bool = False): + if boxed: + return str + return "'{}'".format + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None, **kwargs + ) -> npt.NDArray[np.object_]: + """ + actually format my specific types + """ + return libperiod.period_array_strftime( + self.asi8, self.dtype._dtype_code, na_rep, date_format + ) + + # ------------------------------------------------------------------ + + def astype(self, dtype, copy: bool = True): + # We handle Period[T] -> Period[U] + # Our parent handles everything else. + dtype = pandas_dtype(dtype) + if dtype == self._dtype: + if not copy: + return self + else: + return self.copy() + if isinstance(dtype, PeriodDtype): + return self.asfreq(dtype.freq) + + if lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype): + # GH#45038 match PeriodIndex behavior. 
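+            # Editor's note -- illustrative sketch, not part of pandas. The branch
+            # below makes an astype to a datetime64 dtype behave like to_timestamp
+            # plus tz-localization / unit conversion, e.g.:
+            #
+            #     pidx = pd.period_range("2023-01", periods=2, freq="M")
+            #     pidx.to_timestamp()   # DatetimeIndex(['2023-01-01', '2023-02-01'], ...)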
+ tz = getattr(dtype, "tz", None) + unit = dtl.dtype_to_unit(dtype) + return self.to_timestamp().tz_localize(tz).as_unit(unit) + + return super().astype(dtype, copy=copy) + + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + npvalue = self._validate_setitem_value(value).view("M8[ns]") + + # Cast to M8 to get datetime-like NaT placement, + # similar to dtl._period_dispatch + m8arr = self._ndarray.view("M8[ns]") + return m8arr.searchsorted(npvalue, side=side, sorter=sorter) + + def _pad_or_backfill( + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + # view as dt64 so we get treated as timelike in core.missing, + # similar to dtl._period_dispatch + dta = self.view("M8[ns]") + result = dta._pad_or_backfill( + method=method, limit=limit, limit_area=limit_area, copy=copy + ) + if copy: + return cast("Self", result.view(self.dtype)) + else: + return self + + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + if method is not None: + # view as dt64 so we get treated as timelike in core.missing, + # similar to dtl._period_dispatch + dta = self.view("M8[ns]") + result = dta.fillna(value=value, method=method, limit=limit, copy=copy) + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "PeriodArray") + return result.view(self.dtype) # type: ignore[return-value] + return super().fillna(value=value, method=method, limit=limit, copy=copy) + + # ------------------------------------------------------------------ + # Arithmetic Methods + + def _addsub_int_array_or_scalar( + self, other: np.ndarray | int, op: Callable[[Any, Any], Any] + ) -> Self: + """ + Add or subtract array of integers. + + Parameters + ---------- + other : np.ndarray[int64] or int + op : {operator.add, operator.sub} + + Returns + ------- + result : PeriodArray + """ + assert op in [operator.add, operator.sub] + if op is operator.sub: + other = -other + res_values = add_overflowsafe(self.asi8, np.asarray(other, dtype="i8")) + return type(self)(res_values, dtype=self.dtype) + + def _add_offset(self, other: BaseOffset): + assert not isinstance(other, Tick) + + self._require_matching_freq(other, base=True) + return self._addsub_int_array_or_scalar(other.n, operator.add) + + # TODO: can we de-duplicate with Period._add_timedeltalike_scalar? + def _add_timedeltalike_scalar(self, other): + """ + Parameters + ---------- + other : timedelta, Tick, np.timedelta64 + + Returns + ------- + PeriodArray + """ + if not isinstance(self.freq, Tick): + # We cannot add timedelta-like to non-tick PeriodArray + raise raise_on_incompatible(self, other) + + if isna(other): + # i.e. 
np.timedelta64("NaT") + return super()._add_timedeltalike_scalar(other) + + td = np.asarray(Timedelta(other).asm8) + return self._add_timedelta_arraylike(td) + + def _add_timedelta_arraylike( + self, other: TimedeltaArray | npt.NDArray[np.timedelta64] + ) -> Self: + """ + Parameters + ---------- + other : TimedeltaArray or ndarray[timedelta64] + + Returns + ------- + PeriodArray + """ + if not self.dtype._is_tick_like(): + # We cannot add timedelta-like to non-tick PeriodArray + raise TypeError( + f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}" + ) + + dtype = np.dtype(f"m8[{self.dtype._td64_unit}]") + + # Similar to _check_timedeltalike_freq_compat, but we raise with a + # more specific exception message if necessary. + try: + delta = astype_overflowsafe( + np.asarray(other), dtype=dtype, copy=False, round_ok=False + ) + except ValueError as err: + # e.g. if we have minutes freq and try to add 30s + # "Cannot losslessly convert units" + raise IncompatibleFrequency( + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq." + ) from err + + res_values = add_overflowsafe(self.asi8, np.asarray(delta.view("i8"))) + return type(self)(res_values, dtype=self.dtype) + + def _check_timedeltalike_freq_compat(self, other): + """ + Arithmetic operations with timedelta-like scalars or array `other` + are only valid if `other` is an integer multiple of `self.freq`. + If the operation is valid, find that integer multiple. Otherwise, + raise because the operation is invalid. + + Parameters + ---------- + other : timedelta, np.timedelta64, Tick, + ndarray[timedelta64], TimedeltaArray, TimedeltaIndex + + Returns + ------- + multiple : int or ndarray[int64] + + Raises + ------ + IncompatibleFrequency + """ + assert self.dtype._is_tick_like() # checked by calling function + + dtype = np.dtype(f"m8[{self.dtype._td64_unit}]") + + if isinstance(other, (timedelta, np.timedelta64, Tick)): + td = np.asarray(Timedelta(other).asm8) + else: + td = np.asarray(other) + + try: + delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False) + except ValueError as err: + raise raise_on_incompatible(self, other) from err + + delta = delta.view("i8") + return lib.item_from_zerodim(delta) + + +def raise_on_incompatible(left, right) -> IncompatibleFrequency: + """ + Helper function to render a consistent error message when raising + IncompatibleFrequency. + + Parameters + ---------- + left : PeriodArray + right : None, DateOffset, Period, ndarray, or timedelta-like + + Returns + ------- + IncompatibleFrequency + Exception to be raised by the caller. 
+ """ + # GH#24283 error message format depends on whether right is scalar + if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None: + other_freq = None + elif isinstance(right, BaseOffset): + other_freq = freq_to_period_freqstr(right.n, right.name) + elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period)): + other_freq = right.freqstr + else: + other_freq = delta_to_tick(Timedelta(right)).freqstr + + own_freq = freq_to_period_freqstr(left.freq.n, left.freq.name) + msg = DIFFERENT_FREQ.format( + cls=type(left).__name__, own_freq=own_freq, other_freq=other_freq + ) + return IncompatibleFrequency(msg) + + +# ------------------------------------------------------------------- +# Constructor Helpers + + +def period_array( + data: Sequence[Period | str | None] | AnyArrayLike, + freq: str | Tick | BaseOffset | None = None, + copy: bool = False, +) -> PeriodArray: + """ + Construct a new PeriodArray from a sequence of Period scalars. + + Parameters + ---------- + data : Sequence of Period objects + A sequence of Period objects. These are required to all have + the same ``freq.`` Missing values can be indicated by ``None`` + or ``pandas.NaT``. + freq : str, Tick, or Offset + The frequency of every element of the array. This can be specified + to avoid inferring the `freq` from `data`. + copy : bool, default False + Whether to ensure a copy of the data is made. + + Returns + ------- + PeriodArray + + See Also + -------- + PeriodArray + pandas.PeriodIndex + + Examples + -------- + >>> period_array([pd.Period('2017', freq='Y'), + ... pd.Period('2018', freq='Y')]) + + ['2017', '2018'] + Length: 2, dtype: period[Y-DEC] + + >>> period_array([pd.Period('2017', freq='Y'), + ... pd.Period('2018', freq='Y'), + ... pd.NaT]) + + ['2017', '2018', 'NaT'] + Length: 3, dtype: period[Y-DEC] + + Integers that look like years are handled + + >>> period_array([2000, 2001, 2002], freq='D') + + ['2000-01-01', '2001-01-01', '2002-01-01'] + Length: 3, dtype: period[D] + + Datetime-like strings may also be passed + + >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q') + + ['2000Q1', '2000Q2', '2000Q3', '2000Q4'] + Length: 4, dtype: period[Q-DEC] + """ + data_dtype = getattr(data, "dtype", None) + + if lib.is_np_dtype(data_dtype, "M"): + return PeriodArray._from_datetime64(data, freq) + if isinstance(data_dtype, PeriodDtype): + out = PeriodArray(data) + if freq is not None: + if freq == data_dtype.freq: + return out + return out.asfreq(freq) + return out + + # other iterable of some kind + if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)): + data = list(data) + + arrdata = np.asarray(data) + + dtype: PeriodDtype | None + if freq: + dtype = PeriodDtype(freq) + else: + dtype = None + + if arrdata.dtype.kind == "f" and len(arrdata) > 0: + raise TypeError("PeriodIndex does not allow floating point in construction") + + if arrdata.dtype.kind in "iu": + arr = arrdata.astype(np.int64, copy=False) + # error: Argument 2 to "from_ordinals" has incompatible type "Union[str, + # Tick, None]"; expected "Union[timedelta, BaseOffset, str]" + ordinals = libperiod.from_ordinals(arr, freq) # type: ignore[arg-type] + return PeriodArray(ordinals, dtype=dtype) + + data = ensure_object(arrdata) + if freq is None: + freq = libperiod.extract_freq(data) + dtype = PeriodDtype(freq) + return PeriodArray._from_sequence(data, dtype=dtype) + + +@overload +def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT: + ... 
+ + +@overload +def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: + ... + + +def validate_dtype_freq( + dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None +) -> BaseOffsetT: + """ + If both a dtype and a freq are available, ensure they match. If only + dtype is available, extract the implied freq. + + Parameters + ---------- + dtype : dtype + freq : DateOffset or None + + Returns + ------- + freq : DateOffset + + Raises + ------ + ValueError : non-period dtype + IncompatibleFrequency : mismatch between dtype and freq + """ + if freq is not None: + freq = to_offset(freq, is_period=True) + + if dtype is not None: + dtype = pandas_dtype(dtype) + if not isinstance(dtype, PeriodDtype): + raise ValueError("dtype must be PeriodDtype") + if freq is None: + freq = dtype.freq + elif freq != dtype.freq: + raise IncompatibleFrequency("specified freq and dtype are different") + # error: Incompatible return value type (got "Union[BaseOffset, Any, None]", + # expected "BaseOffset") + return freq # type: ignore[return-value] + + +def dt64arr_to_periodarr( + data, freq, tz=None +) -> tuple[npt.NDArray[np.int64], BaseOffset]: + """ + Convert an datetime-like array to values Period ordinals. + + Parameters + ---------- + data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]] + freq : Optional[Union[str, Tick]] + Must match the `freq` on the `data` if `data` is a DatetimeIndex + or Series. + tz : Optional[tzinfo] + + Returns + ------- + ordinals : ndarray[int64] + freq : Tick + The frequency extracted from the Series or DatetimeIndex if that's + used. + + """ + if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M": + raise ValueError(f"Wrong dtype: {data.dtype}") + + if freq is None: + if isinstance(data, ABCIndex): + data, freq = data._values, data.freq + elif isinstance(data, ABCSeries): + data, freq = data._values, data.dt.freq + + elif isinstance(data, (ABCIndex, ABCSeries)): + data = data._values + + reso = get_unit_from_dtype(data.dtype) + freq = Period._maybe_convert_freq(freq) + base = freq._period_dtype_code + return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq + + +def _get_ordinal_range(start, end, periods, freq, mult: int = 1): + if com.count_not_none(start, end, periods) != 2: + raise ValueError( + "Of the three parameters: start, end, and periods, " + "exactly two must be specified" + ) + + if freq is not None: + freq = to_offset(freq, is_period=True) + mult = freq.n + + if start is not None: + start = Period(start, freq) + if end is not None: + end = Period(end, freq) + + is_start_per = isinstance(start, Period) + is_end_per = isinstance(end, Period) + + if is_start_per and is_end_per and start.freq != end.freq: + raise ValueError("start and end must have same freq") + if start is NaT or end is NaT: + raise ValueError("start and end must not be NaT") + + if freq is None: + if is_start_per: + freq = start.freq + elif is_end_per: + freq = end.freq + else: # pragma: no cover + raise ValueError("Could not infer freq from start/end") + mult = freq.n + + if periods is not None: + periods = periods * mult + if start is None: + data = np.arange( + end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64 + ) + else: + data = np.arange( + start.ordinal, start.ordinal + periods, mult, dtype=np.int64 + ) + else: + data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) + + return data, freq + + +def _range_from_fields( + year=None, + month=None, + quarter=None, + day=None, + hour=None, + 
minute=None, + second=None, + freq=None, +) -> tuple[np.ndarray, BaseOffset]: + if hour is None: + hour = 0 + if minute is None: + minute = 0 + if second is None: + second = 0 + if day is None: + day = 1 + + ordinals = [] + + if quarter is not None: + if freq is None: + freq = to_offset("Q", is_period=True) + base = FreqGroup.FR_QTR.value + else: + freq = to_offset(freq, is_period=True) + base = libperiod.freq_to_dtype_code(freq) + if base != FreqGroup.FR_QTR.value: + raise AssertionError("base must equal FR_QTR") + + freqstr = freq.freqstr + year, quarter = _make_field_arrays(year, quarter) + for y, q in zip(year, quarter): + calendar_year, calendar_month = parsing.quarter_to_myear(y, q, freqstr) + val = libperiod.period_ordinal( + calendar_year, calendar_month, 1, 1, 1, 1, 0, 0, base + ) + ordinals.append(val) + else: + freq = to_offset(freq, is_period=True) + base = libperiod.freq_to_dtype_code(freq) + arrays = _make_field_arrays(year, month, day, hour, minute, second) + for y, mth, d, h, mn, s in zip(*arrays): + ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base)) + + return np.array(ordinals, dtype=np.int64), freq + + +def _make_field_arrays(*fields) -> list[np.ndarray]: + length = None + for x in fields: + if isinstance(x, (list, np.ndarray, ABCSeries)): + if length is not None and len(x) != length: + raise ValueError("Mismatched Period array lengths") + if length is None: + length = len(x) + + # error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected + # "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int, + # integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]" + return [ + np.asarray(x) + if isinstance(x, (np.ndarray, list, ABCSeries)) + else np.repeat(x, length) # type: ignore[arg-type] + for x in fields + ] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..adf83963aca39e7d2ec2da55d21fc69aaca48977 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py @@ -0,0 +1,19 @@ +from pandas.core.arrays.sparse.accessor import ( + SparseAccessor, + SparseFrameAccessor, +) +from pandas.core.arrays.sparse.array import ( + BlockIndex, + IntIndex, + SparseArray, + make_sparse_index, +) + +__all__ = [ + "BlockIndex", + "IntIndex", + "make_sparse_index", + "SparseAccessor", + "SparseArray", + "SparseFrameAccessor", +] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..fc7debb1f31e4e3afe8c86e57e1f5b8bf1e343be --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py @@ -0,0 +1,414 @@ +"""Sparse accessor""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.core.accessor import ( + PandasDelegate, + delegate_names, +) +from pandas.core.arrays.sparse.array import SparseArray + +if TYPE_CHECKING: + from pandas import ( + DataFrame, + Series, + ) + + +class BaseAccessor: + _validation_msg = "Can only use the '.sparse' accessor 
with Sparse data." + + def __init__(self, data=None) -> None: + self._parent = data + self._validate(data) + + def _validate(self, data): + raise NotImplementedError + + +@delegate_names( + SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property" +) +class SparseAccessor(BaseAccessor, PandasDelegate): + """ + Accessor for SparseSparse from other sparse matrix data types. + + Examples + -------- + >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]") + >>> ser.sparse.density + 0.6 + >>> ser.sparse.sp_values + array([2, 2, 2]) + """ + + def _validate(self, data): + if not isinstance(data.dtype, SparseDtype): + raise AttributeError(self._validation_msg) + + def _delegate_property_get(self, name: str, *args, **kwargs): + return getattr(self._parent.array, name) + + def _delegate_method(self, name: str, *args, **kwargs): + if name == "from_coo": + return self.from_coo(*args, **kwargs) + elif name == "to_coo": + return self.to_coo(*args, **kwargs) + else: + raise ValueError + + @classmethod + def from_coo(cls, A, dense_index: bool = False) -> Series: + """ + Create a Series with sparse values from a scipy.sparse.coo_matrix. + + Parameters + ---------- + A : scipy.sparse.coo_matrix + dense_index : bool, default False + If False (default), the index consists of only the + coords of the non-null entries of the original coo_matrix. + If True, the index consists of the full sorted + (row, col) coordinates of the coo_matrix. + + Returns + ------- + s : Series + A Series with sparse values. + + Examples + -------- + >>> from scipy import sparse + + >>> A = sparse.coo_matrix( + ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4) + ... ) + >>> A + <3x4 sparse matrix of type '' + with 3 stored elements in COOrdinate format> + + >>> A.todense() + matrix([[0., 0., 1., 2.], + [3., 0., 0., 0.], + [0., 0., 0., 0.]]) + + >>> ss = pd.Series.sparse.from_coo(A) + >>> ss + 0 2 1.0 + 3 2.0 + 1 0 3.0 + dtype: Sparse[float64, nan] + """ + from pandas import Series + from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series + + result = coo_to_sparse_series(A, dense_index=dense_index) + result = Series(result.array, index=result.index, copy=False) + + return result + + def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False): + """ + Create a scipy.sparse.coo_matrix from a Series with MultiIndex. + + Use row_levels and column_levels to determine the row and column + coordinates respectively. row_levels and column_levels are the names + (labels) or numbers of the levels. {row_levels, column_levels} must be + a partition of the MultiIndex level names (or numbers). + + Parameters + ---------- + row_levels : tuple/list + column_levels : tuple/list + sort_labels : bool, default False + Sort the row and column labels before forming the sparse matrix. + When `row_levels` and/or `column_levels` refer to a single level, + set to `True` for a faster execution. + + Returns + ------- + y : scipy.sparse.coo_matrix + rows : list (row labels) + columns : list (column labels) + + Examples + -------- + >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan]) + >>> s.index = pd.MultiIndex.from_tuples( + ... [ + ... (1, 2, "a", 0), + ... (1, 2, "a", 1), + ... (1, 1, "b", 0), + ... (1, 1, "b", 1), + ... (2, 1, "b", 0), + ... (2, 1, "b", 1) + ... ], + ... names=["A", "B", "C", "D"], + ... 
) + >>> s + A B C D + 1 2 a 0 3.0 + 1 NaN + 1 b 0 1.0 + 1 3.0 + 2 1 b 0 NaN + 1 NaN + dtype: float64 + + >>> ss = s.astype("Sparse") + >>> ss + A B C D + 1 2 a 0 3.0 + 1 NaN + 1 b 0 1.0 + 1 3.0 + 2 1 b 0 NaN + 1 NaN + dtype: Sparse[float64, nan] + + >>> A, rows, columns = ss.sparse.to_coo( + ... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True + ... ) + >>> A + <3x4 sparse matrix of type '' + with 3 stored elements in COOrdinate format> + >>> A.todense() + matrix([[0., 0., 1., 3.], + [3., 0., 0., 0.], + [0., 0., 0., 0.]]) + + >>> rows + [(1, 1), (1, 2), (2, 1)] + >>> columns + [('a', 0), ('a', 1), ('b', 0), ('b', 1)] + """ + from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo + + A, rows, columns = sparse_series_to_coo( + self._parent, row_levels, column_levels, sort_labels=sort_labels + ) + return A, rows, columns + + def to_dense(self) -> Series: + """ + Convert a Series from sparse values to dense. + + Returns + ------- + Series: + A Series with the same values, stored as a dense array. + + Examples + -------- + >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0])) + >>> series + 0 0 + 1 1 + 2 0 + dtype: Sparse[int64, 0] + + >>> series.sparse.to_dense() + 0 0 + 1 1 + 2 0 + dtype: int64 + """ + from pandas import Series + + return Series( + self._parent.array.to_dense(), + index=self._parent.index, + name=self._parent.name, + copy=False, + ) + + +class SparseFrameAccessor(BaseAccessor, PandasDelegate): + """ + DataFrame accessor for sparse data. + + Examples + -------- + >>> df = pd.DataFrame({"a": [1, 2, 0, 0], + ... "b": [3, 0, 0, 4]}, dtype="Sparse[int]") + >>> df.sparse.density + 0.5 + """ + + def _validate(self, data): + dtypes = data.dtypes + if not all(isinstance(t, SparseDtype) for t in dtypes): + raise AttributeError(self._validation_msg) + + @classmethod + def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame: + """ + Create a new DataFrame from a scipy sparse matrix. + + Parameters + ---------- + data : scipy.sparse.spmatrix + Must be convertible to csc format. + index, columns : Index, optional + Row and column labels to use for the resulting DataFrame. + Defaults to a RangeIndex. + + Returns + ------- + DataFrame + Each column of the DataFrame is stored as a + :class:`arrays.SparseArray`. + + Examples + -------- + >>> import scipy.sparse + >>> mat = scipy.sparse.eye(3, dtype=float) + >>> pd.DataFrame.sparse.from_spmatrix(mat) + 0 1 2 + 0 1.0 0 0 + 1 0 1.0 0 + 2 0 0 1.0 + """ + from pandas._libs.sparse import IntIndex + + from pandas import DataFrame + + data = data.tocsc() + index, columns = cls._prep_index(data, index, columns) + n_rows, n_columns = data.shape + # We need to make sure indices are sorted, as we create + # IntIndex with no input validation (i.e. check_integrity=False ). + # Indices may already be sorted in scipy in which case this adds + # a small overhead. + data.sort_indices() + indices = data.indices + indptr = data.indptr + array_data = data.data + dtype = SparseDtype(array_data.dtype, 0) + arrays = [] + for i in range(n_columns): + sl = slice(indptr[i], indptr[i + 1]) + idx = IntIndex(n_rows, indices[sl], check_integrity=False) + arr = SparseArray._simple_new(array_data[sl], idx, dtype) + arrays.append(arr) + return DataFrame._from_arrays( + arrays, columns=columns, index=index, verify_integrity=False + ) + + def to_dense(self) -> DataFrame: + """ + Convert a DataFrame with sparse values to dense. + + Returns + ------- + DataFrame + A DataFrame with the same values stored as dense arrays. 
+ + Examples + -------- + >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])}) + >>> df.sparse.to_dense() + A + 0 0 + 1 1 + 2 0 + """ + from pandas import DataFrame + + data = {k: v.array.to_dense() for k, v in self._parent.items()} + return DataFrame(data, index=self._parent.index, columns=self._parent.columns) + + def to_coo(self): + """ + Return the contents of the frame as a sparse SciPy COO matrix. + + Returns + ------- + scipy.sparse.spmatrix + If the caller is heterogeneous and contains booleans or objects, + the result will be of dtype=object. See Notes. + + Notes + ----- + The dtype will be the lowest-common-denominator type (implicit + upcasting); that is to say if the dtypes (even of numeric types) + are mixed, the one that accommodates all will be chosen. + + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. By numpy.find_common_type convention, mixing int64 and + and uint64 will result in a float64 dtype. + + Examples + -------- + >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])}) + >>> df.sparse.to_coo() + <4x1 sparse matrix of type '' + with 2 stored elements in COOrdinate format> + """ + import_optional_dependency("scipy") + from scipy.sparse import coo_matrix + + dtype = find_common_type(self._parent.dtypes.to_list()) + if isinstance(dtype, SparseDtype): + dtype = dtype.subtype + + cols, rows, data = [], [], [] + for col, (_, ser) in enumerate(self._parent.items()): + sp_arr = ser.array + if sp_arr.fill_value != 0: + raise ValueError("fill value must be 0 when converting to COO matrix") + + row = sp_arr.sp_index.indices + cols.append(np.repeat(col, len(row))) + rows.append(row) + data.append(sp_arr.sp_values.astype(dtype, copy=False)) + + cols = np.concatenate(cols) + rows = np.concatenate(rows) + data = np.concatenate(data) + return coo_matrix((data, (rows, cols)), shape=self._parent.shape) + + @property + def density(self) -> float: + """ + Ratio of non-sparse points to total (dense) data points. + + Examples + -------- + >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])}) + >>> df.sparse.density + 0.5 + """ + tmp = np.mean([column.array.density for _, column in self._parent.items()]) + return tmp + + @staticmethod + def _prep_index(data, index, columns): + from pandas.core.indexes.api import ( + default_index, + ensure_index, + ) + + N, K = data.shape + if index is None: + index = default_index(N) + else: + index = ensure_index(index) + if columns is None: + columns = default_index(K) + else: + columns = ensure_index(columns) + + if len(columns) != K: + raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}") + if len(index) != N: + raise ValueError(f"Index length mismatch: {len(index)} vs. 
{N}") + return index, columns diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py new file mode 100644 index 0000000000000000000000000000000000000000..82fcfa74ec7d229faaa7922c7a7dc860da3bc471 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py @@ -0,0 +1,1929 @@ +""" +SparseArray data structure +""" +from __future__ import annotations + +from collections import abc +import numbers +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +import pandas._libs.sparse as splib +from pandas._libs.sparse import ( + BlockIndex, + IntIndex, + SparseIndex, +) +from pandas._libs.tslibs import NaT +from pandas.compat.numpy import function as nv +from pandas.errors import PerformanceWarning +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import ( + validate_bool_kwarg, + validate_insert_loc, +) + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, + find_common_type, + maybe_box_datetimelike, +) +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer, + is_list_like, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + SparseDtype, +) +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, + notna, +) + +from pandas.core import arraylike +import pandas.core.algorithms as algos +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays import ExtensionArray +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, + sanitize_array, +) +from pandas.core.indexers import ( + check_array_indexer, + unpack_tuple_and_ellipses, +) +from pandas.core.nanops import check_below_min_count + +from pandas.io.formats import printing + +# See https://github.com/python/typing/issues/684 +if TYPE_CHECKING: + from collections.abc import Sequence + from enum import Enum + + class ellipsis(Enum): + Ellipsis = "..." + + Ellipsis = ellipsis.Ellipsis + + from scipy.sparse import spmatrix + + from pandas._typing import ( + FillnaOptions, + NumpySorter, + ) + + SparseIndexKind = Literal["integer", "block"] + + from pandas._typing import ( + ArrayLike, + AstypeArg, + Axis, + AxisInt, + Dtype, + NpDtype, + PositionalIndexer, + Scalar, + ScalarIndexer, + Self, + SequenceIndexer, + npt, + ) + + from pandas import Series + +else: + ellipsis = type(Ellipsis) + + +# ---------------------------------------------------------------------------- +# Array + +_sparray_doc_kwargs = {"klass": "SparseArray"} + + +def _get_fill(arr: SparseArray) -> np.ndarray: + """ + Create a 0-dim ndarray containing the fill value + + Parameters + ---------- + arr : SparseArray + + Returns + ------- + fill_value : ndarray + 0-dim ndarray with just the fill value. 
+ + Notes + ----- + coerce fill_value to arr dtype if possible + int64 SparseArray can have NaN as fill_value if there is no missing + """ + try: + return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) + except ValueError: + return np.asarray(arr.fill_value) + + +def _sparse_array_op( + left: SparseArray, right: SparseArray, op: Callable, name: str +) -> SparseArray: + """ + Perform a binary operation between two arrays. + + Parameters + ---------- + left : Union[SparseArray, ndarray] + right : Union[SparseArray, ndarray] + op : Callable + The binary operation to perform + name str + Name of the callable. + + Returns + ------- + SparseArray + """ + if name.startswith("__"): + # For lookups in _libs.sparse we need non-dunder op name + name = name[2:-2] + + # dtype used to find corresponding sparse method + ltype = left.dtype.subtype + rtype = right.dtype.subtype + + if ltype != rtype: + subtype = find_common_type([ltype, rtype]) + ltype = SparseDtype(subtype, left.fill_value) + rtype = SparseDtype(subtype, right.fill_value) + + left = left.astype(ltype, copy=False) + right = right.astype(rtype, copy=False) + dtype = ltype.subtype + else: + dtype = ltype + + # dtype the result must have + result_dtype = None + + if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0: + with np.errstate(all="ignore"): + result = op(left.to_dense(), right.to_dense()) + fill = op(_get_fill(left), _get_fill(right)) + + if left.sp_index.ngaps == 0: + index = left.sp_index + else: + index = right.sp_index + elif left.sp_index.equals(right.sp_index): + with np.errstate(all="ignore"): + result = op(left.sp_values, right.sp_values) + fill = op(_get_fill(left), _get_fill(right)) + index = left.sp_index + else: + if name[0] == "r": + left, right = right, left + name = name[1:] + + if name in ("and", "or", "xor") and dtype == "bool": + opname = f"sparse_{name}_uint8" + # to make template simple, cast here + left_sp_values = left.sp_values.view(np.uint8) + right_sp_values = right.sp_values.view(np.uint8) + result_dtype = bool + else: + opname = f"sparse_{name}_{dtype}" + left_sp_values = left.sp_values + right_sp_values = right.sp_values + + if ( + name in ["floordiv", "mod"] + and (right == 0).any() + and left.dtype.kind in "iu" + ): + # Match the non-Sparse Series behavior + opname = f"sparse_{name}_float64" + left_sp_values = left_sp_values.astype("float64") + right_sp_values = right_sp_values.astype("float64") + + sparse_op = getattr(splib, opname) + + with np.errstate(all="ignore"): + result, index, fill = sparse_op( + left_sp_values, + left.sp_index, + left.fill_value, + right_sp_values, + right.sp_index, + right.fill_value, + ) + + if name == "divmod": + # result is a 2-tuple + # error: Incompatible return value type (got "Tuple[SparseArray, + # SparseArray]", expected "SparseArray") + return ( # type: ignore[return-value] + _wrap_result(name, result[0], index, fill[0], dtype=result_dtype), + _wrap_result(name, result[1], index, fill[1], dtype=result_dtype), + ) + + if result_dtype is None: + result_dtype = result.dtype + + return _wrap_result(name, result, index, fill, dtype=result_dtype) + + +def _wrap_result( + name: str, data, sparse_index, fill_value, dtype: Dtype | None = None +) -> SparseArray: + """ + wrap op result to have correct dtype + """ + if name.startswith("__"): + # e.g. 
__eq__ --> eq + name = name[2:-2] + + if name in ("eq", "ne", "lt", "gt", "le", "ge"): + dtype = bool + + fill_value = lib.item_from_zerodim(fill_value) + + if is_bool_dtype(dtype): + # fill_value may be np.bool_ + fill_value = bool(fill_value) + return SparseArray( + data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype + ) + + +class SparseArray(OpsMixin, PandasObject, ExtensionArray): + """ + An ExtensionArray for storing sparse data. + + Parameters + ---------- + data : array-like or scalar + A dense array of values to store in the SparseArray. This may contain + `fill_value`. + sparse_index : SparseIndex, optional + fill_value : scalar, optional + Elements in data that are ``fill_value`` are not stored in the + SparseArray. For memory savings, this should be the most common value + in `data`. By default, `fill_value` depends on the dtype of `data`: + + =========== ========== + data.dtype na_value + =========== ========== + float ``np.nan`` + int ``0`` + bool False + datetime64 ``pd.NaT`` + timedelta64 ``pd.NaT`` + =========== ========== + + The fill value is potentially specified in three ways. In order of + precedence, these are + + 1. The `fill_value` argument + 2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is + a ``SparseDtype`` + 3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype` + is not a ``SparseDtype`` and `data` is a ``SparseArray``. + + kind : str + Can be 'integer' or 'block', default is 'integer'. + The type of storage for sparse locations. + + * 'block': Stores a `block` and `block_length` for each + contiguous *span* of sparse values. This is best when + sparse data tends to be clumped together, with large + regions of ``fill-value`` values between sparse values. + * 'integer': uses an integer to store the location of + each sparse value. + + dtype : np.dtype or SparseDtype, optional + The dtype to use for the SparseArray. For numpy dtypes, this + determines the dtype of ``self.sp_values``. For SparseDtype, + this determines ``self.sp_values`` and ``self.fill_value``. + copy : bool, default False + Whether to explicitly copy the incoming `data` array. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> arr = SparseArray([0, 0, 1, 2]) + >>> arr + [0, 0, 1, 2] + Fill: 0 + IntIndex + Indices: array([2, 3], dtype=int32) + """ + + _subtyp = "sparse_array" # register ABCSparseArray + _hidden_attrs = PandasObject._hidden_attrs | frozenset([]) + _sparse_index: SparseIndex + _sparse_values: np.ndarray + _dtype: SparseDtype + + def __init__( + self, + data, + sparse_index=None, + fill_value=None, + kind: SparseIndexKind = "integer", + dtype: Dtype | None = None, + copy: bool = False, + ) -> None: + if fill_value is None and isinstance(dtype, SparseDtype): + fill_value = dtype.fill_value + + if isinstance(data, type(self)): + # disable normal inference on dtype, sparse_index, & fill_value + if sparse_index is None: + sparse_index = data.sp_index + if fill_value is None: + fill_value = data.fill_value + if dtype is None: + dtype = data.dtype + # TODO: make kind=None, and use data.kind? 
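+ # The original array's sparse_index, fill_value and dtype were captured
+ # above, so only its dense sp_values need to flow through the rest of
+ # the constructor.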
+ data = data.sp_values + + # Handle use-provided dtype + if isinstance(dtype, str): + # Two options: dtype='int', regular numpy dtype + # or dtype='Sparse[int]', a sparse dtype + try: + dtype = SparseDtype.construct_from_string(dtype) + except TypeError: + dtype = pandas_dtype(dtype) + + if isinstance(dtype, SparseDtype): + if fill_value is None: + fill_value = dtype.fill_value + dtype = dtype.subtype + + if is_scalar(data): + warnings.warn( + f"Constructing {type(self).__name__} with scalar data is deprecated " + "and will raise in a future version. Pass a sequence instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if sparse_index is None: + npoints = 1 + else: + npoints = sparse_index.length + + data = construct_1d_arraylike_from_scalar(data, npoints, dtype=None) + dtype = data.dtype + + if dtype is not None: + dtype = pandas_dtype(dtype) + + # TODO: disentangle the fill_value dtype inference from + # dtype inference + if data is None: + # TODO: What should the empty dtype be? Object or float? + + # error: Argument "dtype" to "array" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + data = np.array([], dtype=dtype) # type: ignore[arg-type] + + try: + data = sanitize_array(data, index=None) + except ValueError: + # NumPy may raise a ValueError on data like [1, []] + # we retry with object dtype here. + if dtype is None: + dtype = np.dtype(object) + data = np.atleast_1d(np.asarray(data, dtype=dtype)) + else: + raise + + if copy: + # TODO: avoid double copy when dtype forces cast. + data = data.copy() + + if fill_value is None: + fill_value_dtype = data.dtype if dtype is None else dtype + if fill_value_dtype is None: + fill_value = np.nan + else: + fill_value = na_value_for_dtype(fill_value_dtype) + + if isinstance(data, type(self)) and sparse_index is None: + sparse_index = data._sparse_index + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected "None" + sparse_values = np.asarray( + data.sp_values, dtype=dtype # type: ignore[arg-type] + ) + elif sparse_index is None: + data = extract_array(data, extract_numpy=True) + if not isinstance(data, np.ndarray): + # EA + if isinstance(data.dtype, DatetimeTZDtype): + warnings.warn( + f"Creating SparseArray from {data.dtype} data " + "loses timezone information. 
Cast to object before " + "sparse to retain timezone information.", + UserWarning, + stacklevel=find_stack_level(), + ) + data = np.asarray(data, dtype="datetime64[ns]") + if fill_value is NaT: + fill_value = np.datetime64("NaT", "ns") + data = np.asarray(data) + sparse_values, sparse_index, fill_value = _make_sparse( + # error: Argument "dtype" to "_make_sparse" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected + # "Optional[dtype[Any]]" + data, + kind=kind, + fill_value=fill_value, + dtype=dtype, # type: ignore[arg-type] + ) + else: + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected "None" + sparse_values = np.asarray(data, dtype=dtype) # type: ignore[arg-type] + if len(sparse_values) != sparse_index.npoints: + raise AssertionError( + f"Non array-like type {type(sparse_values)} must " + "have the same length as the index" + ) + self._sparse_index = sparse_index + self._sparse_values = sparse_values + self._dtype = SparseDtype(sparse_values.dtype, fill_value) + + @classmethod + def _simple_new( + cls, + sparse_array: np.ndarray, + sparse_index: SparseIndex, + dtype: SparseDtype, + ) -> Self: + new = object.__new__(cls) + new._sparse_index = sparse_index + new._sparse_values = sparse_array + new._dtype = dtype + return new + + @classmethod + def from_spmatrix(cls, data: spmatrix) -> Self: + """ + Create a SparseArray from a scipy.sparse matrix. + + Parameters + ---------- + data : scipy.sparse.sp_matrix + This should be a SciPy sparse matrix where the size + of the second dimension is 1. In other words, a + sparse matrix with a single column. + + Returns + ------- + SparseArray + + Examples + -------- + >>> import scipy.sparse + >>> mat = scipy.sparse.coo_matrix((4, 1)) + >>> pd.arrays.SparseArray.from_spmatrix(mat) + [0.0, 0.0, 0.0, 0.0] + Fill: 0.0 + IntIndex + Indices: array([], dtype=int32) + """ + length, ncol = data.shape + + if ncol != 1: + raise ValueError(f"'data' must have a single column, not '{ncol}'") + + # our sparse index classes require that the positions be strictly + # increasing. So we need to sort loc, and arr accordingly. + data = data.tocsc() + data.sort_indices() + arr = data.data + idx = data.indices + + zero = np.array(0, dtype=arr.dtype).item() + dtype = SparseDtype(arr.dtype, zero) + index = IntIndex(length, idx) + + return cls._simple_new(arr, index, dtype) + + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + fill_value = self.fill_value + + if self.sp_index.ngaps == 0: + # Compat for na dtype and int values. + return self.sp_values + if dtype is None: + # Can NumPy represent this type? + # If not, `np.result_type` will raise. We catch that + # and return object. + if self.sp_values.dtype.kind == "M": + # However, we *do* special-case the common case of + # a datetime64 with pandas NaT. + if fill_value is NaT: + # Can't put pd.NaT in a datetime64[ns] + fill_value = np.datetime64("NaT") + try: + dtype = np.result_type(self.sp_values.dtype, type(fill_value)) + except TypeError: + dtype = object + + out = np.full(self.shape, fill_value, dtype=dtype) + out[self.sp_index.indices] = self.sp_values + return out + + def __setitem__(self, key, value) -> None: + # I suppose we could allow setting of non-fill_value elements. 
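+ # In practice any assignment could change which positions are stored
+ # and would mean rebuilding sp_index, so item assignment is rejected.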
+ # TODO(SparseArray.__setitem__): remove special cases in + # ExtensionBlock.where + msg = "SparseArray does not support item assignment via setitem" + raise TypeError(msg) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + return cls(scalars, dtype=dtype) + + @classmethod + def _from_factorized(cls, values, original): + return cls(values, dtype=original.dtype) + + # ------------------------------------------------------------------------ + # Data + # ------------------------------------------------------------------------ + @property + def sp_index(self) -> SparseIndex: + """ + The SparseIndex containing the location of non- ``fill_value`` points. + """ + return self._sparse_index + + @property + def sp_values(self) -> np.ndarray: + """ + An ndarray containing the non- ``fill_value`` values. + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0) + >>> s.sp_values + array([1, 2]) + """ + return self._sparse_values + + @property + def dtype(self) -> SparseDtype: + return self._dtype + + @property + def fill_value(self): + """ + Elements in `data` that are `fill_value` are not stored. + + For memory savings, this should be the most common value in the array. + + Examples + -------- + >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]") + >>> ser.sparse.fill_value + 0 + >>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2) + >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype) + >>> ser.sparse.fill_value + 2 + """ + return self.dtype.fill_value + + @fill_value.setter + def fill_value(self, value) -> None: + self._dtype = SparseDtype(self.dtype.subtype, value) + + @property + def kind(self) -> SparseIndexKind: + """ + The kind of sparse index for this array. One of {'integer', 'block'}. + """ + if isinstance(self.sp_index, IntIndex): + return "integer" + else: + return "block" + + @property + def _valid_sp_values(self) -> np.ndarray: + sp_vals = self.sp_values + mask = notna(sp_vals) + return sp_vals[mask] + + def __len__(self) -> int: + return self.sp_index.length + + @property + def _null_fill_value(self) -> bool: + return self._dtype._is_na_fill_value + + def _fill_value_matches(self, fill_value) -> bool: + if self._null_fill_value: + return isna(fill_value) + else: + return self.fill_value == fill_value + + @property + def nbytes(self) -> int: + return self.sp_values.nbytes + self.sp_index.nbytes + + @property + def density(self) -> float: + """ + The percent of non- ``fill_value`` points, as decimal. + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) + >>> s.density + 0.6 + """ + return self.sp_index.npoints / self.sp_index.length + + @property + def npoints(self) -> int: + """ + The number of non- ``fill_value`` points. + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) + >>> s.npoints + 3 + """ + return self.sp_index.npoints + + # error: Return type "SparseArray" of "isna" incompatible with return type + # "ndarray[Any, Any] | ExtensionArraySupportsAnyAll" in supertype "ExtensionArray" + def isna(self) -> Self: # type: ignore[override] + # If null fill value, we want SparseDtype[bool, true] + # to preserve the same memory usage. 
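+ # Reusing sp_index keeps memory flat: gaps become NA via fill_value=True,
+ # and stored points are NA exactly where sp_values is NaN.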
+ dtype = SparseDtype(bool, self._null_fill_value) + if self._null_fill_value: + return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype) + mask = np.full(len(self), False, dtype=np.bool_) + mask[self.sp_index.indices] = isna(self.sp_values) + return type(self)(mask, fill_value=False, dtype=dtype) + + def _pad_or_backfill( # pylint: disable=useless-parent-delegation + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + # TODO(3.0): We can remove this method once deprecation for fillna method + # keyword is enforced. + return super()._pad_or_backfill( + method=method, limit=limit, limit_area=limit_area, copy=copy + ) + + def fillna( + self, + value=None, + method: FillnaOptions | None = None, + limit: int | None = None, + copy: bool = True, + ) -> Self: + """ + Fill missing values with `value`. + + Parameters + ---------- + value : scalar, optional + method : str, optional + + .. warning:: + + Using 'method' will result in high memory use, + as all `fill_value` methods will be converted to + an in-memory ndarray + + limit : int, optional + + copy: bool, default True + Ignored for SparseArray. + + Returns + ------- + SparseArray + + Notes + ----- + When `value` is specified, the result's ``fill_value`` depends on + ``self.fill_value``. The goal is to maintain low-memory use. + + If ``self.fill_value`` is NA, the result dtype will be + ``SparseDtype(self.dtype, fill_value=value)``. This will preserve + amount of memory used before and after filling. + + When ``self.fill_value`` is not NA, the result dtype will be + ``self.dtype``. Again, this preserves the amount of memory used. + """ + if (method is None and value is None) or ( + method is not None and value is not None + ): + raise ValueError("Must specify one of 'method' or 'value'.") + + if method is not None: + return super().fillna(method=method, limit=limit) + + else: + new_values = np.where(isna(self.sp_values), value, self.sp_values) + + if self._null_fill_value: + # This is essentially just updating the dtype. + new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) + else: + new_dtype = self.dtype + + return self._simple_new(new_values, self._sparse_index, new_dtype) + + def shift(self, periods: int = 1, fill_value=None) -> Self: + if not len(self) or periods == 0: + return self.copy() + + if isna(fill_value): + fill_value = self.dtype.na_value + + subtype = np.result_type(fill_value, self.dtype.subtype) + + if subtype != self.dtype.subtype: + # just coerce up front + arr = self.astype(SparseDtype(subtype, self.fill_value)) + else: + arr = self + + empty = self._from_sequence( + [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype + ) + + if periods > 0: + a = empty + b = arr[:-periods] + else: + a = arr[abs(periods) :] + b = empty + return arr._concat_same_type([a, b]) + + def _first_fill_value_loc(self): + """ + Get the location of the first fill value. 
+ + Returns + ------- + int + """ + if len(self) == 0 or self.sp_index.npoints == len(self): + return -1 + + indices = self.sp_index.indices + if not len(indices) or indices[0] > 0: + return 0 + + # a number larger than 1 should be appended to + # the last in case of fill value only appears + # in the tail of array + diff = np.r_[np.diff(indices), 2] + return indices[(diff > 1).argmax()] + 1 + + @doc(ExtensionArray.duplicated) + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + values = np.asarray(self) + mask = np.asarray(self.isna()) + return algos.duplicated(values, keep=keep, mask=mask) + + def unique(self) -> Self: + uniques = algos.unique(self.sp_values) + if len(self.sp_values) != len(self): + fill_loc = self._first_fill_value_loc() + # Inorder to align the behavior of pd.unique or + # pd.Series.unique, we should keep the original + # order, here we use unique again to find the + # insertion place. Since the length of sp_values + # is not large, maybe minor performance hurt + # is worthwhile to the correctness. + insert_loc = len(algos.unique(self.sp_values[:fill_loc])) + uniques = np.insert(uniques, insert_loc, self.fill_value) + return type(self)._from_sequence(uniques, dtype=self.dtype) + + def _values_for_factorize(self): + # Still override this for hash_pandas_object + return np.asarray(self), self.fill_value + + def factorize( + self, + use_na_sentinel: bool = True, + ) -> tuple[np.ndarray, SparseArray]: + # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA] + # The sparsity on this is backwards from what Sparse would want. Want + # ExtensionArray.factorize -> Tuple[EA, EA] + # Given that we have to return a dense array of codes, why bother + # implementing an efficient factorize? + codes, uniques = algos.factorize( + np.asarray(self), use_na_sentinel=use_na_sentinel + ) + uniques_sp = SparseArray(uniques, dtype=self.dtype) + return codes, uniques_sp + + def value_counts(self, dropna: bool = True) -> Series: + """ + Returns a Series containing counts of unique values. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NaN, even if NaN is in sp_values. + + Returns + ------- + counts : Series + """ + from pandas import ( + Index, + Series, + ) + + keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna) + fcounts = self.sp_index.ngaps + if fcounts > 0 and (not self._null_fill_value or not dropna): + mask = isna(keys) if self._null_fill_value else keys == self.fill_value + if mask.any(): + counts[mask] += fcounts + else: + # error: Argument 1 to "insert" has incompatible type "Union[ + # ExtensionArray,ndarray[Any, Any]]"; expected "Union[ + # _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype + # [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]], + # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence + # [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]" + keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type] + counts = np.insert(counts, 0, fcounts) + + if not isinstance(keys, ABCIndex): + index = Index(keys) + else: + index = keys + return Series(counts, index=index, copy=False) + + # -------- + # Indexing + # -------- + @overload + def __getitem__(self, key: ScalarIndexer) -> Any: + ... + + @overload + def __getitem__( + self, + key: SequenceIndexer | tuple[int | ellipsis, ...], + ) -> Self: + ... 
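+ # The overloads above only describe the scalar vs. sequence return types
+ # for static type checkers; the runtime dispatch happens below.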
+ + def __getitem__( + self, + key: PositionalIndexer | tuple[int | ellipsis, ...], + ) -> Self | Any: + if isinstance(key, tuple): + key = unpack_tuple_and_ellipses(key) + if key is Ellipsis: + raise ValueError("Cannot slice with Ellipsis") + + if is_integer(key): + return self._get_val_at(key) + elif isinstance(key, tuple): + # error: Invalid index type "Tuple[Union[int, ellipsis], ...]" + # for "ndarray[Any, Any]"; expected type + # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_, + # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[ + # Union[bool_, integer[Any]]]]], _NestedSequence[Union[ + # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[ + # dtype[Union[bool_, integer[Any]]]], _NestedSequence[ + # _SupportsArray[dtype[Union[bool_, integer[Any]]]]], + # _NestedSequence[Union[bool, int]]], ...]]" + data_slice = self.to_dense()[key] # type: ignore[index] + elif isinstance(key, slice): + # Avoid densifying when handling contiguous slices + if key.step is None or key.step == 1: + start = 0 if key.start is None else key.start + if start < 0: + start += len(self) + + end = len(self) if key.stop is None else key.stop + if end < 0: + end += len(self) + + indices = self.sp_index.indices + keep_inds = np.flatnonzero((indices >= start) & (indices < end)) + sp_vals = self.sp_values[keep_inds] + + sp_index = indices[keep_inds].copy() + + # If we've sliced to not include the start of the array, all our indices + # should be shifted. NB: here we are careful to also not shift by a + # negative value for a case like [0, 1][-100:] where the start index + # should be treated like 0 + if start > 0: + sp_index -= start + + # Length of our result should match applying this slice to a range + # of the length of our original array + new_len = len(range(len(self))[key]) + new_sp_index = make_sparse_index(new_len, sp_index, self.kind) + return type(self)._simple_new(sp_vals, new_sp_index, self.dtype) + else: + indices = np.arange(len(self), dtype=np.int32)[key] + return self.take(indices) + + elif not is_list_like(key): + # e.g. 
"foo" or 2.5 + # exception message copied from numpy + raise IndexError( + r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " + r"(`None`) and integer or boolean arrays are valid indices" + ) + + else: + if isinstance(key, SparseArray): + # NOTE: If we guarantee that SparseDType(bool) + # has only fill_value - true, false or nan + # (see GH PR 44955) + # we can apply mask very fast: + if is_bool_dtype(key): + if isna(key.fill_value): + return self.take(key.sp_index.indices[key.sp_values]) + if not key.fill_value: + return self.take(key.sp_index.indices) + n = len(self) + mask = np.full(n, True, dtype=np.bool_) + mask[key.sp_index.indices] = False + return self.take(np.arange(n)[mask]) + else: + key = np.asarray(key) + + key = check_array_indexer(self, key) + + if com.is_bool_indexer(key): + # mypy doesn't know we have an array here + key = cast(np.ndarray, key) + return self.take(np.arange(len(key), dtype=np.int32)[key]) + elif hasattr(key, "__len__"): + return self.take(key) + else: + raise ValueError(f"Cannot slice with '{key}'") + + return type(self)(data_slice, kind=self.kind) + + def _get_val_at(self, loc): + loc = validate_insert_loc(loc, len(self)) + + sp_loc = self.sp_index.lookup(loc) + if sp_loc == -1: + return self.fill_value + else: + val = self.sp_values[sp_loc] + val = maybe_box_datetimelike(val, self.sp_values.dtype) + return val + + def take(self, indices, *, allow_fill: bool = False, fill_value=None) -> Self: + if is_scalar(indices): + raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.") + indices = np.asarray(indices, dtype=np.int32) + + dtype = None + if indices.size == 0: + result = np.array([], dtype="object") + dtype = self.dtype + elif allow_fill: + result = self._take_with_fill(indices, fill_value=fill_value) + else: + return self._take_without_fill(indices) + + return type(self)( + result, fill_value=self.fill_value, kind=self.kind, dtype=dtype + ) + + def _take_with_fill(self, indices, fill_value=None) -> np.ndarray: + if fill_value is None: + fill_value = self.dtype.na_value + + if indices.min() < -1: + raise ValueError( + "Invalid value in 'indices'. Must be between -1 " + "and the length of the array." + ) + + if indices.max() >= len(self): + raise IndexError("out of bounds value in 'indices'.") + + if len(self) == 0: + # Empty... Allow taking only if all empty + if (indices == -1).all(): + dtype = np.result_type(self.sp_values, type(fill_value)) + taken = np.empty_like(indices, dtype=dtype) + taken.fill(fill_value) + return taken + else: + raise IndexError("cannot do a non-empty take from an empty axes.") + + # sp_indexer may be -1 for two reasons + # 1.) we took for an index of -1 (new) + # 2.) we took a value that was self.fill_value (old) + sp_indexer = self.sp_index.lookup_array(indices) + new_fill_indices = indices == -1 + old_fill_indices = (sp_indexer == -1) & ~new_fill_indices + + if self.sp_index.npoints == 0 and old_fill_indices.all(): + # We've looked up all valid points on an all-sparse array. + taken = np.full( + sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype + ) + + elif self.sp_index.npoints == 0: + # Use the old fill_value unless we took for an index of -1 + _dtype = np.result_type(self.dtype.subtype, type(fill_value)) + taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype) + taken[old_fill_indices] = self.fill_value + else: + taken = self.sp_values.take(sp_indexer) + + # Fill in two steps. 
+ # Old fill values + # New fill values + # potentially coercing to a new dtype at each stage. + + m0 = sp_indexer[old_fill_indices] < 0 + m1 = sp_indexer[new_fill_indices] < 0 + + result_type = taken.dtype + + if m0.any(): + result_type = np.result_type(result_type, type(self.fill_value)) + taken = taken.astype(result_type) + taken[old_fill_indices] = self.fill_value + + if m1.any(): + result_type = np.result_type(result_type, type(fill_value)) + taken = taken.astype(result_type) + taken[new_fill_indices] = fill_value + + return taken + + def _take_without_fill(self, indices) -> Self: + to_shift = indices < 0 + + n = len(self) + + if (indices.max() >= n) or (indices.min() < -n): + if n == 0: + raise IndexError("cannot do a non-empty take from an empty axes.") + raise IndexError("out of bounds value in 'indices'.") + + if to_shift.any(): + indices = indices.copy() + indices[to_shift] += n + + sp_indexer = self.sp_index.lookup_array(indices) + value_mask = sp_indexer != -1 + new_sp_values = self.sp_values[sp_indexer[value_mask]] + + value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False) + + new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind) + return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype) + + def searchsorted( + self, + v: ArrayLike | object, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + msg = "searchsorted requires high memory usage." + warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) + v = np.asarray(v) + return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter) + + def copy(self) -> Self: + values = self.sp_values.copy() + return self._simple_new(values, self.sp_index, self.dtype) + + @classmethod + def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self: + fill_value = to_concat[0].fill_value + + values = [] + length = 0 + + if to_concat: + sp_kind = to_concat[0].kind + else: + sp_kind = "integer" + + sp_index: SparseIndex + if sp_kind == "integer": + indices = [] + + for arr in to_concat: + int_idx = arr.sp_index.indices.copy() + int_idx += length # TODO: wraparound + length += arr.sp_index.length + + values.append(arr.sp_values) + indices.append(int_idx) + + data = np.concatenate(values) + indices_arr = np.concatenate(indices) + # error: Argument 2 to "IntIndex" has incompatible type + # "ndarray[Any, dtype[signedinteger[_32Bit]]]"; + # expected "Sequence[int]" + sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type] + + else: + # when concatenating block indices, we don't claim that you'll + # get an identical index as concatenating the values and then + # creating a new index. We don't want to spend the time trying + # to merge blocks across arrays in `to_concat`, so the resulting + # BlockIndex may have more blocks. + blengths = [] + blocs = [] + + for arr in to_concat: + block_idx = arr.sp_index.to_block_index() + + values.append(arr.sp_values) + blocs.append(block_idx.blocs.copy() + length) + blengths.append(block_idx.blengths) + length += arr.sp_index.length + + data = np.concatenate(values) + blocs_arr = np.concatenate(blocs) + blengths_arr = np.concatenate(blengths) + + sp_index = BlockIndex(length, blocs_arr, blengths_arr) + + return cls(data, sparse_index=sp_index, fill_value=fill_value) + + def astype(self, dtype: AstypeArg | None = None, copy: bool = True): + """ + Change the dtype of a SparseArray. + + The output will always be a SparseArray. 
To convert to a dense + ndarray with a certain dtype, use :meth:`numpy.asarray`. + + Parameters + ---------- + dtype : np.dtype or ExtensionDtype + For SparseDtype, this changes the dtype of + ``self.sp_values`` and the ``self.fill_value``. + + For other dtypes, this only changes the dtype of + ``self.sp_values``. + + copy : bool, default True + Whether to ensure a copy is made, even if not necessary. + + Returns + ------- + SparseArray + + Examples + -------- + >>> arr = pd.arrays.SparseArray([0, 0, 1, 2]) + >>> arr + [0, 0, 1, 2] + Fill: 0 + IntIndex + Indices: array([2, 3], dtype=int32) + + >>> arr.astype(SparseDtype(np.dtype('int32'))) + [0, 0, 1, 2] + Fill: 0 + IntIndex + Indices: array([2, 3], dtype=int32) + + Using a NumPy dtype with a different kind (e.g. float) will coerce + just ``self.sp_values``. + + >>> arr.astype(SparseDtype(np.dtype('float64'))) + ... # doctest: +NORMALIZE_WHITESPACE + [nan, nan, 1.0, 2.0] + Fill: nan + IntIndex + Indices: array([2, 3], dtype=int32) + + Using a SparseDtype, you can also change the fill value as well. + + >>> arr.astype(SparseDtype("float64", fill_value=0.0)) + ... # doctest: +NORMALIZE_WHITESPACE + [0.0, 0.0, 1.0, 2.0] + Fill: 0.0 + IntIndex + Indices: array([2, 3], dtype=int32) + """ + if dtype == self._dtype: + if not copy: + return self + else: + return self.copy() + + future_dtype = pandas_dtype(dtype) + if not isinstance(future_dtype, SparseDtype): + # GH#34457 + values = np.asarray(self) + values = ensure_wrapped_if_datetimelike(values) + return astype_array(values, dtype=future_dtype, copy=False) + + dtype = self.dtype.update_dtype(dtype) + subtype = pandas_dtype(dtype._subtype_with_str) + subtype = cast(np.dtype, subtype) # ensured by update_dtype + values = ensure_wrapped_if_datetimelike(self.sp_values) + sp_values = astype_array(values, subtype, copy=copy) + sp_values = np.asarray(sp_values) + + return self._simple_new(sp_values, self.sp_index, dtype) + + def map(self, mapper, na_action=None) -> Self: + """ + Map categories using an input mapping or function. + + Parameters + ---------- + mapper : dict, Series, callable + The correspondence from old values to new. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NA values, without passing them to the + mapping correspondence. + + Returns + ------- + SparseArray + The output array will have the same density as the input. 
+ The output fill value will be the result of applying the + mapping to ``self.fill_value`` + + Examples + -------- + >>> arr = pd.arrays.SparseArray([0, 1, 2]) + >>> arr.map(lambda x: x + 10) + [10, 11, 12] + Fill: 10 + IntIndex + Indices: array([1, 2], dtype=int32) + + >>> arr.map({0: 10, 1: 11, 2: 12}) + [10, 11, 12] + Fill: 10 + IntIndex + Indices: array([1, 2], dtype=int32) + + >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2])) + [10, 11, 12] + Fill: 10 + IntIndex + Indices: array([1, 2], dtype=int32) + """ + is_map = isinstance(mapper, (abc.Mapping, ABCSeries)) + + fill_val = self.fill_value + + if na_action is None or notna(fill_val): + fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val) + + def func(sp_val): + new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val) + # check identity and equality because nans are not equal to each other + if new_sp_val is fill_val or new_sp_val == fill_val: + msg = "fill value in the sparse values not supported" + raise ValueError(msg) + return new_sp_val + + sp_values = [func(x) for x in self.sp_values] + + return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val) + + def to_dense(self) -> np.ndarray: + """ + Convert SparseArray to a NumPy array. + + Returns + ------- + arr : NumPy array + """ + return np.asarray(self, dtype=self.sp_values.dtype) + + def _where(self, mask, value): + # NB: may not preserve dtype, e.g. result may be Sparse[float64] + # while self is Sparse[int64] + naive_implementation = np.where(mask, self, value) + dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value) + result = type(self)._from_sequence(naive_implementation, dtype=dtype) + return result + + # ------------------------------------------------------------------------ + # IO + # ------------------------------------------------------------------------ + def __setstate__(self, state) -> None: + """Necessary for making this object picklable""" + if isinstance(state, tuple): + # Compat for pandas < 0.24.0 + nd_state, (fill_value, sp_index) = state + sparse_values = np.array([]) + sparse_values.__setstate__(nd_state) + + self._sparse_values = sparse_values + self._sparse_index = sp_index + self._dtype = SparseDtype(sparse_values.dtype, fill_value) + else: + self.__dict__.update(state) + + def nonzero(self) -> tuple[npt.NDArray[np.int32]]: + if self.fill_value == 0: + return (self.sp_index.indices,) + else: + return (self.sp_index.indices[self.sp_values != 0],) + + # ------------------------------------------------------------------------ + # Reductions + # ------------------------------------------------------------------------ + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + method = getattr(self, name, None) + + if method is None: + raise TypeError(f"cannot perform {name} with type {self.dtype}") + + if skipna: + arr = self + else: + arr = self.dropna() + + result = getattr(arr, name)(**kwargs) + + if keepdims: + return type(self)([result], dtype=self.dtype) + else: + return result + + def all(self, axis=None, *args, **kwargs): + """ + Tests whether all elements evaluate True + + Returns + ------- + all : bool + + See Also + -------- + numpy.all + """ + nv.validate_all(args, kwargs) + + values = self.sp_values + + if len(values) != len(self) and not np.all(self.fill_value): + return False + + return values.all() + + def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool: + """ + Tests whether at least one of elements evaluate True + + 
Returns + ------- + any : bool + + See Also + -------- + numpy.any + """ + nv.validate_any(args, kwargs) + + values = self.sp_values + + if len(values) != len(self) and np.any(self.fill_value): + return True + + return values.any().item() + + def sum( + self, + axis: AxisInt = 0, + min_count: int = 0, + skipna: bool = True, + *args, + **kwargs, + ) -> Scalar: + """ + Sum of non-NA/null values + + Parameters + ---------- + axis : int, default 0 + Not Used. NumPy compatibility. + min_count : int, default 0 + The required number of valid values to perform the summation. If fewer + than ``min_count`` valid values are present, the result will be the missing + value indicator for subarray type. + *args, **kwargs + Not Used. NumPy compatibility. + + Returns + ------- + scalar + """ + nv.validate_sum(args, kwargs) + valid_vals = self._valid_sp_values + sp_sum = valid_vals.sum() + has_na = self.sp_index.ngaps > 0 and not self._null_fill_value + + if has_na and not skipna: + return na_value_for_dtype(self.dtype.subtype, compat=False) + + if self._null_fill_value: + if check_below_min_count(valid_vals.shape, None, min_count): + return na_value_for_dtype(self.dtype.subtype, compat=False) + return sp_sum + else: + nsparse = self.sp_index.ngaps + if check_below_min_count(valid_vals.shape, None, min_count - nsparse): + return na_value_for_dtype(self.dtype.subtype, compat=False) + return sp_sum + self.fill_value * nsparse + + def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray: + """ + Cumulative sum of non-NA/null values. + + When performing the cumulative summation, any non-NA/null values will + be skipped. The resulting SparseArray will preserve the locations of + NaN values, but the fill value will be `np.nan` regardless. + + Parameters + ---------- + axis : int or None + Axis over which to perform the cumulative summation. If None, + perform cumulative summation over flattened array. + + Returns + ------- + cumsum : SparseArray + """ + nv.validate_cumsum(args, kwargs) + + if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour. + raise ValueError(f"axis(={axis}) out of bounds") + + if not self._null_fill_value: + return SparseArray(self.to_dense()).cumsum() + + return SparseArray( + self.sp_values.cumsum(), + sparse_index=self.sp_index, + fill_value=self.fill_value, + ) + + def mean(self, axis: Axis = 0, *args, **kwargs): + """ + Mean of non-NA/null values + + Returns + ------- + mean : float + """ + nv.validate_mean(args, kwargs) + valid_vals = self._valid_sp_values + sp_sum = valid_vals.sum() + ct = len(valid_vals) + + if self._null_fill_value: + return sp_sum / ct + else: + nsparse = self.sp_index.ngaps + return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) + + def max(self, *, axis: AxisInt | None = None, skipna: bool = True): + """ + Max of array values, ignoring NA values if specified. + + Parameters + ---------- + axis : int, default 0 + Not Used. NumPy compatibility. + skipna : bool, default True + Whether to ignore NA values. + + Returns + ------- + scalar + """ + nv.validate_minmax_axis(axis, self.ndim) + return self._min_max("max", skipna=skipna) + + def min(self, *, axis: AxisInt | None = None, skipna: bool = True): + """ + Min of array values, ignoring NA values if specified. + + Parameters + ---------- + axis : int, default 0 + Not Used. NumPy compatibility. + skipna : bool, default True + Whether to ignore NA values. 
+ + Returns + ------- + scalar + """ + nv.validate_minmax_axis(axis, self.ndim) + return self._min_max("min", skipna=skipna) + + def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar: + """ + Min/max of non-NA/null values + + Parameters + ---------- + kind : {"min", "max"} + skipna : bool + + Returns + ------- + scalar + """ + valid_vals = self._valid_sp_values + has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0 + + if len(valid_vals) > 0: + sp_min_max = getattr(valid_vals, kind)() + + # If a non-null fill value is currently present, it might be the min/max + if has_nonnull_fill_vals: + func = max if kind == "max" else min + return func(sp_min_max, self.fill_value) + elif skipna: + return sp_min_max + elif self.sp_index.ngaps == 0: + # No NAs present + return sp_min_max + else: + return na_value_for_dtype(self.dtype.subtype, compat=False) + elif has_nonnull_fill_vals: + return self.fill_value + else: + return na_value_for_dtype(self.dtype.subtype, compat=False) + + def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int: + values = self._sparse_values + index = self._sparse_index.indices + mask = np.asarray(isna(values)) + func = np.argmax if kind == "argmax" else np.argmin + + idx = np.arange(values.shape[0]) + non_nans = values[~mask] + non_nan_idx = idx[~mask] + + _candidate = non_nan_idx[func(non_nans)] + candidate = index[_candidate] + + if isna(self.fill_value): + return candidate + if kind == "argmin" and self[candidate] < self.fill_value: + return candidate + if kind == "argmax" and self[candidate] > self.fill_value: + return candidate + _loc = self._first_fill_value_loc() + if _loc == -1: + # fill_value doesn't exist + return candidate + else: + return _loc + + def argmax(self, skipna: bool = True) -> int: + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return self._argmin_argmax("argmax") + + def argmin(self, skipna: bool = True) -> int: + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return self._argmin_argmax("argmin") + + # ------------------------------------------------------------------------ + # Ufuncs + # ------------------------------------------------------------------------ + + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + out = kwargs.get("out", ()) + + for x in inputs + out: + if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)): + return NotImplemented + + # for binary ops, use our custom dunder methods + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace + res = arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + return res + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + # e.g. tests.series.test_ufunc.TestNumpyReductions + return result + + if len(inputs) == 1: + # No alignment necessary. + sp_values = getattr(ufunc, method)(self.sp_values, **kwargs) + fill_value = getattr(ufunc, method)(self.fill_value, **kwargs) + + if ufunc.nout > 1: + # multiple outputs. e.g. 
modf + arrays = tuple( + self._simple_new( + sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv) + ) + for sp_value, fv in zip(sp_values, fill_value) + ) + return arrays + elif method == "reduce": + # e.g. reductions + return sp_values + + return self._simple_new( + sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value) + ) + + new_inputs = tuple(np.asarray(x) for x in inputs) + result = getattr(ufunc, method)(*new_inputs, **kwargs) + if out: + if len(out) == 1: + out = out[0] + return out + + if ufunc.nout > 1: + return tuple(type(self)(x) for x in result) + elif method == "at": + # no return value + return None + else: + return type(self)(result) + + # ------------------------------------------------------------------------ + # Ops + # ------------------------------------------------------------------------ + + def _arith_method(self, other, op): + op_name = op.__name__ + + if isinstance(other, SparseArray): + return _sparse_array_op(self, other, op, op_name) + + elif is_scalar(other): + with np.errstate(all="ignore"): + fill = op(_get_fill(self), np.asarray(other)) + result = op(self.sp_values, other) + + if op_name == "divmod": + left, right = result + lfill, rfill = fill + return ( + _wrap_result(op_name, left, self.sp_index, lfill), + _wrap_result(op_name, right, self.sp_index, rfill), + ) + + return _wrap_result(op_name, result, self.sp_index, fill) + + else: + other = np.asarray(other) + with np.errstate(all="ignore"): + if len(self) != len(other): + raise AssertionError( + f"length mismatch: {len(self)} vs. {len(other)}" + ) + if not isinstance(other, SparseArray): + dtype = getattr(other, "dtype", None) + other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) + return _sparse_array_op(self, other, op, op_name) + + def _cmp_method(self, other, op) -> SparseArray: + if not is_scalar(other) and not isinstance(other, type(self)): + # convert list-like to ndarray + other = np.asarray(other) + + if isinstance(other, np.ndarray): + # TODO: make this more flexible than just ndarray... 
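+ # Wrapping the plain ndarray in a SparseArray that shares our fill_value lets the comparison below reuse _sparse_array_op like the other branches.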
+ other = SparseArray(other, fill_value=self.fill_value) + + if isinstance(other, SparseArray): + if len(self) != len(other): + raise ValueError( + f"operands have mismatched length {len(self)} and {len(other)}" + ) + + op_name = op.__name__.strip("_") + return _sparse_array_op(self, other, op, op_name) + else: + # scalar + fill_value = op(self.fill_value, other) + result = np.full(len(self), fill_value, dtype=np.bool_) + result[self.sp_index.indices] = op(self.sp_values, other) + + return type(self)( + result, + fill_value=fill_value, + dtype=np.bool_, + ) + + _logical_method = _cmp_method + + def _unary_method(self, op) -> SparseArray: + fill_value = op(np.array(self.fill_value)).item() + dtype = SparseDtype(self.dtype.subtype, fill_value) + # NOTE: if fill_value doesn't change + # we just have to apply op to sp_values + if isna(self.fill_value) or fill_value == self.fill_value: + values = op(self.sp_values) + return type(self)._simple_new(values, self.sp_index, self.dtype) + # In the other case we have to recalc indexes + return type(self)(op(self.to_dense()), dtype=dtype) + + def __pos__(self) -> SparseArray: + return self._unary_method(operator.pos) + + def __neg__(self) -> SparseArray: + return self._unary_method(operator.neg) + + def __invert__(self) -> SparseArray: + return self._unary_method(operator.invert) + + def __abs__(self) -> SparseArray: + return self._unary_method(operator.abs) + + # ---------- + # Formatting + # ----------- + def __repr__(self) -> str: + pp_str = printing.pprint_thing(self) + pp_fill = printing.pprint_thing(self.fill_value) + pp_index = printing.pprint_thing(self.sp_index) + return f"{pp_str}\nFill: {pp_fill}\n{pp_index}" + + def _formatter(self, boxed: bool = False): + # Defer to the formatter from the GenericArrayFormatter calling us. + # This will infer the correct formatter from the dtype of the values. + return None + + +def _make_sparse( + arr: np.ndarray, + kind: SparseIndexKind = "block", + fill_value=None, + dtype: np.dtype | None = None, +): + """ + Convert ndarray to sparse format + + Parameters + ---------- + arr : ndarray + kind : {'block', 'integer'} + fill_value : NaN or another value + dtype : np.dtype, optional + copy : bool, default False + + Returns + ------- + (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar) + """ + assert isinstance(arr, np.ndarray) + + if arr.ndim > 1: + raise TypeError("expected dimension <= 1 data") + + if fill_value is None: + fill_value = na_value_for_dtype(arr.dtype) + + if isna(fill_value): + mask = notna(arr) + else: + # cast to object comparison to be safe + if is_string_dtype(arr.dtype): + arr = arr.astype(object) + + if is_object_dtype(arr.dtype): + # element-wise equality check method in numpy doesn't treat + # each element type, eg. 0, 0.0, and False are treated as + # same. So we have to check the both of its type and value. 
+ mask = splib.make_mask_object_ndarray(arr, fill_value) + else: + mask = arr != fill_value + + length = len(arr) + if length != len(mask): + # the arr is a SparseArray + indices = mask.sp_index.indices + else: + indices = mask.nonzero()[0].astype(np.int32) + + index = make_sparse_index(length, indices, kind) + sparsified_values = arr[mask] + if dtype is not None: + sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values) + sparsified_values = astype_array(sparsified_values, dtype=dtype) + sparsified_values = np.asarray(sparsified_values) + + # TODO: copy + return sparsified_values, index, fill_value + + +@overload +def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex: + ... + + +@overload +def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex: + ... + + +def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex: + index: SparseIndex + if kind == "block": + locs, lens = splib.get_blocks(indices) + index = BlockIndex(length, locs, lens) + elif kind == "integer": + index = IntIndex(length, indices) + else: # pragma: no cover + raise ValueError("must be block or integer type") + return index diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/string_.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/string_.py new file mode 100644 index 0000000000000000000000000000000000000000..00197a150fb97c47c510e189eef6cd4312b188e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/string_.py @@ -0,0 +1,657 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + ClassVar, + Literal, +) + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.lib import ensure_string_array +from pandas.compat import pa_version_under10p1 +from pandas.compat.numpy import function as nv +from pandas.util._decorators import doc + +from pandas.core.dtypes.base import ( + ExtensionDtype, + StorageExtensionDtype, + register_extension_dtype, +) +from pandas.core.dtypes.common import ( + is_array_like, + is_bool_dtype, + is_integer_dtype, + is_object_dtype, + is_string_dtype, + pandas_dtype, +) + +from pandas.core import ops +from pandas.core.array_algos import masked_reductions +from pandas.core.arrays.base import ExtensionArray +from pandas.core.arrays.floating import ( + FloatingArray, + FloatingDtype, +) +from pandas.core.arrays.integer import ( + IntegerArray, + IntegerDtype, +) +from pandas.core.arrays.numpy_ import NumpyExtensionArray +from pandas.core.construction import extract_array +from pandas.core.indexers import check_array_indexer +from pandas.core.missing import isna + +if TYPE_CHECKING: + import pyarrow + + from pandas._typing import ( + AxisInt, + Dtype, + DtypeObj, + NumpySorter, + NumpyValueArrayLike, + Scalar, + Self, + npt, + type_t, + ) + + from pandas import Series + + +@register_extension_dtype +class StringDtype(StorageExtensionDtype): + """ + Extension dtype for string data. + + .. warning:: + + StringDtype is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + storage : {"python", "pyarrow", "pyarrow_numpy"}, optional + If not given, the value of ``pd.options.mode.string_storage``. 
+ + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.StringDtype() + string[python] + + >>> pd.StringDtype(storage="pyarrow") + string[pyarrow] + """ + + # error: Cannot override instance variable (previously declared on + # base class "StorageExtensionDtype") with class variable + name: ClassVar[str] = "string" # type: ignore[misc] + + #: StringDtype().na_value uses pandas.NA except the implementation that + # follows NumPy semantics, which uses nan. + @property + def na_value(self) -> libmissing.NAType | float: # type: ignore[override] + if self.storage == "pyarrow_numpy": + return np.nan + else: + return libmissing.NA + + _metadata = ("storage",) + + def __init__(self, storage=None) -> None: + if storage is None: + infer_string = get_option("future.infer_string") + if infer_string: + storage = "pyarrow_numpy" + else: + storage = get_option("mode.string_storage") + if storage not in {"python", "pyarrow", "pyarrow_numpy"}: + raise ValueError( + f"Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'. " + f"Got {storage} instead." + ) + if storage in ("pyarrow", "pyarrow_numpy") and pa_version_under10p1: + raise ImportError( + "pyarrow>=10.0.1 is required for PyArrow backed StringArray." + ) + self.storage = storage + + @property + def type(self) -> type[str]: + return str + + @classmethod + def construct_from_string(cls, string) -> Self: + """ + Construct a StringDtype from a string. + + Parameters + ---------- + string : str + The type of the name. The storage type will be taking from `string`. + Valid options and their storage types are + + ========================== ============================================== + string result storage + ========================== ============================================== + ``'string'`` pd.options.mode.string_storage, default python + ``'string[python]'`` python + ``'string[pyarrow]'`` pyarrow + ========================== ============================================== + + Returns + ------- + StringDtype + + Raise + ----- + TypeError + If the string is not a valid option. + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + if string == "string": + return cls() + elif string == "string[python]": + return cls(storage="python") + elif string == "string[pyarrow]": + return cls(storage="pyarrow") + elif string == "string[pyarrow_numpy]": + return cls(storage="pyarrow_numpy") + else: + raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") + + # https://github.com/pandas-dev/pandas/issues/36126 + # error: Signature of "construct_array_type" incompatible with supertype + # "ExtensionDtype" + def construct_array_type( # type: ignore[override] + self, + ) -> type_t[BaseStringArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, + ) + + if self.storage == "python": + return StringArray + elif self.storage == "pyarrow": + return ArrowStringArray + else: + return ArrowStringArrayNumpySemantics + + def __from_arrow__( + self, array: pyarrow.Array | pyarrow.ChunkedArray + ) -> BaseStringArray: + """ + Construct StringArray from pyarrow Array/ChunkedArray. 
+ """ + if self.storage == "pyarrow": + from pandas.core.arrays.string_arrow import ArrowStringArray + + return ArrowStringArray(array) + elif self.storage == "pyarrow_numpy": + from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics + + return ArrowStringArrayNumpySemantics(array) + else: + import pyarrow + + if isinstance(array, pyarrow.Array): + chunks = [array] + else: + # pyarrow.ChunkedArray + chunks = array.chunks + + results = [] + for arr in chunks: + # convert chunk by chunk to numpy and concatenate then, to avoid + # overflow for large string data when concatenating the pyarrow arrays + arr = arr.to_numpy(zero_copy_only=False) + arr = ensure_string_array(arr, na_value=libmissing.NA) + results.append(arr) + + if len(chunks) == 0: + arr = np.array([], dtype=object) + else: + arr = np.concatenate(results) + + # Bypass validation inside StringArray constructor, see GH#47781 + new_string_array = StringArray.__new__(StringArray) + NDArrayBacked.__init__( + new_string_array, + arr, + StringDtype(storage="python"), + ) + return new_string_array + + +class BaseStringArray(ExtensionArray): + """ + Mixin class for StringArray, ArrowStringArray. + """ + + @doc(ExtensionArray.tolist) + def tolist(self): + if self.ndim > 1: + return [x.tolist() for x in self] + return list(self.to_numpy()) + + @classmethod + def _from_scalars(cls, scalars, dtype: DtypeObj) -> Self: + if lib.infer_dtype(scalars, skipna=True) not in ["string", "empty"]: + # TODO: require any NAs be valid-for-string + raise ValueError + return cls._from_sequence(scalars, dtype=dtype) + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class StringArray(BaseStringArray, NumpyExtensionArray): # type: ignore[misc] + """ + Extension array for string data. + + .. warning:: + + StringArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : array-like + The array of data. + + .. warning:: + + Currently, this expects an object-dtype ndarray + where the elements are Python strings + or nan-likes (``None``, ``np.nan``, ``NA``). + This may change without warning in the future. Use + :meth:`pandas.array` with ``dtype="string"`` for a stable way of + creating a `StringArray` from any sequence. + + .. versionchanged:: 1.5.0 + + StringArray now accepts array-likes containing + nan-likes(``None``, ``np.nan``) for the ``values`` parameter + in addition to strings and :attr:`pandas.NA` + + copy : bool, default False + Whether to copy the array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + :func:`pandas.array` + The recommended function for creating a StringArray. + Series.str + The string methods are available on Series backed by + a StringArray. + + Notes + ----- + StringArray returns a BooleanArray for comparison methods. + + Examples + -------- + >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string") + + ['This is', 'some text', , 'data.'] + Length: 4, dtype: string + + Unlike arrays instantiated with ``dtype="object"``, ``StringArray`` + will convert the values to strings. + + >>> pd.array(['1', 1], dtype="object") + + ['1', 1] + Length: 2, dtype: object + >>> pd.array(['1', 1], dtype="string") + + ['1', '1'] + Length: 2, dtype: string + + However, instantiating StringArrays directly with non-strings will raise an error. 
+ + For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`: + + >>> pd.array(["a", None, "c"], dtype="string") == "a" + + [True, , False] + Length: 3, dtype: boolean + """ + + # undo the NumpyExtensionArray hack + _typ = "extension" + + def __init__(self, values, copy: bool = False) -> None: + values = extract_array(values) + + super().__init__(values, copy=copy) + if not isinstance(values, type(self)): + self._validate() + NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python")) + + def _validate(self): + """Validate that we only store NA or strings.""" + if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True): + raise ValueError("StringArray requires a sequence of strings or pandas.NA") + if self._ndarray.dtype != "object": + raise ValueError( + "StringArray requires a sequence of strings or pandas.NA. Got " + f"'{self._ndarray.dtype}' dtype instead." + ) + # Check to see if need to convert Na values to pd.NA + if self._ndarray.ndim > 2: + # Ravel if ndims > 2 b/c no cythonized version available + lib.convert_nans_to_NA(self._ndarray.ravel("K")) + else: + lib.convert_nans_to_NA(self._ndarray) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + if dtype and not (isinstance(dtype, str) and dtype == "string"): + dtype = pandas_dtype(dtype) + assert isinstance(dtype, StringDtype) and dtype.storage == "python" + + from pandas.core.arrays.masked import BaseMaskedArray + + if isinstance(scalars, BaseMaskedArray): + # avoid costly conversion to object dtype + na_values = scalars._mask + result = scalars._data + result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) + result[na_values] = libmissing.NA + + else: + if lib.is_pyarrow_array(scalars): + # pyarrow array; we cannot rely on the "to_numpy" check in + # ensure_string_array because calling scalars.to_numpy would set + # zero_copy_only to True which caused problems see GH#52076 + scalars = np.array(scalars) + # convert non-na-likes to str, and nan-likes to StringDtype().na_value + result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy) + + # Manually creating new array avoids the validation step in the __init__, so is + # faster. Refactor need for validation? + new_string_array = cls.__new__(cls) + NDArrayBacked.__init__(new_string_array, result, StringDtype(storage="python")) + + return new_string_array + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ): + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + @classmethod + def _empty(cls, shape, dtype) -> StringArray: + values = np.empty(shape, dtype=object) + values[:] = libmissing.NA + return cls(values).astype(dtype, copy=False) + + def __arrow_array__(self, type=None): + """ + Convert myself into a pyarrow Array. 
+ """ + import pyarrow as pa + + if type is None: + type = pa.string() + + values = self._ndarray.copy() + values[self.isna()] = None + return pa.array(values, type=type, from_pandas=True) + + def _values_for_factorize(self): + arr = self._ndarray.copy() + mask = self.isna() + arr[mask] = None + return arr, None + + def __setitem__(self, key, value) -> None: + value = extract_array(value, extract_numpy=True) + if isinstance(value, type(self)): + # extract_array doesn't extract NumpyExtensionArray subclasses + value = value._ndarray + + key = check_array_indexer(self, key) + scalar_key = lib.is_scalar(key) + scalar_value = lib.is_scalar(value) + if scalar_key and not scalar_value: + raise ValueError("setting an array element with a sequence.") + + # validate new items + if scalar_value: + if isna(value): + value = libmissing.NA + elif not isinstance(value, str): + raise TypeError( + f"Cannot set non-string value '{value}' into a StringArray." + ) + else: + if not is_array_like(value): + value = np.asarray(value, dtype=object) + if len(value) and not lib.is_string_array(value, skipna=True): + raise TypeError("Must provide strings.") + + mask = isna(value) + if mask.any(): + value = value.copy() + value[isna(value)] = libmissing.NA + + super().__setitem__(key, value) + + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + # the super() method NDArrayBackedExtensionArray._putmask uses + # np.putmask which doesn't properly handle None/pd.NA, so using the + # base class implementation that uses __setitem__ + ExtensionArray._putmask(self, mask, value) + + def astype(self, dtype, copy: bool = True): + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + elif isinstance(dtype, IntegerDtype): + arr = self._ndarray.copy() + mask = self.isna() + arr[mask] = 0 + values = arr.astype(dtype.numpy_dtype) + return IntegerArray(values, mask, copy=False) + elif isinstance(dtype, FloatingDtype): + arr = self.copy() + mask = self.isna() + arr[mask] = "0" + values = arr.astype(dtype.numpy_dtype) + return FloatingArray(values, mask, copy=False) + elif isinstance(dtype, ExtensionDtype): + # Skip the NumpyExtensionArray.astype method + return ExtensionArray.astype(self, dtype, copy) + elif np.issubdtype(dtype, np.floating): + arr = self._ndarray.copy() + mask = self.isna() + arr[mask] = 0 + values = arr.astype(dtype) + values[mask] = np.nan + return values + + return super().astype(dtype, copy) + + def _reduce( + self, name: str, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs + ): + if name in ["min", "max"]: + return getattr(self, name)(skipna=skipna, axis=axis) + + raise TypeError(f"Cannot perform reduction '{name}' with string dtype") + + def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar: + nv.validate_min((), kwargs) + result = masked_reductions.min( + values=self.to_numpy(), mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar: + nv.validate_max((), kwargs) + result = masked_reductions.max( + values=self.to_numpy(), mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def value_counts(self, dropna: bool = True) -> Series: + from pandas.core.algorithms import value_counts_internal as value_counts + + result = value_counts(self._ndarray, dropna=dropna).astype("Int64") + result.index = result.index.astype(self.dtype) + return result + + def memory_usage(self, deep: bool = False) 
-> int: + result = self._ndarray.nbytes + if deep: + return result + lib.memory_usage_of_objects(self._ndarray) + return result + + @doc(ExtensionArray.searchsorted) + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + if self._hasna: + raise ValueError( + "searchsorted requires array to be sorted, which is impossible " + "with NAs present." + ) + return super().searchsorted(value=value, side=side, sorter=sorter) + + def _cmp_method(self, other, op): + from pandas.arrays import BooleanArray + + if isinstance(other, StringArray): + other = other._ndarray + + mask = isna(self) | isna(other) + valid = ~mask + + if not lib.is_scalar(other): + if len(other) != len(self): + # prevent improper broadcasting when other is 2D + raise ValueError( + f"Lengths of operands do not match: {len(self)} != {len(other)}" + ) + + other = np.asarray(other) + other = other[valid] + + if op.__name__ in ops.ARITHMETIC_BINOPS: + result = np.empty_like(self._ndarray, dtype="object") + result[mask] = libmissing.NA + result[valid] = op(self._ndarray[valid], other) + return StringArray(result) + else: + # logical + result = np.zeros(len(self._ndarray), dtype="bool") + result[valid] = op(self._ndarray[valid], other) + return BooleanArray(result, mask) + + _arith_method = _cmp_method + + # ------------------------------------------------------------------------ + # String methods interface + # error: Incompatible types in assignment (expression has type "NAType", + # base class "NumpyExtensionArray" defined the type as "float") + _str_na_value = libmissing.NA # type: ignore[assignment] + + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): + from pandas.arrays import BooleanArray + + if dtype is None: + dtype = StringDtype(storage="python") + if na_value is None: + na_value = self.dtype.na_value + + mask = isna(self) + arr = np.asarray(self) + + if is_integer_dtype(dtype) or is_bool_dtype(dtype): + constructor: type[IntegerArray | BooleanArray] + if is_integer_dtype(dtype): + constructor = IntegerArray + else: + constructor = BooleanArray + + na_value_is_na = isna(na_value) + if na_value_is_na: + na_value = 1 + elif dtype == np.dtype("bool"): + na_value = bool(na_value) + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + # error: Argument 1 to "dtype" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected + # "Type[object]" + dtype=np.dtype(dtype), # type: ignore[arg-type] + ) + + if not na_value_is_na: + mask[:] = False + + return constructor(result, mask) + + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # i.e. StringDtype + result = lib.map_infer_mask( + arr, f, mask.view("uint8"), convert=False, na_value=na_value + ) + return StringArray(result) + else: + # This is when the result type is object. We reach this when + # -> We know the result type is truly object (e.g. .encode returns bytes + # or .findall returns a list). + # -> We don't know the result type. E.g. `.get` can return anything. 
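+ # Fall back to applying f under the NA mask without forcing a result dtype; the caller gets whatever object-dtype result comes back.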
+ return lib.map_infer_mask(arr, f, mask.view("uint8")) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/string_arrow.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/string_arrow.py new file mode 100644 index 0000000000000000000000000000000000000000..e8f614ff855c0f6fa4664104597f6b90ed303f3d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/string_arrow.py @@ -0,0 +1,715 @@ +from __future__ import annotations + +from functools import partial +import operator +import re +from typing import ( + TYPE_CHECKING, + Callable, + Union, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas.compat import ( + pa_version_under10p1, + pa_version_under13p0, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer_dtype, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin +from pandas.core.arrays.arrow import ArrowExtensionArray +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.arrays.integer import Int64Dtype +from pandas.core.arrays.numeric import NumericDtype +from pandas.core.arrays.string_ import ( + BaseStringArray, + StringDtype, +) +from pandas.core.ops import invalid_comparison +from pandas.core.strings.object_array import ObjectStringArrayMixin + +if not pa_version_under10p1: + import pyarrow as pa + import pyarrow.compute as pc + + from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning + + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + ArrayLike, + AxisInt, + Dtype, + Scalar, + npt, + ) + + from pandas import Series + + +ArrowStringScalarOrNAT = Union[str, libmissing.NAType] + + +def _chk_pyarrow_available() -> None: + if pa_version_under10p1: + msg = "pyarrow>=10.0.1 is required for PyArrow backed ArrowExtensionArray." + raise ImportError(msg) + + +# TODO: Inherit directly from BaseStringArrayMethods. Currently we inherit from +# ObjectStringArrayMixin because we want to have the object-dtype based methods as +# fallback for the ones that pyarrow doesn't yet support + + +class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray): + """ + Extension array for string data in a ``pyarrow.ChunkedArray``. + + .. warning:: + + ArrowStringArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : pyarrow.Array or pyarrow.ChunkedArray + The array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + :func:`pandas.array` + The recommended function for creating a ArrowStringArray. + Series.str + The string methods are available on Series backed by + a ArrowStringArray. + + Notes + ----- + ArrowStringArray returns a BooleanArray for comparison methods. 
+ + Examples + -------- + >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string[pyarrow]") + + ['This is', 'some text', , 'data.'] + Length: 4, dtype: string + """ + + # error: Incompatible types in assignment (expression has type "StringDtype", + # base class "ArrowExtensionArray" defined the type as "ArrowDtype") + _dtype: StringDtype # type: ignore[assignment] + _storage = "pyarrow" + + def __init__(self, values) -> None: + _chk_pyarrow_available() + if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_string( + values.type + ): + values = pc.cast(values, pa.large_string()) + + super().__init__(values) + self._dtype = StringDtype(storage=self._storage) + + if not pa.types.is_large_string(self._pa_array.type) and not ( + pa.types.is_dictionary(self._pa_array.type) + and pa.types.is_large_string(self._pa_array.type.value_type) + ): + raise ValueError( + "ArrowStringArray requires a PyArrow (chunked) array of " + "large_string type" + ) + + @classmethod + def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar: + pa_scalar = super()._box_pa_scalar(value, pa_type) + if pa.types.is_string(pa_scalar.type) and pa_type is None: + pa_scalar = pc.cast(pa_scalar, pa.large_string()) + return pa_scalar + + @classmethod + def _box_pa_array( + cls, value, pa_type: pa.DataType | None = None, copy: bool = False + ) -> pa.Array | pa.ChunkedArray: + pa_array = super()._box_pa_array(value, pa_type) + if pa.types.is_string(pa_array.type) and pa_type is None: + pa_array = pc.cast(pa_array, pa.large_string()) + return pa_array + + def __len__(self) -> int: + """ + Length of this array. + + Returns + ------- + length : int + """ + return len(self._pa_array) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + from pandas.core.arrays.masked import BaseMaskedArray + + _chk_pyarrow_available() + + if dtype and not (isinstance(dtype, str) and dtype == "string"): + dtype = pandas_dtype(dtype) + assert isinstance(dtype, StringDtype) and dtype.storage in ( + "pyarrow", + "pyarrow_numpy", + ) + + if isinstance(scalars, BaseMaskedArray): + # avoid costly conversion to object dtype in ensure_string_array and + # numerical issues with Float32Dtype + na_values = scalars._mask + result = scalars._data + result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) + return cls(pa.array(result, mask=na_values, type=pa.string())) + elif isinstance(scalars, (pa.Array, pa.ChunkedArray)): + return cls(pc.cast(scalars, pa.string())) + + # convert non-na-likes to str + result = lib.ensure_string_array(scalars, copy=copy) + return cls(pa.array(result, type=pa.string(), from_pandas=True)) + + @classmethod + def _from_sequence_of_strings( + cls, strings, dtype: Dtype | None = None, copy: bool = False + ): + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + @property + def dtype(self) -> StringDtype: # type: ignore[override] + """ + An instance of 'string[pyarrow]'. 
+ """ + return self._dtype + + def insert(self, loc: int, item) -> ArrowStringArray: + if not isinstance(item, str) and item is not libmissing.NA: + raise TypeError("Scalar must be NA or str") + return super().insert(loc, item) + + @classmethod + def _result_converter(cls, values, na=None): + return BooleanDtype().__from_arrow__(values) + + def _maybe_convert_setitem_value(self, value): + """Maybe convert value to be pyarrow compatible.""" + if is_scalar(value): + if isna(value): + value = None + elif not isinstance(value, str): + raise TypeError("Scalar must be NA or str") + else: + value = np.array(value, dtype=object, copy=True) + value[isna(value)] = None + for v in value: + if not (v is None or isinstance(v, str)): + raise TypeError("Scalar must be NA or str") + return super()._maybe_convert_setitem_value(value) + + def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: + value_set = [ + pa_scalar.as_py() + for pa_scalar in [pa.scalar(value, from_pandas=True) for value in values] + if pa_scalar.type in (pa.string(), pa.null()) + ] + + # short-circuit to return all False array. + if not len(value_set): + return np.zeros(len(self), dtype=bool) + + result = pc.is_in( + self._pa_array, value_set=pa.array(value_set, type=self._pa_array.type) + ) + # pyarrow 2.0.0 returned nulls, so we explicily specify dtype to convert nulls + # to False + return np.array(result, dtype=np.bool_) + + def astype(self, dtype, copy: bool = True): + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + elif isinstance(dtype, NumericDtype): + data = self._pa_array.cast(pa.from_numpy_dtype(dtype.numpy_dtype)) + return dtype.__from_arrow__(data) + elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.floating): + return self.to_numpy(dtype=dtype, na_value=np.nan) + + return super().astype(dtype, copy=copy) + + @property + def _data(self): + # dask accesses ._data directlys + warnings.warn( + f"{type(self).__name__}._data is a deprecated and will be removed " + "in a future version, use ._pa_array instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._pa_array + + # ------------------------------------------------------------------------ + # String methods interface + + # error: Incompatible types in assignment (expression has type "NAType", + # base class "ObjectStringArrayMixin" defined the type as "float") + _str_na_value = libmissing.NA # type: ignore[assignment] + + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): + # TODO: de-duplicate with StringArray method. This method is moreless copy and + # paste. 
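+ # The main difference from StringArray._str_map: string-dtype results below are rebuilt as a pyarrow array (with the NA mask) and wrapped back into this array type.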
+ + from pandas.arrays import ( + BooleanArray, + IntegerArray, + ) + + if dtype is None: + dtype = self.dtype + if na_value is None: + na_value = self.dtype.na_value + + mask = isna(self) + arr = np.asarray(self) + + if is_integer_dtype(dtype) or is_bool_dtype(dtype): + constructor: type[IntegerArray | BooleanArray] + if is_integer_dtype(dtype): + constructor = IntegerArray + else: + constructor = BooleanArray + + na_value_is_na = isna(na_value) + if na_value_is_na: + na_value = 1 + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + # error: Argument 1 to "dtype" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected + # "Type[object]" + dtype=np.dtype(dtype), # type: ignore[arg-type] + ) + + if not na_value_is_na: + mask[:] = False + + return constructor(result, mask) + + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # i.e. StringDtype + result = lib.map_infer_mask( + arr, f, mask.view("uint8"), convert=False, na_value=na_value + ) + result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True) + return type(self)(result) + else: + # This is when the result type is object. We reach this when + # -> We know the result type is truly object (e.g. .encode returns bytes + # or .findall returns a list). + # -> We don't know the result type. E.g. `.get` can return anything. + return lib.map_infer_mask(arr, f, mask.view("uint8")) + + def _str_contains( + self, pat, case: bool = True, flags: int = 0, na=np.nan, regex: bool = True + ): + if flags: + fallback_performancewarning() + return super()._str_contains(pat, case, flags, na, regex) + + if regex: + result = pc.match_substring_regex(self._pa_array, pat, ignore_case=not case) + else: + result = pc.match_substring(self._pa_array, pat, ignore_case=not case) + result = self._result_converter(result, na=na) + if not isna(na): + result[isna(result)] = bool(na) + return result + + def _str_startswith(self, pat: str | tuple[str, ...], na: Scalar | None = None): + if isinstance(pat, str): + result = pc.starts_with(self._pa_array, pattern=pat) + else: + if len(pat) == 0: + # mimic existing behaviour of string extension array + # and python string method + result = pa.array( + np.zeros(len(self._pa_array), dtype=bool), mask=isna(self._pa_array) + ) + else: + result = pc.starts_with(self._pa_array, pattern=pat[0]) + + for p in pat[1:]: + result = pc.or_(result, pc.starts_with(self._pa_array, pattern=p)) + if not isna(na): + result = result.fill_null(na) + return self._result_converter(result) + + def _str_endswith(self, pat: str | tuple[str, ...], na: Scalar | None = None): + if isinstance(pat, str): + result = pc.ends_with(self._pa_array, pattern=pat) + else: + if len(pat) == 0: + # mimic existing behaviour of string extension array + # and python string method + result = pa.array( + np.zeros(len(self._pa_array), dtype=bool), mask=isna(self._pa_array) + ) + else: + result = pc.ends_with(self._pa_array, pattern=pat[0]) + + for p in pat[1:]: + result = pc.or_(result, pc.ends_with(self._pa_array, pattern=p)) + if not isna(na): + result = result.fill_null(na) + return self._result_converter(result) + + def _str_replace( + self, + pat: str | re.Pattern, + repl: str | Callable, + n: int = -1, + case: bool = True, + flags: int = 0, + regex: bool = True, + ): + if isinstance(pat, re.Pattern) or callable(repl) or not case or flags: + fallback_performancewarning() + return super()._str_replace(pat, repl, n, case, flags, regex) + + func = 
pc.replace_substring_regex if regex else pc.replace_substring + result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=n) + return type(self)(result) + + def _str_repeat(self, repeats: int | Sequence[int]): + if not isinstance(repeats, int): + return super()._str_repeat(repeats) + else: + return type(self)(pc.binary_repeat(self._pa_array, repeats)) + + def _str_match( + self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None + ): + if not pat.startswith("^"): + pat = f"^{pat}" + return self._str_contains(pat, case, flags, na, regex=True) + + def _str_fullmatch( + self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None + ): + if not pat.endswith("$") or pat.endswith("\\$"): + pat = f"{pat}$" + return self._str_match(pat, case, flags, na) + + def _str_slice( + self, start: int | None = None, stop: int | None = None, step: int | None = None + ): + if stop is None: + return super()._str_slice(start, stop, step) + if start is None: + start = 0 + if step is None: + step = 1 + return type(self)( + pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step) + ) + + def _str_isalnum(self): + result = pc.utf8_is_alnum(self._pa_array) + return self._result_converter(result) + + def _str_isalpha(self): + result = pc.utf8_is_alpha(self._pa_array) + return self._result_converter(result) + + def _str_isdecimal(self): + result = pc.utf8_is_decimal(self._pa_array) + return self._result_converter(result) + + def _str_isdigit(self): + result = pc.utf8_is_digit(self._pa_array) + return self._result_converter(result) + + def _str_islower(self): + result = pc.utf8_is_lower(self._pa_array) + return self._result_converter(result) + + def _str_isnumeric(self): + result = pc.utf8_is_numeric(self._pa_array) + return self._result_converter(result) + + def _str_isspace(self): + result = pc.utf8_is_space(self._pa_array) + return self._result_converter(result) + + def _str_istitle(self): + result = pc.utf8_is_title(self._pa_array) + return self._result_converter(result) + + def _str_isupper(self): + result = pc.utf8_is_upper(self._pa_array) + return self._result_converter(result) + + def _str_len(self): + result = pc.utf8_length(self._pa_array) + return self._convert_int_dtype(result) + + def _str_lower(self): + return type(self)(pc.utf8_lower(self._pa_array)) + + def _str_upper(self): + return type(self)(pc.utf8_upper(self._pa_array)) + + def _str_strip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_trim_whitespace(self._pa_array) + else: + result = pc.utf8_trim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_lstrip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_ltrim_whitespace(self._pa_array) + else: + result = pc.utf8_ltrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_rstrip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_rtrim_whitespace(self._pa_array) + else: + result = pc.utf8_rtrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_removeprefix(self, prefix: str): + if not pa_version_under13p0: + starts_with = pc.starts_with(self._pa_array, pattern=prefix) + removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix)) + result = pc.if_else(starts_with, removed, self._pa_array) + return type(self)(result) + return super()._str_removeprefix(prefix) + + def _str_removesuffix(self, suffix: str): + ends_with = pc.ends_with(self._pa_array, pattern=suffix) + removed = pc.utf8_slice_codeunits(self._pa_array, 
0, stop=-len(suffix)) + result = pc.if_else(ends_with, removed, self._pa_array) + return type(self)(result) + + def _str_count(self, pat: str, flags: int = 0): + if flags: + return super()._str_count(pat, flags) + result = pc.count_substring_regex(self._pa_array, pat) + return self._convert_int_dtype(result) + + def _str_find(self, sub: str, start: int = 0, end: int | None = None): + if start != 0 and end is not None: + slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end) + result = pc.find_substring(slices, sub) + not_found = pc.equal(result, -1) + offset_result = pc.add(result, end - start) + result = pc.if_else(not_found, result, offset_result) + elif start == 0 and end is None: + slices = self._pa_array + result = pc.find_substring(slices, sub) + else: + return super()._str_find(sub, start, end) + return self._convert_int_dtype(result) + + def _str_get_dummies(self, sep: str = "|"): + dummies_pa, labels = ArrowExtensionArray(self._pa_array)._str_get_dummies(sep) + if len(labels) == 0: + return np.empty(shape=(0, 0), dtype=np.int64), labels + dummies = np.vstack(dummies_pa.to_numpy()) + return dummies.astype(np.int64, copy=False), labels + + def _convert_int_dtype(self, result): + return Int64Dtype().__from_arrow__(result) + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs) + if name in ("argmin", "argmax") and isinstance(result, pa.Array): + return self._convert_int_dtype(result) + elif isinstance(result, pa.Array): + return type(self)(result) + else: + return result + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + return self._convert_int_dtype( + self._rank_calc( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + ) + + +class ArrowStringArrayNumpySemantics(ArrowStringArray): + _storage = "pyarrow_numpy" + + @classmethod + def _result_converter(cls, values, na=None): + if not isna(na): + values = values.fill_null(bool(na)) + return ArrowExtensionArray(values).to_numpy(na_value=np.nan) + + def __getattribute__(self, item): + # ArrowStringArray and we both inherit from ArrowExtensionArray, which + # creates inheritance problems (Diamond inheritance) + if item in ArrowStringArrayMixin.__dict__ and item not in ( + "_pa_array", + "__dict__", + ): + return partial(getattr(ArrowStringArrayMixin, item), self) + return super().__getattribute__(item) + + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): + if dtype is None: + dtype = self.dtype + if na_value is None: + na_value = self.dtype.na_value + + mask = isna(self) + arr = np.asarray(self) + + if is_integer_dtype(dtype) or is_bool_dtype(dtype): + if is_integer_dtype(dtype): + na_value = np.nan + else: + na_value = False + try: + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + dtype=np.dtype(dtype), # type: ignore[arg-type] + ) + return result + + except ValueError: + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + ) + if convert and result.dtype == object: + result = lib.maybe_convert_objects(result) + return result + + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # i.e. 
StringDtype + result = lib.map_infer_mask( + arr, f, mask.view("uint8"), convert=False, na_value=na_value + ) + result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True) + return type(self)(result) + else: + # This is when the result type is object. We reach this when + # -> We know the result type is truly object (e.g. .encode returns bytes + # or .findall returns a list). + # -> We don't know the result type. E.g. `.get` can return anything. + return lib.map_infer_mask(arr, f, mask.view("uint8")) + + def _convert_int_dtype(self, result): + if isinstance(result, pa.Array): + result = result.to_numpy(zero_copy_only=False) + else: + result = result.to_numpy() + if result.dtype == np.int32: + result = result.astype(np.int64) + return result + + def _cmp_method(self, other, op): + try: + result = super()._cmp_method(other, op) + except pa.ArrowNotImplementedError: + return invalid_comparison(self, other, op) + if op == operator.ne: + return result.to_numpy(np.bool_, na_value=True) + else: + return result.to_numpy(np.bool_, na_value=False) + + def value_counts(self, dropna: bool = True) -> Series: + from pandas import Series + + result = super().value_counts(dropna) + return Series( + result._values.to_numpy(), index=result.index, name=result.name, copy=False + ) + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + if name in ["any", "all"]: + if not skipna and name == "all": + nas = pc.invert(pc.is_null(self._pa_array)) + arr = pc.and_kleene(nas, pc.not_equal(self._pa_array, "")) + else: + arr = pc.not_equal(self._pa_array, "") + return ArrowExtensionArray(arr)._reduce( + name, skipna=skipna, keepdims=keepdims, **kwargs + ) + else: + return super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) + + def insert(self, loc: int, item) -> ArrowStringArrayNumpySemantics: + if item is np.nan: + item = libmissing.NA + return super().insert(loc, item) # type: ignore[return-value] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/timedeltas.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/timedeltas.py new file mode 100644 index 0000000000000000000000000000000000000000..e9260a3ec50a2b21d8f1c52f1c4cbe4ec2820bd4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/timedeltas.py @@ -0,0 +1,1177 @@ +from __future__ import annotations + +from datetime import timedelta +import operator +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._libs import ( + lib, + tslibs, +) +from pandas._libs.tslibs import ( + NaT, + NaTType, + Tick, + Timedelta, + astype_overflowsafe, + get_supported_dtype, + iNaT, + is_supported_dtype, + periods_per_second, +) +from pandas._libs.tslibs.conversion import cast_from_unit_vectorized +from pandas._libs.tslibs.fields import ( + get_timedelta_days, + get_timedelta_field, +) +from pandas._libs.tslibs.timedeltas import ( + array_to_timedelta64, + floordiv_object_array, + ints_to_pytimedelta, + parse_timedelta_unit, + truediv_object_array, +) +from pandas.compat.numpy import function as nv +from pandas.util._validators import validate_endpoints + +from pandas.core.dtypes.common import ( + TD64NS_DTYPE, + is_float_dtype, + is_integer_dtype, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + nanops, + roperator, +) +from pandas.core.array_algos import 
datetimelike_accumulations +from pandas.core.arrays import datetimelike as dtl +from pandas.core.arrays._ranges import generate_regular_range +import pandas.core.common as com +from pandas.core.ops.common import unpack_zerodim_and_defer + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas._typing import ( + AxisInt, + DateTimeErrorChoices, + DtypeObj, + NpDtype, + Self, + npt, + ) + + from pandas import DataFrame + +import textwrap + + +def _field_accessor(name: str, alias: str, docstring: str): + def f(self) -> np.ndarray: + values = self.asi8 + if alias == "days": + result = get_timedelta_days(values, reso=self._creso) + else: + # error: Incompatible types in assignment ( + # expression has type "ndarray[Any, dtype[signedinteger[_32Bit]]]", + # variable has type "ndarray[Any, dtype[signedinteger[_64Bit]]] + result = get_timedelta_field(values, alias, reso=self._creso) # type: ignore[assignment] + if self._hasna: + result = self._maybe_mask_results( + result, fill_value=None, convert="float64" + ) + + return result + + f.__name__ = name + f.__doc__ = f"\n{docstring}\n" + return property(f) + + +class TimedeltaArray(dtl.TimelikeOps): + """ + Pandas ExtensionArray for timedelta data. + + .. warning:: + + TimedeltaArray is currently experimental, and its API may change + without warning. In particular, :attr:`TimedeltaArray.dtype` is + expected to change to be an instance of an ``ExtensionDtype`` + subclass. + + Parameters + ---------- + values : array-like + The timedelta data. + + dtype : numpy.dtype + Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted. + freq : Offset, optional + copy : bool, default False + Whether to copy the underlying array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(['1h', '2h'])) + + ['0 days 01:00:00', '0 days 02:00:00'] + Length: 2, dtype: timedelta64[ns] + """ + + _typ = "timedeltaarray" + _internal_fill_value = np.timedelta64("NaT", "ns") + _recognized_scalars = (timedelta, np.timedelta64, Tick) + _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "m") + _infer_matches = ("timedelta", "timedelta64") + + @property + def _scalar_type(self) -> type[Timedelta]: + return Timedelta + + __array_priority__ = 1000 + # define my properties & methods for delegation + _other_ops: list[str] = [] + _bool_ops: list[str] = [] + _object_ops: list[str] = ["freq"] + _field_ops: list[str] = ["days", "seconds", "microseconds", "nanoseconds"] + _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + ["unit"] + _datetimelike_methods: list[str] = [ + "to_pytimedelta", + "total_seconds", + "round", + "floor", + "ceil", + "as_unit", + ] + + # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray) + # operates pointwise. + + def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType: + y = x.view("i8") + if y == NaT._value: + return NaT + return Timedelta._from_value_and_reso(y, reso=self._creso) + + @property + # error: Return type "dtype" of "dtype" incompatible with return type + # "ExtensionDtype" in supertype "ExtensionArray" + def dtype(self) -> np.dtype[np.timedelta64]: # type: ignore[override] + """ + The dtype for the TimedeltaArray. + + .. warning:: + + A future version of pandas will change dtype to be an instance + of a :class:`pandas.api.extensions.ExtensionDtype` subclass, + not a ``numpy.dtype``. 
+ + Returns + ------- + numpy.dtype + """ + return self._ndarray.dtype + + # ---------------------------------------------------------------- + # Constructors + + _freq = None + _default_dtype = TD64NS_DTYPE # used in TimeLikeOps.__init__ + + @classmethod + def _validate_dtype(cls, values, dtype): + # used in TimeLikeOps.__init__ + dtype = _validate_td64_dtype(dtype) + _validate_td64_dtype(values.dtype) + if dtype != values.dtype: + raise ValueError("Values resolution does not match dtype.") + return dtype + + # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" + @classmethod + def _simple_new( # type: ignore[override] + cls, + values: npt.NDArray[np.timedelta64], + freq: Tick | None = None, + dtype: np.dtype[np.timedelta64] = TD64NS_DTYPE, + ) -> Self: + # Require td64 dtype, not unit-less, matching values.dtype + assert lib.is_np_dtype(dtype, "m") + assert not tslibs.is_unitless(dtype) + assert isinstance(values, np.ndarray), type(values) + assert dtype == values.dtype + assert freq is None or isinstance(freq, Tick) + + result = super()._simple_new(values=values, dtype=dtype) + result._freq = freq + return result + + @classmethod + def _from_sequence(cls, data, *, dtype=None, copy: bool = False) -> Self: + if dtype: + dtype = _validate_td64_dtype(dtype) + + data, freq = sequence_to_td64ns(data, copy=copy, unit=None) + + if dtype is not None: + data = astype_overflowsafe(data, dtype=dtype, copy=False) + + return cls._simple_new(data, dtype=data.dtype, freq=freq) + + @classmethod + def _from_sequence_not_strict( + cls, + data, + *, + dtype=None, + copy: bool = False, + freq=lib.no_default, + unit=None, + ) -> Self: + """ + _from_sequence_not_strict but without responsibility for finding the + result's `freq`. + """ + if dtype: + dtype = _validate_td64_dtype(dtype) + + assert unit not in ["Y", "y", "M"] # caller is responsible for checking + + data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit) + + if dtype is not None: + data = astype_overflowsafe(data, dtype=dtype, copy=False) + + result = cls._simple_new(data, dtype=data.dtype, freq=inferred_freq) + + result._maybe_pin_freq(freq, {}) + return result + + @classmethod + def _generate_range( + cls, start, end, periods, freq, closed=None, *, unit: str | None = None + ) -> Self: + periods = dtl.validate_periods(periods) + if freq is None and any(x is None for x in [periods, start, end]): + raise ValueError("Must provide freq argument if no data is supplied") + + if com.count_not_none(start, end, periods, freq) != 3: + raise ValueError( + "Of the four parameters: start, end, periods, " + "and freq, exactly three must be specified" + ) + + if start is not None: + start = Timedelta(start).as_unit("ns") + + if end is not None: + end = Timedelta(end).as_unit("ns") + + if unit is not None: + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'") + else: + unit = "ns" + + if start is not None and unit is not None: + start = start.as_unit(unit, round_ok=False) + if end is not None and unit is not None: + end = end.as_unit(unit, round_ok=False) + + left_closed, right_closed = validate_endpoints(closed) + + if freq is not None: + index = generate_regular_range(start, end, periods, freq, unit=unit) + else: + index = np.linspace(start._value, end._value, periods).astype("i8") + + if not left_closed: + index = index[1:] + if not right_closed: + index = index[:-1] + + td64values = index.view(f"m8[{unit}]") + return cls._simple_new(td64values, 
dtype=td64values.dtype, freq=freq) + + # ---------------------------------------------------------------- + # DatetimeLike Interface + + def _unbox_scalar(self, value) -> np.timedelta64: + if not isinstance(value, self._scalar_type) and value is not NaT: + raise ValueError("'value' should be a Timedelta.") + self._check_compatible_with(value) + if value is NaT: + return np.timedelta64(value._value, self.unit) + else: + return value.as_unit(self.unit).asm8 + + def _scalar_from_string(self, value) -> Timedelta | NaTType: + return Timedelta(value) + + def _check_compatible_with(self, other) -> None: + # we don't have anything to validate. + pass + + # ---------------------------------------------------------------- + # Array-Like / EA-Interface Methods + + def astype(self, dtype, copy: bool = True): + # We handle + # --> timedelta64[ns] + # --> timedelta64 + # DatetimeLikeArrayMixin super call handles other cases + dtype = pandas_dtype(dtype) + + if lib.is_np_dtype(dtype, "m"): + if dtype == self.dtype: + if copy: + return self.copy() + return self + + if is_supported_dtype(dtype): + # unit conversion e.g. timedelta64[s] + res_values = astype_overflowsafe(self._ndarray, dtype, copy=False) + return type(self)._simple_new( + res_values, dtype=res_values.dtype, freq=self.freq + ) + else: + raise ValueError( + f"Cannot convert from {self.dtype} to {dtype}. " + "Supported resolutions are 's', 'ms', 'us', 'ns'" + ) + + return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy) + + def __iter__(self) -> Iterator: + if self.ndim > 1: + for i in range(len(self)): + yield self[i] + else: + # convert in chunks of 10k for efficiency + data = self._ndarray + length = len(self) + chunksize = 10000 + chunks = (length // chunksize) + 1 + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, length) + converted = ints_to_pytimedelta(data[start_i:end_i], box=True) + yield from converted + + # ---------------------------------------------------------------- + # Reductions + + def sum( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + initial=None, + skipna: bool = True, + min_count: int = 0, + ): + nv.validate_sum( + (), {"dtype": dtype, "out": out, "keepdims": keepdims, "initial": initial} + ) + + result = nanops.nansum( + self._ndarray, axis=axis, skipna=skipna, min_count=min_count + ) + return self._wrap_reduction_result(axis, result) + + def std( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std" + ) + + result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + if axis is None or self.ndim == 1: + return self._box_func(result) + return self._from_backing_data(result) + + # ---------------------------------------------------------------- + # Accumulations + + def _accumulate(self, name: str, *, skipna: bool = True, **kwargs): + if name == "cumsum": + op = getattr(datetimelike_accumulations, name) + result = op(self._ndarray.copy(), skipna=skipna, **kwargs) + + return type(self)._simple_new(result, freq=None, dtype=self.dtype) + elif name == "cumprod": + raise TypeError("cumprod not supported for Timedelta.") + + else: + return super()._accumulate(name, skipna=skipna, **kwargs) + + # ---------------------------------------------------------------- + # Rendering Methods + + def 
_formatter(self, boxed: bool = False): + from pandas.io.formats.format import get_format_timedelta64 + + return get_format_timedelta64(self, box=True) + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None, **kwargs + ) -> npt.NDArray[np.object_]: + from pandas.io.formats.format import get_format_timedelta64 + + # Relies on TimeDelta._repr_base + formatter = get_format_timedelta64(self, na_rep) + # equiv: np.array([formatter(x) for x in self._ndarray]) + # but independent of dimension + return np.frompyfunc(formatter, 1, 1)(self._ndarray) + + # ---------------------------------------------------------------- + # Arithmetic Methods + + def _add_offset(self, other): + assert not isinstance(other, Tick) + raise TypeError( + f"cannot add the type {type(other).__name__} to a {type(self).__name__}" + ) + + @unpack_zerodim_and_defer("__mul__") + def __mul__(self, other) -> Self: + if is_scalar(other): + # numpy will accept float and int, raise TypeError for others + result = self._ndarray * other + freq = None + if self.freq is not None and not isna(other): + freq = self.freq * other + if freq.n == 0: + # GH#51575 Better to have no freq than an incorrect one + freq = None + return type(self)._simple_new(result, dtype=result.dtype, freq=freq) + + if not hasattr(other, "dtype"): + # list, tuple + other = np.array(other) + if len(other) != len(self) and not lib.is_np_dtype(other.dtype, "m"): + # Exclude timedelta64 here so we correctly raise TypeError + # for that instead of ValueError + raise ValueError("Cannot multiply with unequal lengths") + + if is_object_dtype(other.dtype): + # this multiplication will succeed only if all elements of other + # are int or float scalars, so we will end up with + # timedelta64[ns]-dtyped result + arr = self._ndarray + result = [arr[n] * other[n] for n in range(len(self))] + result = np.array(result) + return type(self)._simple_new(result, dtype=result.dtype) + + # numpy will accept float or int dtype, raise TypeError for others + result = self._ndarray * other + return type(self)._simple_new(result, dtype=result.dtype) + + __rmul__ = __mul__ + + def _scalar_divlike_op(self, other, op): + """ + Shared logic for __truediv__, __rtruediv__, __floordiv__, __rfloordiv__ + with scalar 'other'. + """ + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + # mypy assumes that __new__ returns an instance of the class + # github.com/python/mypy/issues/1020 + if cast("Timedelta | NaTType", other) is NaT: + # specifically timedelta64-NaT + res = np.empty(self.shape, dtype=np.float64) + res.fill(np.nan) + return res + + # otherwise, dispatch to Timedelta implementation + return op(self._ndarray, other) + + else: + # caller is responsible for checking lib.is_scalar(other) + # assume other is numeric, otherwise numpy will raise + + if op in [roperator.rtruediv, roperator.rfloordiv]: + raise TypeError( + f"Cannot divide {type(other).__name__} by {type(self).__name__}" + ) + + result = op(self._ndarray, other) + freq = None + + if self.freq is not None: + # Note: freq gets division, not floor-division, even if op + # is floordiv. + freq = self.freq / other + if freq.nanos == 0 and self.freq.nanos != 0: + # e.g. if self.freq is Nano(1) then dividing by 2 + # rounds down to zero + freq = None + + return type(self)._simple_new(result, dtype=result.dtype, freq=freq) + + def _cast_divlike_op(self, other): + if not hasattr(other, "dtype"): + # e.g. 
list, tuple + other = np.array(other) + + if len(other) != len(self): + raise ValueError("Cannot divide vectors with unequal lengths") + return other + + def _vector_divlike_op(self, other, op) -> np.ndarray | Self: + """ + Shared logic for __truediv__, __floordiv__, and their reversed versions + with timedelta64-dtype ndarray other. + """ + # Let numpy handle it + result = op(self._ndarray, np.asarray(other)) + + if (is_integer_dtype(other.dtype) or is_float_dtype(other.dtype)) and op in [ + operator.truediv, + operator.floordiv, + ]: + return type(self)._simple_new(result, dtype=result.dtype) + + if op in [operator.floordiv, roperator.rfloordiv]: + mask = self.isna() | isna(other) + if mask.any(): + result = result.astype(np.float64) + np.putmask(result, mask, np.nan) + + return result + + @unpack_zerodim_and_defer("__truediv__") + def __truediv__(self, other): + # timedelta / X is well-defined for timedelta-like or numeric X + op = operator.truediv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if ( + lib.is_np_dtype(other.dtype, "m") + or is_integer_dtype(other.dtype) + or is_float_dtype(other.dtype) + ): + return self._vector_divlike_op(other, op) + + if is_object_dtype(other.dtype): + other = np.asarray(other) + if self.ndim > 1: + res_cols = [left / right for left, right in zip(self, other)] + res_cols2 = [x.reshape(1, -1) for x in res_cols] + result = np.concatenate(res_cols2, axis=0) + else: + result = truediv_object_array(self._ndarray, other) + + return result + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__rtruediv__") + def __rtruediv__(self, other): + # X / timedelta is defined only for timedelta-like X + op = roperator.rtruediv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if lib.is_np_dtype(other.dtype, "m"): + return self._vector_divlike_op(other, op) + + elif is_object_dtype(other.dtype): + # Note: unlike in __truediv__, we do not _need_ to do type + # inference on the result. It does not raise, a numeric array + # is returned. 
GH#23829 + result_list = [other[n] / self[n] for n in range(len(self))] + return np.array(result_list) + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__floordiv__") + def __floordiv__(self, other): + op = operator.floordiv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if ( + lib.is_np_dtype(other.dtype, "m") + or is_integer_dtype(other.dtype) + or is_float_dtype(other.dtype) + ): + return self._vector_divlike_op(other, op) + + elif is_object_dtype(other.dtype): + other = np.asarray(other) + if self.ndim > 1: + res_cols = [left // right for left, right in zip(self, other)] + res_cols2 = [x.reshape(1, -1) for x in res_cols] + result = np.concatenate(res_cols2, axis=0) + else: + result = floordiv_object_array(self._ndarray, other) + + assert result.dtype == object + return result + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__rfloordiv__") + def __rfloordiv__(self, other): + op = roperator.rfloordiv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if lib.is_np_dtype(other.dtype, "m"): + return self._vector_divlike_op(other, op) + + elif is_object_dtype(other.dtype): + result_list = [other[n] // self[n] for n in range(len(self))] + result = np.array(result_list) + return result + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__mod__") + def __mod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + return self - (self // other) * other + + @unpack_zerodim_and_defer("__rmod__") + def __rmod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + return other - (other // self) * self + + @unpack_zerodim_and_defer("__divmod__") + def __divmod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + + res1 = self // other + res2 = self - res1 * other + return res1, res2 + + @unpack_zerodim_and_defer("__rdivmod__") + def __rdivmod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + + res1 = other // self + res2 = other - res1 * self + return res1, res2 + + def __neg__(self) -> TimedeltaArray: + freq = None + if self.freq is not None: + freq = -self.freq + return type(self)._simple_new(-self._ndarray, dtype=self.dtype, freq=freq) + + def __pos__(self) -> TimedeltaArray: + return type(self)._simple_new( + self._ndarray.copy(), dtype=self.dtype, freq=self.freq + ) + + def __abs__(self) -> TimedeltaArray: + # Note: freq is not preserved + return type(self)._simple_new(np.abs(self._ndarray), dtype=self.dtype) + + # ---------------------------------------------------------------- + # Conversion Methods - Vectorized analogues of Timedelta methods + + def total_seconds(self) -> npt.NDArray[np.float64]: + """ + Return total duration of each element expressed in seconds. + + This method is available directly on TimedeltaArray, TimedeltaIndex + and on Series containing timedelta values under the ``.dt`` namespace. + + Returns + ------- + ndarray, Index or Series + When the calling object is a TimedeltaArray, the return type + is ndarray. 
When the calling object is a TimedeltaIndex, + the return type is an Index with a float64 dtype. When the calling object + is a Series, the return type is Series of type `float64` whose + index is the same as the original. + + See Also + -------- + datetime.timedelta.total_seconds : Standard library version + of this method. + TimedeltaIndex.components : Return a DataFrame with components of + each Timedelta. + + Examples + -------- + **Series** + + >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) + >>> s + 0 0 days + 1 1 days + 2 2 days + 3 3 days + 4 4 days + dtype: timedelta64[ns] + + >>> s.dt.total_seconds() + 0 0.0 + 1 86400.0 + 2 172800.0 + 3 259200.0 + 4 345600.0 + dtype: float64 + + **TimedeltaIndex** + + >>> idx = pd.to_timedelta(np.arange(5), unit='d') + >>> idx + TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq=None) + + >>> idx.total_seconds() + Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64') + """ + pps = periods_per_second(self._creso) + return self._maybe_mask_results(self.asi8 / pps, fill_value=None) + + def to_pytimedelta(self) -> npt.NDArray[np.object_]: + """ + Return an ndarray of datetime.timedelta objects. + + Returns + ------- + numpy.ndarray + + Examples + -------- + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D') + >>> tdelta_idx + TimedeltaIndex(['1 days', '2 days', '3 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.to_pytimedelta() + array([datetime.timedelta(days=1), datetime.timedelta(days=2), + datetime.timedelta(days=3)], dtype=object) + """ + return ints_to_pytimedelta(self._ndarray) + + days_docstring = textwrap.dedent( + """Number of days for each element. + + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='d')) + >>> ser + 0 1 days + 1 2 days + 2 3 days + dtype: timedelta64[ns] + >>> ser.dt.days + 0 1 + 1 2 + 2 3 + dtype: int64 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"]) + >>> tdelta_idx + TimedeltaIndex(['0 days', '10 days', '20 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.days + Index([0, 10, 20], dtype='int64')""" + ) + days = _field_accessor("days", "days", days_docstring) + + seconds_docstring = textwrap.dedent( + """Number of seconds (>= 0 and less than 1 day) for each element. + + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='s')) + >>> ser + 0 0 days 00:00:01 + 1 0 days 00:00:02 + 2 0 days 00:00:03 + dtype: timedelta64[ns] + >>> ser.dt.seconds + 0 1 + 1 2 + 2 3 + dtype: int32 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='s') + >>> tdelta_idx + TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.seconds + Index([1, 2, 3], dtype='int32')""" + ) + seconds = _field_accessor( + "seconds", + "seconds", + seconds_docstring, + ) + + microseconds_docstring = textwrap.dedent( + """Number of microseconds (>= 0 and less than 1 second) for each element. 
+ + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='us')) + >>> ser + 0 0 days 00:00:00.000001 + 1 0 days 00:00:00.000002 + 2 0 days 00:00:00.000003 + dtype: timedelta64[ns] + >>> ser.dt.microseconds + 0 1 + 1 2 + 2 3 + dtype: int32 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='us') + >>> tdelta_idx + TimedeltaIndex(['0 days 00:00:00.000001', '0 days 00:00:00.000002', + '0 days 00:00:00.000003'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.microseconds + Index([1, 2, 3], dtype='int32')""" + ) + microseconds = _field_accessor( + "microseconds", + "microseconds", + microseconds_docstring, + ) + + nanoseconds_docstring = textwrap.dedent( + """Number of nanoseconds (>= 0 and less than 1 microsecond) for each element. + + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='ns')) + >>> ser + 0 0 days 00:00:00.000000001 + 1 0 days 00:00:00.000000002 + 2 0 days 00:00:00.000000003 + dtype: timedelta64[ns] + >>> ser.dt.nanoseconds + 0 1 + 1 2 + 2 3 + dtype: int32 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='ns') + >>> tdelta_idx + TimedeltaIndex(['0 days 00:00:00.000000001', '0 days 00:00:00.000000002', + '0 days 00:00:00.000000003'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.nanoseconds + Index([1, 2, 3], dtype='int32')""" + ) + nanoseconds = _field_accessor( + "nanoseconds", + "nanoseconds", + nanoseconds_docstring, + ) + + @property + def components(self) -> DataFrame: + """ + Return a DataFrame of the individual resolution components of the Timedeltas. + + The components (days, hours, minutes seconds, milliseconds, microseconds, + nanoseconds) are returned as columns in a DataFrame. + + Returns + ------- + DataFrame + + Examples + -------- + >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns']) + >>> tdelta_idx + TimedeltaIndex(['1 days 00:03:00.000002042'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.components + days hours minutes seconds milliseconds microseconds nanoseconds + 0 1 0 3 0 0 2 42 + """ + from pandas import DataFrame + + columns = [ + "days", + "hours", + "minutes", + "seconds", + "milliseconds", + "microseconds", + "nanoseconds", + ] + hasnans = self._hasna + if hasnans: + + def f(x): + if isna(x): + return [np.nan] * len(columns) + return x.components + + else: + + def f(x): + return x.components + + result = DataFrame([f(x) for x in self], columns=columns) + if not hasnans: + result = result.astype("int64") + return result + + +# --------------------------------------------------------------------- +# Constructor Helpers + + +def sequence_to_td64ns( + data, + copy: bool = False, + unit=None, + errors: DateTimeErrorChoices = "raise", +) -> tuple[np.ndarray, Tick | None]: + """ + Parameters + ---------- + data : list-like + copy : bool, default False + unit : str, optional + The timedelta unit to treat integers as multiples of. For numeric + data this defaults to ``'ns'``. + Must be un-specified if the data contains a str and ``errors=="raise"``. + errors : {"raise", "coerce", "ignore"}, default "raise" + How to handle elements that cannot be converted to timedelta64[ns]. + See ``pandas.to_timedelta`` for details. + + Returns + ------- + converted : numpy.ndarray + The sequence converted to a numpy array with dtype ``timedelta64[ns]``. + inferred_freq : Tick or None + The inferred frequency of the sequence. + + Raises + ------ + ValueError : Data cannot be converted to timedelta64[ns]. 
+ + Notes + ----- + Unlike `pandas.to_timedelta`, if setting ``errors=ignore`` will not cause + errors to be ignored; they are caught and subsequently ignored at a + higher level. + """ + assert unit not in ["Y", "y", "M"] # caller is responsible for checking + + inferred_freq = None + if unit is not None: + unit = parse_timedelta_unit(unit) + + data, copy = dtl.ensure_arraylike_for_datetimelike( + data, copy, cls_name="TimedeltaArray" + ) + + if isinstance(data, TimedeltaArray): + inferred_freq = data.freq + + # Convert whatever we have into timedelta64[ns] dtype + if data.dtype == object or is_string_dtype(data.dtype): + # no need to make a copy, need to convert if string-dtyped + data = _objects_to_td64ns(data, unit=unit, errors=errors) + copy = False + + elif is_integer_dtype(data.dtype): + # treat as multiples of the given unit + data, copy_made = _ints_to_td64ns(data, unit=unit) + copy = copy and not copy_made + + elif is_float_dtype(data.dtype): + # cast the unit, multiply base/frac separately + # to avoid precision issues from float -> int + if isinstance(data.dtype, ExtensionDtype): + mask = data._mask + data = data._data + else: + mask = np.isnan(data) + + data = cast_from_unit_vectorized(data, unit or "ns") + data[mask] = iNaT + data = data.view("m8[ns]") + copy = False + + elif lib.is_np_dtype(data.dtype, "m"): + if not is_supported_dtype(data.dtype): + # cast to closest supported unit, i.e. s or ns + new_dtype = get_supported_dtype(data.dtype) + data = astype_overflowsafe(data, dtype=new_dtype, copy=False) + copy = False + + else: + # This includes datetime64-dtype, see GH#23539, GH#29794 + raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]") + + if not copy: + data = np.asarray(data) + else: + data = np.array(data, copy=copy) + + assert data.dtype.kind == "m" + assert data.dtype != "m8" # i.e. not unit-less + + return data, inferred_freq + + +def _ints_to_td64ns(data, unit: str = "ns"): + """ + Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating + the integers as multiples of the given timedelta unit. + + Parameters + ---------- + data : numpy.ndarray with integer-dtype + unit : str, default "ns" + The timedelta unit to treat integers as multiples of. + + Returns + ------- + numpy.ndarray : timedelta64[ns] array converted from data + bool : whether a copy was made + """ + copy_made = False + unit = unit if unit is not None else "ns" + + if data.dtype != np.int64: + # converting to int64 makes a copy, so we can avoid + # re-copying later + data = data.astype(np.int64) + copy_made = True + + if unit != "ns": + dtype_str = f"timedelta64[{unit}]" + data = data.view(dtype_str) + + data = astype_overflowsafe(data, dtype=TD64NS_DTYPE) + + # the astype conversion makes a copy, so we can avoid re-copying later + copy_made = True + + else: + data = data.view("timedelta64[ns]") + + return data, copy_made + + +def _objects_to_td64ns(data, unit=None, errors: DateTimeErrorChoices = "raise"): + """ + Convert a object-dtyped or string-dtyped array into an + timedelta64[ns]-dtyped array. + + Parameters + ---------- + data : ndarray or Index + unit : str, default "ns" + The timedelta unit to treat integers as multiples of. + Must not be specified if the data contains a str. + errors : {"raise", "coerce", "ignore"}, default "raise" + How to handle elements that cannot be converted to timedelta64[ns]. + See ``pandas.to_timedelta`` for details. 
+ + Returns + ------- + numpy.ndarray : timedelta64[ns] array converted from data + + Raises + ------ + ValueError : Data cannot be converted to timedelta64[ns]. + + Notes + ----- + Unlike `pandas.to_timedelta`, if setting `errors=ignore` will not cause + errors to be ignored; they are caught and subsequently ignored at a + higher level. + """ + # coerce Index to np.ndarray, converting string-dtype if necessary + values = np.asarray(data, dtype=np.object_) + + result = array_to_timedelta64(values, unit=unit, errors=errors) + return result.view("timedelta64[ns]") + + +def _validate_td64_dtype(dtype) -> DtypeObj: + dtype = pandas_dtype(dtype) + if dtype == np.dtype("m8"): + # no precision disallowed GH#24806 + msg = ( + "Passing in 'timedelta' dtype with no precision is not allowed. " + "Please pass in 'timedelta64[ns]' instead." + ) + raise ValueError(msg) + + if not lib.is_np_dtype(dtype, "m"): + raise ValueError(f"dtype '{dtype}' is invalid, should be np.timedelta64 dtype") + elif not is_supported_dtype(dtype): + raise ValueError("Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'") + + return dtype diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d329da9036a2e27eac17867de8963c6d459ba288 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3bee516a05801900d84ce6a975c6486ce305c06 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c725309a4c2f625b37188e53cc558b737f324d7d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..739f20eaec1b0b8a21c2460cd53dd344c872fe2c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..db7575e11fb56bb42f79a39b2668a8325a5a92a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fb15218ebb1d79cb94a3ef366f077040fa2d6b5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a717c436dcf7f0f2f6f735aad77e6734fbfbe1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1c7cfcb66d5458d21402b36d4df8578462f1a1d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e254791e0db3d4027b61599a961b28ff14984d99 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60eb45c6fd03eb13029da2fec020ac89ea375dd1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6adb59b66fb6eec5e53ac1476d05c85cac914cec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6b8ff8b09de870f686f425bea7e61f96982be77 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30903e333febaab3f46e8aa050bc9bca5ee1dc46 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/align.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/align.py new file mode 100644 index 0000000000000000000000000000000000000000..cd852ba9249cf26ea39b7e51bbfa754ffe9c10ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/align.py @@ -0,0 +1,213 @@ +""" +Core eval alignment algorithms. +""" +from __future__ import annotations + +from functools import ( + partial, + wraps, +) +from typing import ( + TYPE_CHECKING, + Callable, +) +import warnings + +import numpy as np + +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.computation.common import result_type_many + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import F + + from pandas.core.generic import NDFrame + from pandas.core.indexes.api import Index + + +def _align_core_single_unary_op( + term, +) -> tuple[partial | type[NDFrame], dict[str, Index] | None]: + typ: partial | type[NDFrame] + axes: dict[str, Index] | None = None + + if isinstance(term.value, np.ndarray): + typ = partial(np.asanyarray, dtype=term.value.dtype) + else: + typ = type(term.value) + if hasattr(term.value, "axes"): + axes = _zip_axes_from_type(typ, term.value.axes) + + return typ, axes + + +def _zip_axes_from_type( + typ: type[NDFrame], new_axes: Sequence[Index] +) -> dict[str, Index]: + return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)} + + +def _any_pandas_objects(terms) -> bool: + """ + Check a sequence of terms for instances of PandasObject. 
+ """ + return any(isinstance(term.value, PandasObject) for term in terms) + + +def _filter_special_cases(f) -> Callable[[F], F]: + @wraps(f) + def wrapper(terms): + # single unary operand + if len(terms) == 1: + return _align_core_single_unary_op(terms[0]) + + term_values = (term.value for term in terms) + + # we don't have any pandas objects + if not _any_pandas_objects(terms): + return result_type_many(*term_values), None + + return f(terms) + + return wrapper + + +@_filter_special_cases +def _align_core(terms): + term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")] + term_dims = [terms[i].value.ndim for i in term_index] + + from pandas import Series + + ndims = Series(dict(zip(term_index, term_dims))) + + # initial axes are the axes of the largest-axis'd term + biggest = terms[ndims.idxmax()].value + typ = biggest._constructor + axes = biggest.axes + naxes = len(axes) + gt_than_one_axis = naxes > 1 + + for value in (terms[i].value for i in term_index): + is_series = isinstance(value, ABCSeries) + is_series_and_gt_one_axis = is_series and gt_than_one_axis + + for axis, items in enumerate(value.axes): + if is_series_and_gt_one_axis: + ax, itm = naxes - 1, value.index + else: + ax, itm = axis, items + + if not axes[ax].is_(itm): + axes[ax] = axes[ax].union(itm) + + for i, ndim in ndims.items(): + for axis, items in zip(range(ndim), axes): + ti = terms[i].value + + if hasattr(ti, "reindex"): + transpose = isinstance(ti, ABCSeries) and naxes > 1 + reindexer = axes[naxes - 1] if transpose else items + + term_axis_size = len(ti.axes[axis]) + reindexer_size = len(reindexer) + + ordm = np.log10(max(1, abs(reindexer_size - term_axis_size))) + if ordm >= 1 and reindexer_size >= 10000: + w = ( + f"Alignment difference on axis {axis} is larger " + f"than an order of magnitude on term {repr(terms[i].name)}, " + f"by more than {ordm:.4g}; performance may suffer." + ) + warnings.warn( + w, category=PerformanceWarning, stacklevel=find_stack_level() + ) + + obj = ti.reindex(reindexer, axis=axis, copy=False) + terms[i].update(obj) + + terms[i].update(terms[i].value.values) + + return typ, _zip_axes_from_type(typ, axes) + + +def align_terms(terms): + """ + Align a set of terms. + """ + try: + # flatten the parse tree (a nested list, really) + terms = list(com.flatten(terms)) + except TypeError: + # can't iterate so it must just be a constant or single variable + if isinstance(terms.value, (ABCSeries, ABCDataFrame)): + typ = type(terms.value) + return typ, _zip_axes_from_type(typ, terms.value.axes) + return np.result_type(terms.type), None + + # if all resolved variables are numeric scalars + if all(term.is_scalar for term in terms): + return result_type_many(*(term.value for term in terms)).type, None + + # perform the main alignment + typ, axes = _align_core(terms) + return typ, axes + + +def reconstruct_object(typ, obj, axes, dtype): + """ + Reconstruct an object given its type, raw value, and possibly empty + (None) axes. + + Parameters + ---------- + typ : object + A type + obj : object + The value to use in the type constructor + axes : dict + The axes to use to construct the resulting pandas object + + Returns + ------- + ret : typ + An object of type ``typ`` with the value `obj` and possible axes + `axes`. 
+ """ + try: + typ = typ.type + except AttributeError: + pass + + res_t = np.result_type(obj.dtype, dtype) + + if not isinstance(typ, partial) and issubclass(typ, PandasObject): + return typ(obj, dtype=res_t, **axes) + + # special case for pathological things like ~True/~False + if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_: + ret_value = res_t.type(obj) + else: + ret_value = typ(obj).astype(res_t) + # The condition is to distinguish 0-dim array (returned in case of + # scalar) and 1 element array + # e.g. np.array(0) and np.array([0]) + if ( + len(obj.shape) == 1 + and len(obj) == 1 + and not isinstance(ret_value, np.ndarray) + ): + ret_value = np.array([ret_value]).astype(res_t) + + return ret_value diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/api.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/api.py new file mode 100644 index 0000000000000000000000000000000000000000..bd3be5b3f8c42267c8a61421b7f0877a01b33d34 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/api.py @@ -0,0 +1,2 @@ +__all__ = ["eval"] +from pandas.core.computation.eval import eval diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/check.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/check.py new file mode 100644 index 0000000000000000000000000000000000000000..caccf34f811112abbe04d965d6f6be1e21527e8b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/check.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from pandas.compat._optional import import_optional_dependency + +ne = import_optional_dependency("numexpr", errors="warn") +NUMEXPR_INSTALLED = ne is not None + +__all__ = ["NUMEXPR_INSTALLED"] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/common.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/common.py new file mode 100644 index 0000000000000000000000000000000000000000..115191829f044a7d6d7f17c279025ccc26d44d04 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/common.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from functools import reduce + +import numpy as np + +from pandas._config import get_option + + +def ensure_decoded(s) -> str: + """ + If we have bytes, decode them to unicode. + """ + if isinstance(s, (np.bytes_, bytes)): + s = s.decode(get_option("display.encoding")) + return s + + +def result_type_many(*arrays_and_dtypes): + """ + Wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32) + argument limit. 
+ """ + try: + return np.result_type(*arrays_and_dtypes) + except ValueError: + # we have > NPY_MAXARGS terms in our expression + return reduce(np.result_type, arrays_and_dtypes) + except TypeError: + from pandas.core.dtypes.cast import find_common_type + from pandas.core.dtypes.common import is_extension_array_dtype + + arr_and_dtypes = list(arrays_and_dtypes) + ea_dtypes, non_ea_dtypes = [], [] + for arr_or_dtype in arr_and_dtypes: + if is_extension_array_dtype(arr_or_dtype): + ea_dtypes.append(arr_or_dtype) + else: + non_ea_dtypes.append(arr_or_dtype) + + if non_ea_dtypes: + try: + np_dtype = np.result_type(*non_ea_dtypes) + except ValueError: + np_dtype = reduce(np.result_type, arrays_and_dtypes) + return find_common_type(ea_dtypes + [np_dtype]) + + return find_common_type(ea_dtypes) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/engines.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/engines.py new file mode 100644 index 0000000000000000000000000000000000000000..a3a05a9d75c6ed6b80564a69ff5b6cf5a648c1b3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/engines.py @@ -0,0 +1,143 @@ +""" +Engine classes for :func:`~pandas.eval` +""" +from __future__ import annotations + +import abc +from typing import TYPE_CHECKING + +from pandas.errors import NumExprClobberingError + +from pandas.core.computation.align import ( + align_terms, + reconstruct_object, +) +from pandas.core.computation.ops import ( + MATHOPS, + REDUCTIONS, +) + +from pandas.io.formats import printing + +if TYPE_CHECKING: + from pandas.core.computation.expr import Expr + +_ne_builtins = frozenset(MATHOPS + REDUCTIONS) + + +def _check_ne_builtin_clash(expr: Expr) -> None: + """ + Attempt to prevent foot-shooting in a helpful way. + + Parameters + ---------- + expr : Expr + Terms can contain + """ + names = expr.names + overlap = names & _ne_builtins + + if overlap: + s = ", ".join([repr(x) for x in overlap]) + raise NumExprClobberingError( + f'Variables in expression "{expr}" overlap with builtins: ({s})' + ) + + +class AbstractEngine(metaclass=abc.ABCMeta): + """Object serving as a base class for all engines.""" + + has_neg_frac = False + + def __init__(self, expr) -> None: + self.expr = expr + self.aligned_axes = None + self.result_type = None + + def convert(self) -> str: + """ + Convert an expression for evaluation. + + Defaults to return the expression as a string. + """ + return printing.pprint_thing(self.expr) + + def evaluate(self) -> object: + """ + Run the engine on the expression. + + This method performs alignment which is necessary no matter what engine + is being used, thus its implementation is in the base class. + + Returns + ------- + object + The result of the passed expression. + """ + if not self._is_aligned: + self.result_type, self.aligned_axes = align_terms(self.expr.terms) + + # make sure no names in resolvers and locals/globals clash + res = self._evaluate() + return reconstruct_object( + self.result_type, res, self.aligned_axes, self.expr.terms.return_type + ) + + @property + def _is_aligned(self) -> bool: + return self.aligned_axes is not None and self.result_type is not None + + @abc.abstractmethod + def _evaluate(self): + """ + Return an evaluated expression. + + Parameters + ---------- + env : Scope + The local and global environment in which to evaluate an + expression. + + Notes + ----- + Must be implemented by subclasses. 
+ """ + + +class NumExprEngine(AbstractEngine): + """NumExpr engine class""" + + has_neg_frac = True + + def _evaluate(self): + import numexpr as ne + + # convert the expression to a valid numexpr expression + s = self.convert() + + env = self.expr.env + scope = env.full_scope + _check_ne_builtin_clash(self.expr) + return ne.evaluate(s, local_dict=scope) + + +class PythonEngine(AbstractEngine): + """ + Evaluate an expression in Python space. + + Mostly for testing purposes. + """ + + has_neg_frac = False + + def evaluate(self): + return self.expr() + + def _evaluate(self) -> None: + pass + + +ENGINES: dict[str, type[AbstractEngine]] = { + "numexpr": NumExprEngine, + "python": PythonEngine, +} diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/eval.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..f1fe528de06f8eeb5bdfb0f22339499c15ebbd9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/eval.py @@ -0,0 +1,415 @@ +""" +Top level ``eval`` module. +""" +from __future__ import annotations + +import tokenize +from typing import TYPE_CHECKING +import warnings + +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.common import is_extension_array_dtype + +from pandas.core.computation.engines import ENGINES +from pandas.core.computation.expr import ( + PARSERS, + Expr, +) +from pandas.core.computation.parsing import tokenize_string +from pandas.core.computation.scope import ensure_scope +from pandas.core.generic import NDFrame + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas.core.computation.ops import BinOp + + +def _check_engine(engine: str | None) -> str: + """ + Make sure a valid engine is passed. + + Parameters + ---------- + engine : str + String to validate. + + Raises + ------ + KeyError + * If an invalid engine is passed. + ImportError + * If numexpr was requested but doesn't exist. + + Returns + ------- + str + Engine name. + """ + from pandas.core.computation.check import NUMEXPR_INSTALLED + from pandas.core.computation.expressions import USE_NUMEXPR + + if engine is None: + engine = "numexpr" if USE_NUMEXPR else "python" + + if engine not in ENGINES: + valid_engines = list(ENGINES.keys()) + raise KeyError( + f"Invalid engine '{engine}' passed, valid engines are {valid_engines}" + ) + + # TODO: validate this in a more general way (thinking of future engines + # that won't necessarily be import-able) + # Could potentially be done on engine instantiation + if engine == "numexpr" and not NUMEXPR_INSTALLED: + raise ImportError( + "'numexpr' is not installed or an unsupported version. Cannot use " + "engine='numexpr' for query/eval if 'numexpr' is not installed" + ) + + return engine + + +def _check_parser(parser: str): + """ + Make sure a valid parser is passed. 
+ + Parameters + ---------- + parser : str + + Raises + ------ + KeyError + * If an invalid parser is passed + """ + if parser not in PARSERS: + raise KeyError( + f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}" + ) + + +def _check_resolvers(resolvers): + if resolvers is not None: + for resolver in resolvers: + if not hasattr(resolver, "__getitem__"): + name = type(resolver).__name__ + raise TypeError( + f"Resolver of type '{name}' does not " + "implement the __getitem__ method" + ) + + +def _check_expression(expr): + """ + Make sure an expression is not an empty string + + Parameters + ---------- + expr : object + An object that can be converted to a string + + Raises + ------ + ValueError + * If expr is an empty string + """ + if not expr: + raise ValueError("expr cannot be an empty string") + + +def _convert_expression(expr) -> str: + """ + Convert an object to an expression. + + This function converts an object to an expression (a unicode string) and + checks to make sure it isn't empty after conversion. This is used to + convert operators to their string representation for recursive calls to + :func:`~pandas.eval`. + + Parameters + ---------- + expr : object + The object to be converted to a string. + + Returns + ------- + str + The string representation of an object. + + Raises + ------ + ValueError + * If the expression is empty. + """ + s = pprint_thing(expr) + _check_expression(s) + return s + + +def _check_for_locals(expr: str, stack_level: int, parser: str): + at_top_of_stack = stack_level == 0 + not_pandas_parser = parser != "pandas" + + if not_pandas_parser: + msg = "The '@' prefix is only supported by the pandas parser" + elif at_top_of_stack: + msg = ( + "The '@' prefix is not allowed in top-level eval calls.\n" + "please refer to your variables by name without the '@' prefix." + ) + + if at_top_of_stack or not_pandas_parser: + for toknum, tokval in tokenize_string(expr): + if toknum == tokenize.OP and tokval == "@": + raise SyntaxError(msg) + + +def eval( + expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users + parser: str = "pandas", + engine: str | None = None, + local_dict=None, + global_dict=None, + resolvers=(), + level: int = 0, + target=None, + inplace: bool = False, +): + """ + Evaluate a Python expression as a string using various backends. + + The following arithmetic operations are supported: ``+``, ``-``, ``*``, + ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following + boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). + Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, + :keyword:`or`, and :keyword:`not` with the same semantics as the + corresponding bitwise operators. :class:`~pandas.Series` and + :class:`~pandas.DataFrame` objects are supported and behave as they would + with plain ol' Python evaluation. + + Parameters + ---------- + expr : str + The expression to evaluate. This string cannot contain any Python + `statements + `__, + only Python `expressions + `__. + parser : {'pandas', 'python'}, default 'pandas' + The parser to use to construct the syntax tree from the expression. The + default of ``'pandas'`` parses code slightly different than standard + Python. Alternatively, you can parse an expression using the + ``'python'`` parser to retain strict Python semantics. See the + :ref:`enhancing performance ` documentation for + more details. + engine : {'python', 'numexpr'}, default 'numexpr' + + The engine used to evaluate the expression. 
Supported engines are + + - None : tries to use ``numexpr``, falls back to ``python`` + - ``'numexpr'`` : This default engine evaluates pandas objects using + numexpr for large speed ups in complex expressions with large frames. + - ``'python'`` : Performs operations as if you had ``eval``'d in top + level python. This engine is generally not that useful. + + More backends may be available in the future. + local_dict : dict or None, optional + A dictionary of local variables, taken from locals() by default. + global_dict : dict or None, optional + A dictionary of global variables, taken from globals() by default. + resolvers : list of dict-like or None, optional + A list of objects implementing the ``__getitem__`` special method that + you can use to inject an additional collection of namespaces to use for + variable lookup. For example, this is used in the + :meth:`~DataFrame.query` method to inject the + ``DataFrame.index`` and ``DataFrame.columns`` + variables that refer to their respective :class:`~pandas.DataFrame` + instance attributes. + level : int, optional + The number of prior stack frames to traverse and add to the current + scope. Most users will **not** need to change this parameter. + target : object, optional, default None + This is the target object for assignment. It is used when there is + variable assignment in the expression. If so, then `target` must + support item assignment with string keys, and if a copy is being + returned, it must also support `.copy()`. + inplace : bool, default False + If `target` is provided, and the expression mutates `target`, whether + to modify `target` inplace. Otherwise, return a copy of `target` with + the mutation. + + Returns + ------- + ndarray, numeric scalar, DataFrame, Series, or None + The completion value of evaluating the given code or None if ``inplace=True``. + + Raises + ------ + ValueError + There are many instances where such an error can be raised: + + - `target=None`, but the expression is multiline. + - The expression is multiline, but not all them have item assignment. + An example of such an arrangement is this: + + a = b + 1 + a + 2 + + Here, there are expressions on different lines, making it multiline, + but the last line has no variable assigned to the output of `a + 2`. + - `inplace=True`, but the expression is missing item assignment. + - Item assignment is provided, but the `target` does not support + string item assignment. + - Item assignment is provided and `inplace=False`, but the `target` + does not support the `.copy()` method + + See Also + -------- + DataFrame.query : Evaluates a boolean expression to query the columns + of a frame. + DataFrame.eval : Evaluate a string describing operations on + DataFrame columns. + + Notes + ----- + The ``dtype`` of any objects involved in an arithmetic ``%`` operation are + recursively cast to ``float64``. + + See the :ref:`enhancing performance ` documentation for + more details. 
+ + Examples + -------- + >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]}) + >>> df + animal age + 0 dog 10 + 1 pig 20 + + We can add a new column using ``pd.eval``: + + >>> pd.eval("double_age = df.age * 2", target=df) + animal age double_age + 0 dog 10 20 + 1 pig 20 40 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + + exprs: list[str | BinOp] + if isinstance(expr, str): + _check_expression(expr) + exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""] + else: + # ops.BinOp; for internal compat, not intended to be passed by users + exprs = [expr] + multi_line = len(exprs) > 1 + + if multi_line and target is None: + raise ValueError( + "multi-line expressions are only valid in the " + "context of data, use DataFrame.eval" + ) + engine = _check_engine(engine) + _check_parser(parser) + _check_resolvers(resolvers) + + ret = None + first_expr = True + target_modified = False + + for expr in exprs: + expr = _convert_expression(expr) + _check_for_locals(expr, level, parser) + + # get our (possibly passed-in) scope + env = ensure_scope( + level + 1, + global_dict=global_dict, + local_dict=local_dict, + resolvers=resolvers, + target=target, + ) + + parsed_expr = Expr(expr, engine=engine, parser=parser, env=env) + + if engine == "numexpr" and ( + is_extension_array_dtype(parsed_expr.terms.return_type) + or getattr(parsed_expr.terms, "operand_types", None) is not None + and any( + is_extension_array_dtype(elem) + for elem in parsed_expr.terms.operand_types + ) + ): + warnings.warn( + "Engine has switched to 'python' because numexpr does not support " + "extension array dtypes. Please set your engine to python manually.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + engine = "python" + + # construct the engine and evaluate the parsed expression + eng = ENGINES[engine] + eng_inst = eng(parsed_expr) + ret = eng_inst.evaluate() + + if parsed_expr.assigner is None: + if multi_line: + raise ValueError( + "Multi-line expressions are only valid " + "if all expressions contain an assignment" + ) + if inplace: + raise ValueError("Cannot operate inplace if there is no assignment") + + # assign if needed + assigner = parsed_expr.assigner + if env.target is not None and assigner is not None: + target_modified = True + + # if returning a copy, copy only on the first assignment + if not inplace and first_expr: + try: + target = env.target + if isinstance(target, NDFrame): + target = target.copy(deep=None) + else: + target = target.copy() + except AttributeError as err: + raise ValueError("Cannot return a copy of the target") from err + else: + target = env.target + + # TypeError is most commonly raised (e.g. int, list), but you + # get IndexError if you try to do this assignment on np.ndarray. + # we will ignore numpy warnings here; e.g. if trying + # to use a non-numeric indexer + try: + if inplace and isinstance(target, NDFrame): + target.loc[:, assigner] = ret + else: + target[assigner] = ret # pyright: ignore[reportGeneralTypeIssues] + except (TypeError, IndexError) as err: + raise ValueError("Cannot assign expression output to target") from err + + if not resolvers: + resolvers = ({assigner: ret},) + else: + # existing resolver needs updated to handle + # case of mutating existing column in copy + for resolver in resolvers: + if assigner in resolver: + resolver[assigner] = ret + break + else: + resolvers += ({assigner: ret},) + + ret = None + first_expr = False + + # We want to exclude `inplace=None` as being False. 
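+ # Illustrative sketch (not part of the upstream pandas source): with a
+ # `target` and the default ``inplace=False``, the branch below returns a
+ # modified *copy* of the target rather than mutating it. Reusing the
+ # docstring's example frame:
+ #
+ # >>> import pandas as pd
+ # >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})
+ # >>> out = pd.eval("double_age = df.age * 2", target=df)
+ # >>> "double_age" in df.columns, "double_age" in out.columns
+ # (False, True)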
+ if inplace is False: + return target if target_modified else ret diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/expr.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/expr.py new file mode 100644 index 0000000000000000000000000000000000000000..b5861fbaebe9ca28703f378483fff6185aa589de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/expr.py @@ -0,0 +1,840 @@ +""" +:func:`~pandas.eval` parsers. +""" +from __future__ import annotations + +import ast +from functools import ( + partial, + reduce, +) +from keyword import iskeyword +import tokenize +from typing import ( + Callable, + ClassVar, + TypeVar, +) + +import numpy as np + +from pandas.errors import UndefinedVariableError + +import pandas.core.common as com +from pandas.core.computation.ops import ( + ARITH_OPS_SYMS, + BOOL_OPS_SYMS, + CMP_OPS_SYMS, + LOCAL_TAG, + MATHOPS, + REDUCTIONS, + UNARY_OPS_SYMS, + BinOp, + Constant, + Div, + FuncNode, + Op, + Term, + UnaryOp, + is_term, +) +from pandas.core.computation.parsing import ( + clean_backtick_quoted_toks, + tokenize_string, +) +from pandas.core.computation.scope import Scope + +from pandas.io.formats import printing + + +def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]: + """ + Rewrite the assignment operator for PyTables expressions that use ``=`` + as a substitute for ``==``. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + """ + toknum, tokval = tok + return toknum, "==" if tokval == "=" else tokval + + +def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]: + """ + Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise + precedence is changed to boolean precedence. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + """ + toknum, tokval = tok + if toknum == tokenize.OP: + if tokval == "&": + return tokenize.NAME, "and" + elif tokval == "|": + return tokenize.NAME, "or" + return toknum, tokval + return toknum, tokval + + +def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]: + """ + Replace local variables with a syntactically valid name. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + + Notes + ----- + This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as + ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` + is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. + """ + toknum, tokval = tok + if toknum == tokenize.OP and tokval == "@": + return tokenize.OP, LOCAL_TAG + return toknum, tokval + + +def _compose2(f, g): + """ + Compose 2 callables. + """ + return lambda *args, **kwargs: f(g(*args, **kwargs)) + + +def _compose(*funcs): + """ + Compose 2 or more callables. + """ + assert len(funcs) > 1, "At least 2 callables must be passed to compose" + return reduce(_compose2, funcs) + + +def _preparse( + source: str, + f=_compose( + _replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks + ), +) -> str: + """ + Compose a collection of tokenization functions. 
+ + Parameters + ---------- + source : str + A Python source code string + f : callable + This takes a tuple of (toknum, tokval) as its argument and returns a + tuple with the same structure but possibly different elements. Defaults + to the composition of ``_rewrite_assign``, ``_replace_booleans``, and + ``_replace_locals``. + + Returns + ------- + str + Valid Python source code + + Notes + ----- + The `f` parameter can be any callable that takes *and* returns input of the + form ``(toknum, tokval)``, where ``toknum`` is one of the constants from + the ``tokenize`` module and ``tokval`` is a string. + """ + assert callable(f), "f must be callable" + return tokenize.untokenize(f(x) for x in tokenize_string(source)) + + +def _is_type(t): + """ + Factory for a type checking function of type ``t`` or tuple of types. + """ + return lambda x: isinstance(x.value, t) + + +_is_list = _is_type(list) +_is_str = _is_type(str) + + +# partition all AST nodes +_all_nodes = frozenset( + node + for node in (getattr(ast, name) for name in dir(ast)) + if isinstance(node, type) and issubclass(node, ast.AST) +) + + +def _filter_nodes(superclass, all_nodes=_all_nodes): + """ + Filter out AST nodes that are subclasses of ``superclass``. + """ + node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass)) + return frozenset(node_names) + + +_all_node_names = frozenset(x.__name__ for x in _all_nodes) +_mod_nodes = _filter_nodes(ast.mod) +_stmt_nodes = _filter_nodes(ast.stmt) +_expr_nodes = _filter_nodes(ast.expr) +_expr_context_nodes = _filter_nodes(ast.expr_context) +_boolop_nodes = _filter_nodes(ast.boolop) +_operator_nodes = _filter_nodes(ast.operator) +_unary_op_nodes = _filter_nodes(ast.unaryop) +_cmp_op_nodes = _filter_nodes(ast.cmpop) +_comprehension_nodes = _filter_nodes(ast.comprehension) +_handler_nodes = _filter_nodes(ast.excepthandler) +_arguments_nodes = _filter_nodes(ast.arguments) +_keyword_nodes = _filter_nodes(ast.keyword) +_alias_nodes = _filter_nodes(ast.alias) + + +# nodes that we don't support directly but are needed for parsing +_hacked_nodes = frozenset(["Assign", "Module", "Expr"]) + + +_unsupported_expr_nodes = frozenset( + [ + "Yield", + "GeneratorExp", + "IfExp", + "DictComp", + "SetComp", + "Repr", + "Lambda", + "Set", + "AST", + "Is", + "IsNot", + ] +) + +# these nodes are low priority or won't ever be supported (e.g., AST) +_unsupported_nodes = ( + _stmt_nodes + | _mod_nodes + | _handler_nodes + | _arguments_nodes + | _keyword_nodes + | _alias_nodes + | _expr_context_nodes + | _unsupported_expr_nodes +) - _hacked_nodes + +# we're adding a different assignment in some cases to be equality comparison +# and we don't want `stmt` and friends in their so get only the class whose +# names are capitalized +_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes +intersection = _unsupported_nodes & _base_supported_nodes +_msg = f"cannot both support and not support {intersection}" +assert not intersection, _msg + + +def _node_not_implemented(node_name: str) -> Callable[..., None]: + """ + Return a function that raises a NotImplementedError with a passed node name. 
+ """ + + def f(self, *args, **kwargs): + raise NotImplementedError(f"'{node_name}' nodes are not implemented") + + return f + + +# should be bound by BaseExprVisitor but that creates a circular dependency: +# _T is used in disallow, but disallow is used to define BaseExprVisitor +# https://github.com/microsoft/pyright/issues/2315 +_T = TypeVar("_T") + + +def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]: + """ + Decorator to disallow certain nodes from parsing. Raises a + NotImplementedError instead. + + Returns + ------- + callable + """ + + def disallowed(cls: type[_T]) -> type[_T]: + # error: "Type[_T]" has no attribute "unsupported_nodes" + cls.unsupported_nodes = () # type: ignore[attr-defined] + for node in nodes: + new_method = _node_not_implemented(node) + name = f"visit_{node}" + # error: "Type[_T]" has no attribute "unsupported_nodes" + cls.unsupported_nodes += (name,) # type: ignore[attr-defined] + setattr(cls, name, new_method) + return cls + + return disallowed + + +def _op_maker(op_class, op_symbol): + """ + Return a function to create an op class with its symbol already passed. + + Returns + ------- + callable + """ + + def f(self, node, *args, **kwargs): + """ + Return a partial function with an Op subclass with an operator already passed. + + Returns + ------- + callable + """ + return partial(op_class, op_symbol, *args, **kwargs) + + return f + + +_op_classes = {"binary": BinOp, "unary": UnaryOp} + + +def add_ops(op_classes): + """ + Decorator to add default implementation of ops. + """ + + def f(cls): + for op_attr_name, op_class in op_classes.items(): + ops = getattr(cls, f"{op_attr_name}_ops") + ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map") + for op in ops: + op_node = ops_map[op] + if op_node is not None: + made_op = _op_maker(op_class, op) + setattr(cls, f"visit_{op_node}", made_op) + return cls + + return f + + +@disallow(_unsupported_nodes) +@add_ops(_op_classes) +class BaseExprVisitor(ast.NodeVisitor): + """ + Custom ast walker. Parsers of other engines should subclass this class + if necessary. + + Parameters + ---------- + env : Scope + engine : str + parser : str + preparser : callable + """ + + const_type: ClassVar[type[Term]] = Constant + term_type: ClassVar[type[Term]] = Term + + binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS + binary_op_nodes = ( + "Gt", + "Lt", + "GtE", + "LtE", + "Eq", + "NotEq", + "In", + "NotIn", + "BitAnd", + "BitOr", + "And", + "Or", + "Add", + "Sub", + "Mult", + None, + "Pow", + "FloorDiv", + "Mod", + ) + binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes)) + + unary_ops = UNARY_OPS_SYMS + unary_op_nodes = "UAdd", "USub", "Invert", "Not" + unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes)) + + rewrite_map = { + ast.Eq: ast.In, + ast.NotEq: ast.NotIn, + ast.In: ast.In, + ast.NotIn: ast.NotIn, + } + + unsupported_nodes: tuple[str, ...] 
+ + def __init__(self, env, engine, parser, preparser=_preparse) -> None: + self.env = env + self.engine = engine + self.parser = parser + self.preparser = preparser + self.assigner = None + + def visit(self, node, **kwargs): + if isinstance(node, str): + clean = self.preparser(node) + try: + node = ast.fix_missing_locations(ast.parse(clean)) + except SyntaxError as e: + if any(iskeyword(x) for x in clean.split()): + e.msg = "Python keyword not valid identifier in numexpr query" + raise e + + method = f"visit_{type(node).__name__}" + visitor = getattr(self, method) + return visitor(node, **kwargs) + + def visit_Module(self, node, **kwargs): + if len(node.body) != 1: + raise SyntaxError("only a single expression is allowed") + expr = node.body[0] + return self.visit(expr, **kwargs) + + def visit_Expr(self, node, **kwargs): + return self.visit(node.value, **kwargs) + + def _rewrite_membership_op(self, node, left, right): + # the kind of the operator (is actually an instance) + op_instance = node.op + op_type = type(op_instance) + + # must be two terms and the comparison operator must be ==/!=/in/not in + if is_term(left) and is_term(right) and op_type in self.rewrite_map: + left_list, right_list = map(_is_list, (left, right)) + left_str, right_str = map(_is_str, (left, right)) + + # if there are any strings or lists in the expression + if left_list or right_list or left_str or right_str: + op_instance = self.rewrite_map[op_type]() + + # pop the string variable out of locals and replace it with a list + # of one string, kind of a hack + if right_str: + name = self.env.add_tmp([right.value]) + right = self.term_type(name, self.env) + + if left_str: + name = self.env.add_tmp([left.value]) + left = self.term_type(name, self.env) + + op = self.visit(op_instance) + return op, op_instance, left, right + + def _maybe_transform_eq_ne(self, node, left=None, right=None): + if left is None: + left = self.visit(node.left, side="left") + if right is None: + right = self.visit(node.right, side="right") + op, op_class, left, right = self._rewrite_membership_op(node, left, right) + return op, op_class, left, right + + def _maybe_downcast_constants(self, left, right): + f32 = np.dtype(np.float32) + if ( + left.is_scalar + and hasattr(left, "value") + and not right.is_scalar + and right.return_type == f32 + ): + # right is a float32 array, left is a scalar + name = self.env.add_tmp(np.float32(left.value)) + left = self.term_type(name, self.env) + if ( + right.is_scalar + and hasattr(right, "value") + and not left.is_scalar + and left.return_type == f32 + ): + # left is a float32 array, right is a scalar + name = self.env.add_tmp(np.float32(right.value)) + right = self.term_type(name, self.env) + + return left, right + + def _maybe_eval(self, binop, eval_in_python): + # eval `in` and `not in` (for now) in "partial" python space + # things that can be evaluated in "eval" space will be turned into + # temporary variables. 
for example, + # [1,2] in a + 2 * b + # in that case a + 2 * b will be evaluated using numexpr, and the "in" + # call will be evaluated using isin (in python space) + return binop.evaluate( + self.env, self.engine, self.parser, self.term_type, eval_in_python + ) + + def _maybe_evaluate_binop( + self, + op, + op_class, + lhs, + rhs, + eval_in_python=("in", "not in"), + maybe_eval_in_python=("==", "!=", "<", ">", "<=", ">="), + ): + res = op(lhs, rhs) + + if res.has_invalid_return_type: + raise TypeError( + f"unsupported operand type(s) for {res.op}: " + f"'{lhs.type}' and '{rhs.type}'" + ) + + if self.engine != "pytables" and ( + res.op in CMP_OPS_SYMS + and getattr(lhs, "is_datetime", False) + or getattr(rhs, "is_datetime", False) + ): + # all date ops must be done in python bc numexpr doesn't work + # well with NaT + return self._maybe_eval(res, self.binary_ops) + + if res.op in eval_in_python: + # "in"/"not in" ops are always evaluated in python + return self._maybe_eval(res, eval_in_python) + elif self.engine != "pytables": + if ( + getattr(lhs, "return_type", None) == object + or getattr(rhs, "return_type", None) == object + ): + # evaluate "==" and "!=" in python if either of our operands + # has an object return type + return self._maybe_eval(res, eval_in_python + maybe_eval_in_python) + return res + + def visit_BinOp(self, node, **kwargs): + op, op_class, left, right = self._maybe_transform_eq_ne(node) + left, right = self._maybe_downcast_constants(left, right) + return self._maybe_evaluate_binop(op, op_class, left, right) + + def visit_Div(self, node, **kwargs): + return lambda lhs, rhs: Div(lhs, rhs) + + def visit_UnaryOp(self, node, **kwargs): + op = self.visit(node.op) + operand = self.visit(node.operand) + return op(operand) + + def visit_Name(self, node, **kwargs) -> Term: + return self.term_type(node.id, self.env, **kwargs) + + # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min + def visit_NameConstant(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min + def visit_Num(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + def visit_Constant(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + # TODO(py314): deprecated since Python 3.8. 
Remove after Python 3.14 is min + def visit_Str(self, node, **kwargs) -> Term: + name = self.env.add_tmp(node.s) + return self.term_type(name, self.env) + + def visit_List(self, node, **kwargs) -> Term: + name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts]) + return self.term_type(name, self.env) + + visit_Tuple = visit_List + + def visit_Index(self, node, **kwargs): + """df.index[4]""" + return self.visit(node.value) + + def visit_Subscript(self, node, **kwargs) -> Term: + from pandas import eval as pd_eval + + value = self.visit(node.value) + slobj = self.visit(node.slice) + result = pd_eval( + slobj, local_dict=self.env, engine=self.engine, parser=self.parser + ) + try: + # a Term instance + v = value.value[result] + except AttributeError: + # an Op instance + lhs = pd_eval( + value, local_dict=self.env, engine=self.engine, parser=self.parser + ) + v = lhs[result] + name = self.env.add_tmp(v) + return self.term_type(name, env=self.env) + + def visit_Slice(self, node, **kwargs) -> slice: + """df.index[slice(4,6)]""" + lower = node.lower + if lower is not None: + lower = self.visit(lower).value + upper = node.upper + if upper is not None: + upper = self.visit(upper).value + step = node.step + if step is not None: + step = self.visit(step).value + + return slice(lower, upper, step) + + def visit_Assign(self, node, **kwargs): + """ + support a single assignment node, like + + c = a + b + + set the assigner at the top level, must be a Name node which + might or might not exist in the resolvers + + """ + if len(node.targets) != 1: + raise SyntaxError("can only assign a single expression") + if not isinstance(node.targets[0], ast.Name): + raise SyntaxError("left hand side of an assignment must be a single name") + if self.env.target is None: + raise ValueError("cannot assign without a target object") + + try: + assigner = self.visit(node.targets[0], **kwargs) + except UndefinedVariableError: + assigner = node.targets[0].id + + self.assigner = getattr(assigner, "name", assigner) + if self.assigner is None: + raise SyntaxError( + "left hand side of an assignment must be a single resolvable name" + ) + + return self.visit(node.value, **kwargs) + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = node.ctx + if isinstance(ctx, ast.Load): + # resolve the value + resolved = self.visit(value).value + try: + v = getattr(resolved, attr) + name = self.env.add_tmp(v) + return self.term_type(name, self.env) + except AttributeError: + # something like datetime.datetime where scope is overridden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + raise + + raise ValueError(f"Invalid Attribute context {type(ctx).__name__}") + + def visit_Call(self, node, side=None, **kwargs): + if isinstance(node.func, ast.Attribute) and node.func.attr != "__call__": + res = self.visit_Attribute(node.func) + elif not isinstance(node.func, ast.Name): + raise TypeError("Only named functions are supported") + else: + try: + res = self.visit(node.func) + except UndefinedVariableError: + # Check if this is a supported function name + try: + res = FuncNode(node.func.id) + except ValueError: + # Raise original error + raise + + if res is None: + # error: "expr" has no attribute "id" + raise ValueError( + f"Invalid function call {node.func.id}" # type: ignore[attr-defined] + ) + if hasattr(res, "value"): + res = res.value + + if isinstance(res, FuncNode): + new_args = [self.visit(arg) for arg in node.args] + + if node.keywords: + raise TypeError( + 
f'Function "{res.name}" does not support keyword arguments' + ) + + return res(*new_args) + + else: + new_args = [self.visit(arg)(self.env) for arg in node.args] + + for key in node.keywords: + if not isinstance(key, ast.keyword): + # error: "expr" has no attribute "id" + raise ValueError( + "keyword error in function call " + f"'{node.func.id}'" # type: ignore[attr-defined] + ) + + if key.arg: + kwargs[key.arg] = self.visit(key.value)(self.env) + + name = self.env.add_tmp(res(*new_args, **kwargs)) + return self.term_type(name=name, env=self.env) + + def translate_In(self, op): + return op + + def visit_Compare(self, node, **kwargs): + ops = node.ops + comps = node.comparators + + # base case: we have something like a CMP b + if len(comps) == 1: + op = self.translate_In(ops[0]) + binop = ast.BinOp(op=op, left=node.left, right=comps[0]) + return self.visit(binop) + + # recursive case: we have a chained comparison, a CMP b CMP c, etc. + left = node.left + values = [] + for op, comp in zip(ops, comps): + new_node = self.visit( + ast.Compare(comparators=[comp], left=left, ops=[self.translate_In(op)]) + ) + left = comp + values.append(new_node) + return self.visit(ast.BoolOp(op=ast.And(), values=values)) + + def _try_visit_binop(self, bop): + if isinstance(bop, (Op, Term)): + return bop + return self.visit(bop) + + def visit_BoolOp(self, node, **kwargs): + def visitor(x, y): + lhs = self._try_visit_binop(x) + rhs = self._try_visit_binop(y) + + op, op_class, lhs, rhs = self._maybe_transform_eq_ne(node, lhs, rhs) + return self._maybe_evaluate_binop(op, node.op, lhs, rhs) + + operands = node.values + return reduce(visitor, operands) + + +_python_not_supported = frozenset(["Dict", "BoolOp", "In", "NotIn"]) +_numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS) + + +@disallow( + (_unsupported_nodes | _python_not_supported) + - (_boolop_nodes | frozenset(["BoolOp", "Attribute", "In", "NotIn", "Tuple"])) +) +class PandasExprVisitor(BaseExprVisitor): + def __init__( + self, + env, + engine, + parser, + preparser=partial( + _preparse, + f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks), + ), + ) -> None: + super().__init__(env, engine, parser, preparser) + + +@disallow(_unsupported_nodes | _python_not_supported | frozenset(["Not"])) +class PythonExprVisitor(BaseExprVisitor): + def __init__( + self, env, engine, parser, preparser=lambda source, f=None: source + ) -> None: + super().__init__(env, engine, parser, preparser=preparser) + + +class Expr: + """ + Object encapsulating an expression. + + Parameters + ---------- + expr : str + engine : str, optional, default 'numexpr' + parser : str, optional, default 'pandas' + env : Scope, optional, default None + level : int, optional, default 2 + """ + + env: Scope + engine: str + parser: str + + def __init__( + self, + expr, + engine: str = "numexpr", + parser: str = "pandas", + env: Scope | None = None, + level: int = 0, + ) -> None: + self.expr = expr + self.env = env or Scope(level=level + 1) + self.engine = engine + self.parser = parser + self._visitor = PARSERS[parser](self.env, self.engine, self.parser) + self.terms = self.parse() + + @property + def assigner(self): + return getattr(self._visitor, "assigner", None) + + def __call__(self): + return self.terms(self.env) + + def __repr__(self) -> str: + return printing.pprint_thing(self.terms) + + def __len__(self) -> int: + return len(self.expr) + + def parse(self): + """ + Parse an expression. 
+ """ + return self._visitor.visit(self.expr) + + @property + def names(self): + """ + Get the names in an expression. + """ + if is_term(self.terms): + return frozenset([self.terms.name]) + return frozenset(term.name for term in com.flatten(self.terms)) + + +PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor} diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/expressions.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..6219cac4aeb16ee019551f95a03af59da44c9d06 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/expressions.py @@ -0,0 +1,286 @@ +""" +Expressions +----------- + +Offer fast expression evaluation through numexpr + +""" +from __future__ import annotations + +import operator +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas.util._exceptions import find_stack_level + +from pandas.core import roperator +from pandas.core.computation.check import NUMEXPR_INSTALLED + +if NUMEXPR_INSTALLED: + import numexpr as ne + +if TYPE_CHECKING: + from pandas._typing import FuncType + +_TEST_MODE: bool | None = None +_TEST_RESULT: list[bool] = [] +USE_NUMEXPR = NUMEXPR_INSTALLED +_evaluate: FuncType | None = None +_where: FuncType | None = None + +# the set of dtypes that we will allow pass to numexpr +_ALLOWED_DTYPES = { + "evaluate": {"int64", "int32", "float64", "float32", "bool"}, + "where": {"int64", "float64", "bool"}, +} + +# the minimum prod shape that we will use numexpr +_MIN_ELEMENTS = 1_000_000 + + +def set_use_numexpr(v: bool = True) -> None: + # set/unset to use numexpr + global USE_NUMEXPR + if NUMEXPR_INSTALLED: + USE_NUMEXPR = v + + # choose what we are going to do + global _evaluate, _where + + _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard + _where = _where_numexpr if USE_NUMEXPR else _where_standard + + +def set_numexpr_threads(n=None) -> None: + # if we are using numexpr, set the threads to n + # otherwise reset + if NUMEXPR_INSTALLED and USE_NUMEXPR: + if n is None: + n = ne.detect_number_of_cores() + ne.set_num_threads(n) + + +def _evaluate_standard(op, op_str, a, b): + """ + Standard evaluation. 
+ """ + if _TEST_MODE: + _store_test_result(False) + return op(a, b) + + +def _can_use_numexpr(op, op_str, a, b, dtype_check) -> bool: + """return a boolean if we WILL be using numexpr""" + if op_str is not None: + # required min elements (otherwise we are adding overhead) + if a.size > _MIN_ELEMENTS: + # check for dtype compatibility + dtypes: set[str] = set() + for o in [a, b]: + # ndarray and Series Case + if hasattr(o, "dtype"): + dtypes |= {o.dtype.name} + + # allowed are a superset + if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: + return True + + return False + + +def _evaluate_numexpr(op, op_str, a, b): + result = None + + if _can_use_numexpr(op, op_str, a, b, "evaluate"): + is_reversed = op.__name__.strip("_").startswith("r") + if is_reversed: + # we were originally called by a reversed op method + a, b = b, a + + a_value = a + b_value = b + + try: + result = ne.evaluate( + f"a_value {op_str} b_value", + local_dict={"a_value": a_value, "b_value": b_value}, + casting="safe", + ) + except TypeError: + # numexpr raises eg for array ** array with integers + # (https://github.com/pydata/numexpr/issues/379) + pass + except NotImplementedError: + if _bool_arith_fallback(op_str, a, b): + pass + else: + raise + + if is_reversed: + # reverse order to original for fallback + a, b = b, a + + if _TEST_MODE: + _store_test_result(result is not None) + + if result is None: + result = _evaluate_standard(op, op_str, a, b) + + return result + + +_op_str_mapping = { + operator.add: "+", + roperator.radd: "+", + operator.mul: "*", + roperator.rmul: "*", + operator.sub: "-", + roperator.rsub: "-", + operator.truediv: "/", + roperator.rtruediv: "/", + # floordiv not supported by numexpr 2.x + operator.floordiv: None, + roperator.rfloordiv: None, + # we require Python semantics for mod of negative for backwards compatibility + # see https://github.com/pydata/numexpr/issues/365 + # so sticking with unaccelerated for now GH#36552 + operator.mod: None, + roperator.rmod: None, + operator.pow: "**", + roperator.rpow: "**", + operator.eq: "==", + operator.ne: "!=", + operator.le: "<=", + operator.lt: "<", + operator.ge: ">=", + operator.gt: ">", + operator.and_: "&", + roperator.rand_: "&", + operator.or_: "|", + roperator.ror_: "|", + operator.xor: "^", + roperator.rxor: "^", + divmod: None, + roperator.rdivmod: None, +} + + +def _where_standard(cond, a, b): + # Caller is responsible for extracting ndarray if necessary + return np.where(cond, a, b) + + +def _where_numexpr(cond, a, b): + # Caller is responsible for extracting ndarray if necessary + result = None + + if _can_use_numexpr(None, "where", a, b, "where"): + result = ne.evaluate( + "where(cond_value, a_value, b_value)", + local_dict={"cond_value": cond, "a_value": a, "b_value": b}, + casting="safe", + ) + + if result is None: + result = _where_standard(cond, a, b) + + return result + + +# turn myself on +set_use_numexpr(get_option("compute.use_numexpr")) + + +def _has_bool_dtype(x): + try: + return x.dtype == bool + except AttributeError: + return isinstance(x, (bool, np.bool_)) + + +_BOOL_OP_UNSUPPORTED = {"+": "|", "*": "&", "-": "^"} + + +def _bool_arith_fallback(op_str, a, b) -> bool: + """ + Check if we should fallback to the python `_evaluate_standard` in case + of an unsupported operation by numexpr, which is the case for some + boolean ops. 
+ """ + if _has_bool_dtype(a) and _has_bool_dtype(b): + if op_str in _BOOL_OP_UNSUPPORTED: + warnings.warn( + f"evaluating in Python space because the {repr(op_str)} " + "operator is not supported by numexpr for the bool dtype, " + f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.", + stacklevel=find_stack_level(), + ) + return True + return False + + +def evaluate(op, a, b, use_numexpr: bool = True): + """ + Evaluate and return the expression of the op on a and b. + + Parameters + ---------- + op : the actual operand + a : left operand + b : right operand + use_numexpr : bool, default True + Whether to try to use numexpr. + """ + op_str = _op_str_mapping[op] + if op_str is not None: + if use_numexpr: + # error: "None" not callable + return _evaluate(op, op_str, a, b) # type: ignore[misc] + return _evaluate_standard(op, op_str, a, b) + + +def where(cond, a, b, use_numexpr: bool = True): + """ + Evaluate the where condition cond on a and b. + + Parameters + ---------- + cond : np.ndarray[bool] + a : return if cond is True + b : return if cond is False + use_numexpr : bool, default True + Whether to try to use numexpr. + """ + assert _where is not None + return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b) + + +def set_test_mode(v: bool = True) -> None: + """ + Keeps track of whether numexpr was used. + + Stores an additional ``True`` for every successful use of evaluate with + numexpr since the last ``get_test_result``. + """ + global _TEST_MODE, _TEST_RESULT + _TEST_MODE = v + _TEST_RESULT = [] + + +def _store_test_result(used_numexpr: bool) -> None: + if used_numexpr: + _TEST_RESULT.append(used_numexpr) + + +def get_test_result() -> list[bool]: + """ + Get test result and reset test_results. + """ + global _TEST_RESULT + res = _TEST_RESULT + _TEST_RESULT = [] + return res diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/ops.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..95ac20ba39edcd6b3a2920260dbf55f1b72e2f80 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/ops.py @@ -0,0 +1,621 @@ +""" +Operator classes for eval. 
+""" + +from __future__ import annotations + +from datetime import datetime +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs.tslibs import Timestamp + +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, +) + +import pandas.core.common as com +from pandas.core.computation.common import ( + ensure_decoded, + result_type_many, +) +from pandas.core.computation.scope import DEFAULT_GLOBALS + +from pandas.io.formats.printing import ( + pprint_thing, + pprint_thing_encoded, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Iterator, + ) + +REDUCTIONS = ("sum", "prod", "min", "max") + +_unary_math_ops = ( + "sin", + "cos", + "exp", + "log", + "expm1", + "log1p", + "sqrt", + "sinh", + "cosh", + "tanh", + "arcsin", + "arccos", + "arctan", + "arccosh", + "arcsinh", + "arctanh", + "abs", + "log10", + "floor", + "ceil", +) +_binary_math_ops = ("arctan2",) + +MATHOPS = _unary_math_ops + _binary_math_ops + + +LOCAL_TAG = "__pd_eval_local_" + + +class Term: + def __new__(cls, name, env, side=None, encoding=None): + klass = Constant if not isinstance(name, str) else cls + # error: Argument 2 for "super" not an instance of argument 1 + supr_new = super(Term, klass).__new__ # type: ignore[misc] + return supr_new(klass) + + is_local: bool + + def __init__(self, name, env, side=None, encoding=None) -> None: + # name is a str for Term, but may be something else for subclasses + self._name = name + self.env = env + self.side = side + tname = str(name) + self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS + self._value = self._resolve_name() + self.encoding = encoding + + @property + def local_name(self) -> str: + return self.name.replace(LOCAL_TAG, "") + + def __repr__(self) -> str: + return pprint_thing(self.name) + + def __call__(self, *args, **kwargs): + return self.value + + def evaluate(self, *args, **kwargs) -> Term: + return self + + def _resolve_name(self): + local_name = str(self.local_name) + is_local = self.is_local + if local_name in self.env.scope and isinstance( + self.env.scope[local_name], type + ): + is_local = False + + res = self.env.resolve(local_name, is_local=is_local) + self.update(res) + + if hasattr(res, "ndim") and res.ndim > 2: + raise NotImplementedError( + "N-dimensional objects, where N > 2, are not supported with eval" + ) + return res + + def update(self, value) -> None: + """ + search order for local (i.e., @variable) variables: + + scope, key_variable + [('locals', 'local_name'), + ('globals', 'local_name'), + ('locals', 'key'), + ('globals', 'key')] + """ + key = self.name + + # if it's a variable name (otherwise a constant) + if isinstance(key, str): + self.env.swapkey(self.local_name, key, new_value=value) + + self.value = value + + @property + def is_scalar(self) -> bool: + return is_scalar(self._value) + + @property + def type(self): + try: + # potentially very slow for large, mixed dtype frames + return self._value.values.dtype + except AttributeError: + try: + # ndarray + return self._value.dtype + except AttributeError: + # scalar + return type(self._value) + + return_type = type + + @property + def raw(self) -> str: + return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})" + + @property + def is_datetime(self) -> bool: + try: + t = self.type.type + except AttributeError: + t = self.type + + return issubclass(t, (datetime, np.datetime64)) + + @property + def value(self): + return self._value 
+ + @value.setter + def value(self, new_value) -> None: + self._value = new_value + + @property + def name(self): + return self._name + + @property + def ndim(self) -> int: + return self._value.ndim + + +class Constant(Term): + def _resolve_name(self): + return self._name + + @property + def name(self): + return self.value + + def __repr__(self) -> str: + # in python 2 str() of float + # can truncate shorter than repr() + return repr(self.name) + + +_bool_op_map = {"not": "~", "and": "&", "or": "|"} + + +class Op: + """ + Hold an operator of arbitrary arity. + """ + + op: str + + def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None: + self.op = _bool_op_map.get(op, op) + self.operands = operands + self.encoding = encoding + + def __iter__(self) -> Iterator: + return iter(self.operands) + + def __repr__(self) -> str: + """ + Print a generic n-ary operator and its operands using infix notation. + """ + # recurse over the operands + parened = (f"({pprint_thing(opr)})" for opr in self.operands) + return pprint_thing(f" {self.op} ".join(parened)) + + @property + def return_type(self): + # clobber types to bool if the op is a boolean operator + if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS): + return np.bool_ + return result_type_many(*(term.type for term in com.flatten(self))) + + @property + def has_invalid_return_type(self) -> bool: + types = self.operand_types + obj_dtype_set = frozenset([np.dtype("object")]) + return self.return_type == object and types - obj_dtype_set + + @property + def operand_types(self): + return frozenset(term.type for term in com.flatten(self)) + + @property + def is_scalar(self) -> bool: + return all(operand.is_scalar for operand in self.operands) + + @property + def is_datetime(self) -> bool: + try: + t = self.return_type.type + except AttributeError: + t = self.return_type + + return issubclass(t, (datetime, np.datetime64)) + + +def _in(x, y): + """ + Compute the vectorized membership of ``x in y`` if possible, otherwise + use Python. + """ + try: + return x.isin(y) + except AttributeError: + if is_list_like(x): + try: + return y.isin(x) + except AttributeError: + pass + return x in y + + +def _not_in(x, y): + """ + Compute the vectorized membership of ``x not in y`` if possible, + otherwise use Python. 
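+
+    Illustrative example (hypothetical values)::
+
+        >>> import pandas as pd
+        >>> _not_in(pd.Series([1, 2, 3]), [2])
+        0     True
+        1    False
+        2     True
+        dtype: bool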
+ """ + try: + return ~x.isin(y) + except AttributeError: + if is_list_like(x): + try: + return ~y.isin(x) + except AttributeError: + pass + return x not in y + + +CMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in") +_cmp_ops_funcs = ( + operator.gt, + operator.lt, + operator.ge, + operator.le, + operator.eq, + operator.ne, + _in, + _not_in, +) +_cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs)) + +BOOL_OPS_SYMS = ("&", "|", "and", "or") +_bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_) +_bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs)) + +ARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%") +_arith_ops_funcs = ( + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.pow, + operator.floordiv, + operator.mod, +) +_arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs)) + +SPECIAL_CASE_ARITH_OPS_SYMS = ("**", "//", "%") +_special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod) +_special_case_arith_ops_dict = dict( + zip(SPECIAL_CASE_ARITH_OPS_SYMS, _special_case_arith_ops_funcs) +) + +_binary_ops_dict = {} + +for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict): + _binary_ops_dict.update(d) + + +def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: + """ + Cast an expression inplace. + + Parameters + ---------- + terms : Op + The expression that should cast. + acceptable_dtypes : list of acceptable numpy.dtype + Will not cast if term's dtype in this list. + dtype : str or numpy.dtype + The dtype to cast to. + """ + dt = np.dtype(dtype) + for term in terms: + if term.type in acceptable_dtypes: + continue + + try: + new_value = term.value.astype(dt) + except AttributeError: + new_value = dt.type(term.value) + term.update(new_value) + + +def is_term(obj) -> bool: + return isinstance(obj, Term) + + +class BinOp(Op): + """ + Hold a binary operator and its operands. + + Parameters + ---------- + op : str + lhs : Term or Op + rhs : Term or Op + """ + + def __init__(self, op: str, lhs, rhs) -> None: + super().__init__(op, (lhs, rhs)) + self.lhs = lhs + self.rhs = rhs + + self._disallow_scalar_only_bool_ops() + + self.convert_values() + + try: + self.func = _binary_ops_dict[op] + except KeyError as err: + # has to be made a list for python3 + keys = list(_binary_ops_dict.keys()) + raise ValueError( + f"Invalid binary operator {repr(op)}, valid operators are {keys}" + ) from err + + def __call__(self, env): + """ + Recursively evaluate an expression in Python space. + + Parameters + ---------- + env : Scope + + Returns + ------- + object + The result of an evaluated expression. + """ + # recurse over the left/right nodes + left = self.lhs(env) + right = self.rhs(env) + + return self.func(left, right) + + def evaluate(self, env, engine: str, parser, term_type, eval_in_python): + """ + Evaluate a binary operation *before* being passed to the engine. 
+ + Parameters + ---------- + env : Scope + engine : str + parser : str + term_type : type + eval_in_python : list + + Returns + ------- + term_type + The "pre-evaluated" expression as an instance of ``term_type`` + """ + if engine == "python": + res = self(env) + else: + # recurse over the left/right nodes + + left = self.lhs.evaluate( + env, + engine=engine, + parser=parser, + term_type=term_type, + eval_in_python=eval_in_python, + ) + + right = self.rhs.evaluate( + env, + engine=engine, + parser=parser, + term_type=term_type, + eval_in_python=eval_in_python, + ) + + # base cases + if self.op in eval_in_python: + res = self.func(left.value, right.value) + else: + from pandas.core.computation.eval import eval + + res = eval(self, local_dict=env, engine=engine, parser=parser) + + name = env.add_tmp(res) + return term_type(name, env=env) + + def convert_values(self) -> None: + """ + Convert datetimes to a comparable value in an expression. + """ + + def stringify(value): + encoder: Callable + if self.encoding is not None: + encoder = partial(pprint_thing_encoded, encoding=self.encoding) + else: + encoder = pprint_thing + return encoder(value) + + lhs, rhs = self.lhs, self.rhs + + if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: + v = rhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = Timestamp(ensure_decoded(v)) + if v.tz is not None: + v = v.tz_convert("UTC") + self.rhs.update(v) + + if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: + v = lhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = Timestamp(ensure_decoded(v)) + if v.tz is not None: + v = v.tz_convert("UTC") + self.lhs.update(v) + + def _disallow_scalar_only_bool_ops(self): + rhs = self.rhs + lhs = self.lhs + + # GH#24883 unwrap dtype if necessary to ensure we have a type object + rhs_rt = rhs.return_type + rhs_rt = getattr(rhs_rt, "type", rhs_rt) + lhs_rt = lhs.return_type + lhs_rt = getattr(lhs_rt, "type", lhs_rt) + if ( + (lhs.is_scalar or rhs.is_scalar) + and self.op in _bool_ops_dict + and ( + not ( + issubclass(rhs_rt, (bool, np.bool_)) + and issubclass(lhs_rt, (bool, np.bool_)) + ) + ) + ): + raise NotImplementedError("cannot evaluate scalar only bool ops") + + +def isnumeric(dtype) -> bool: + return issubclass(np.dtype(dtype).type, np.number) + + +class Div(BinOp): + """ + Div operator to special case casting. + + Parameters + ---------- + lhs, rhs : Term or Op + The Terms or Ops in the ``/`` expression. + """ + + def __init__(self, lhs, rhs) -> None: + super().__init__("/", lhs, rhs) + + if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type): + raise TypeError( + f"unsupported operand type(s) for {self.op}: " + f"'{lhs.return_type}' and '{rhs.return_type}'" + ) + + # do not upcast float32s to float64 un-necessarily + acceptable_dtypes = [np.float32, np.float64] + _cast_inplace(com.flatten(self), acceptable_dtypes, np.float64) + + +UNARY_OPS_SYMS = ("+", "-", "~", "not") +_unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert) +_unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs)) + + +class UnaryOp(Op): + """ + Hold a unary operator and its operands. + + Parameters + ---------- + op : str + The token used to represent the operator. + operand : Term or Op + The Term or Op operand to the operator. + + Raises + ------ + ValueError + * If no function associated with the passed operator token is found. 
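+
+    For illustration, ``UnaryOp("~", term)`` wraps ``operator.invert`` around
+    the operand; ``"not"`` maps to ``operator.invert`` as well (see
+    ``_unary_ops_dict`` above).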
+ """ + + def __init__(self, op: Literal["+", "-", "~", "not"], operand) -> None: + super().__init__(op, (operand,)) + self.operand = operand + + try: + self.func = _unary_ops_dict[op] + except KeyError as err: + raise ValueError( + f"Invalid unary operator {repr(op)}, " + f"valid operators are {UNARY_OPS_SYMS}" + ) from err + + def __call__(self, env) -> MathCall: + operand = self.operand(env) + # error: Cannot call function of unknown type + return self.func(operand) # type: ignore[operator] + + def __repr__(self) -> str: + return pprint_thing(f"{self.op}({self.operand})") + + @property + def return_type(self) -> np.dtype: + operand = self.operand + if operand.return_type == np.dtype("bool"): + return np.dtype("bool") + if isinstance(operand, Op) and ( + operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict + ): + return np.dtype("bool") + return np.dtype("int") + + +class MathCall(Op): + def __init__(self, func, args) -> None: + super().__init__(func.name, args) + self.func = func + + def __call__(self, env): + # error: "Op" not callable + operands = [op(env) for op in self.operands] # type: ignore[operator] + return self.func.func(*operands) + + def __repr__(self) -> str: + operands = map(str, self.operands) + return pprint_thing(f"{self.op}({','.join(operands)})") + + +class FuncNode: + def __init__(self, name: str) -> None: + if name not in MATHOPS: + raise ValueError(f'"{name}" is not a supported function') + self.name = name + self.func = getattr(np, name) + + def __call__(self, *args) -> MathCall: + return MathCall(self, args) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/parsing.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..4cfa0f2baffd5ed45db19242c2afd00b6e5e23dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/parsing.py @@ -0,0 +1,198 @@ +""" +:func:`~pandas.eval` source string parsing functions +""" +from __future__ import annotations + +from io import StringIO +from keyword import iskeyword +import token +import tokenize +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + +# A token value Python's tokenizer probably will never use. +BACKTICK_QUOTED_STRING = 100 + + +def create_valid_python_identifier(name: str) -> str: + """ + Create valid Python identifiers from any string. + + Check if name contains any special characters. If it contains any + special characters, the special characters will be replaced by + a special string and a prefix is added. + + Raises + ------ + SyntaxError + If the returned name is not a Python valid identifier, raise an exception. + This can happen if there is a hashtag in the name, as the tokenizer will + than terminate and not find the backtick. + But also for characters that fall out of the range of (U+0001..U+007F). + """ + if name.isidentifier() and not iskeyword(name): + return name + + # Create a dict with the special characters and their replacement string. + # EXACT_TOKEN_TYPES contains these special characters + # token.tok_name contains a readable description of the replacement string. 
+ special_characters_replacements = { + char: f"_{token.tok_name[tokval]}_" + for char, tokval in (tokenize.EXACT_TOKEN_TYPES.items()) + } + special_characters_replacements.update( + { + " ": "_", + "?": "_QUESTIONMARK_", + "!": "_EXCLAMATIONMARK_", + "$": "_DOLLARSIGN_", + "€": "_EUROSIGN_", + "°": "_DEGREESIGN_", + # Including quotes works, but there are exceptions. + "'": "_SINGLEQUOTE_", + '"': "_DOUBLEQUOTE_", + # Currently not possible. Terminates parser and won't find backtick. + # "#": "_HASH_", + } + ) + + name = "".join([special_characters_replacements.get(char, char) for char in name]) + name = f"BACKTICK_QUOTED_STRING_{name}" + + if not name.isidentifier(): + raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.") + + return name + + +def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]: + """ + Clean up a column name if surrounded by backticks. + + Backtick quoted string are indicated by a certain tokval value. If a string + is a backtick quoted token it will processed by + :func:`_create_valid_python_identifier` so that the parser can find this + string when the query is executed. + In this case the tok will get the NAME tokval. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tok : Tuple[int, str] + Either the input or token or the replacement values + """ + toknum, tokval = tok + if toknum == BACKTICK_QUOTED_STRING: + return tokenize.NAME, create_valid_python_identifier(tokval) + return toknum, tokval + + +def clean_column_name(name: Hashable) -> Hashable: + """ + Function to emulate the cleaning of a backtick quoted name. + + The purpose for this function is to see what happens to the name of + identifier if it goes to the process of being parsed a Python code + inside a backtick quoted string and than being cleaned + (removed of any special characters). + + Parameters + ---------- + name : hashable + Name to be cleaned. + + Returns + ------- + name : hashable + Returns the name after tokenizing and cleaning. + + Notes + ----- + For some cases, a name cannot be converted to a valid Python identifier. + In that case :func:`tokenize_string` raises a SyntaxError. + In that case, we just return the name unmodified. + + If this name was used in the query string (this makes the query call impossible) + an error will be raised by :func:`tokenize_backtick_quoted_string` instead, + which is not caught and propagates to the user level. + """ + try: + tokenized = tokenize_string(f"`{name}`") + tokval = next(tokenized)[1] + return create_valid_python_identifier(tokval) + except SyntaxError: + return name + + +def tokenize_backtick_quoted_string( + token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int +) -> tuple[int, str]: + """ + Creates a token from a backtick quoted string. + + Moves the token_generator forwards till right after the next backtick. + + Parameters + ---------- + token_generator : Iterator[tokenize.TokenInfo] + The generator that yields the tokens of the source string (Tuple[int, str]). + The generator is at the first token after the backtick (`) + + source : str + The Python source code string. + + string_start : int + This is the start of backtick quoted string inside the source string. + + Returns + ------- + tok: Tuple[int, str] + The token that represents the backtick quoted string. + The integer is equal to BACKTICK_QUOTED_STRING (100). 
+ """ + for _, tokval, start, _, _ in token_generator: + if tokval == "`": + string_end = start[1] + break + + return BACKTICK_QUOTED_STRING, source[string_start:string_end] + + +def tokenize_string(source: str) -> Iterator[tuple[int, str]]: + """ + Tokenize a Python source code string. + + Parameters + ---------- + source : str + The Python source code string. + + Returns + ------- + tok_generator : Iterator[Tuple[int, str]] + An iterator yielding all tokens with only toknum and tokval (Tuple[ing, str]). + """ + line_reader = StringIO(source).readline + token_generator = tokenize.generate_tokens(line_reader) + + # Loop over all tokens till a backtick (`) is found. + # Then, take all tokens till the next backtick to form a backtick quoted string + for toknum, tokval, start, _, _ in token_generator: + if tokval == "`": + try: + yield tokenize_backtick_quoted_string( + token_generator, source, string_start=start[1] + 1 + ) + except Exception as err: + raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err + else: + yield toknum, tokval diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/pytables.py b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/pytables.py new file mode 100644 index 0000000000000000000000000000000000000000..04a8ad7ef0be6b044baf65b80cbf4161d45f8cac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/pytables.py @@ -0,0 +1,666 @@ +""" manage PyTables query interface via Expressions """ +from __future__ import annotations + +import ast +from decimal import ( + Decimal, + InvalidOperation, +) +from functools import partial +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, +) + +import numpy as np + +from pandas._libs.tslibs import ( + Timedelta, + Timestamp, +) +from pandas.errors import UndefinedVariableError + +from pandas.core.dtypes.common import is_list_like + +import pandas.core.common as com +from pandas.core.computation import ( + expr, + ops, + scope as _scope, +) +from pandas.core.computation.common import ensure_decoded +from pandas.core.computation.expr import BaseExprVisitor +from pandas.core.computation.ops import is_term +from pandas.core.construction import extract_array +from pandas.core.indexes.base import Index + +from pandas.io.formats.printing import ( + pprint_thing, + pprint_thing_encoded, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Self, + npt, + ) + + +class PyTablesScope(_scope.Scope): + __slots__ = ("queryables",) + + queryables: dict[str, Any] + + def __init__( + self, + level: int, + global_dict=None, + local_dict=None, + queryables: dict[str, Any] | None = None, + ) -> None: + super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict) + self.queryables = queryables or {} + + +class Term(ops.Term): + env: PyTablesScope + + def __new__(cls, name, env, side=None, encoding=None): + if isinstance(name, str): + klass = cls + else: + klass = Constant + return object.__new__(klass) + + def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: + super().__init__(name, env, side=side, encoding=encoding) + + def _resolve_name(self): + # must be a queryables + if self.side == "left": + # Note: The behavior of __new__ ensures that self.name is a str here + if self.name not in self.env.queryables: + raise NameError(f"name {repr(self.name)} is not defined") + return self.name + + # resolve the rhs (and allow it to be None) + try: + return self.env.resolve(self.name, is_local=False) + except 
UndefinedVariableError: + return self.name + + # read-only property overwriting read/write property + @property # type: ignore[misc] + def value(self): + return self._value + + +class Constant(Term): + def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: + assert isinstance(env, PyTablesScope), type(env) + super().__init__(name, env, side=side, encoding=encoding) + + def _resolve_name(self): + return self._name + + +class BinOp(ops.BinOp): + _max_selectors = 31 + + op: str + queryables: dict[str, Any] + condition: str | None + + def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding) -> None: + super().__init__(op, lhs, rhs) + self.queryables = queryables + self.encoding = encoding + self.condition = None + + def _disallow_scalar_only_bool_ops(self) -> None: + pass + + def prune(self, klass): + def pr(left, right): + """create and return a new specialized BinOp from myself""" + if left is None: + return right + elif right is None: + return left + + k = klass + if isinstance(left, ConditionBinOp): + if isinstance(right, ConditionBinOp): + k = JointConditionBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + elif isinstance(left, FilterBinOp): + if isinstance(right, FilterBinOp): + k = JointFilterBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + return k( + self.op, left, right, queryables=self.queryables, encoding=self.encoding + ).evaluate() + + left, right = self.lhs, self.rhs + + if is_term(left) and is_term(right): + res = pr(left.value, right.value) + elif not is_term(left) and is_term(right): + res = pr(left.prune(klass), right.value) + elif is_term(left) and not is_term(right): + res = pr(left.value, right.prune(klass)) + elif not (is_term(left) or is_term(right)): + res = pr(left.prune(klass), right.prune(klass)) + + return res + + def conform(self, rhs): + """inplace conform rhs""" + if not is_list_like(rhs): + rhs = [rhs] + if isinstance(rhs, np.ndarray): + rhs = rhs.ravel() + return rhs + + @property + def is_valid(self) -> bool: + """return True if this is a valid field""" + return self.lhs in self.queryables + + @property + def is_in_table(self) -> bool: + """ + return True if this is a valid column name for generation (e.g. 
an + actual column in the table) + """ + return self.queryables.get(self.lhs) is not None + + @property + def kind(self): + """the kind of my field""" + return getattr(self.queryables.get(self.lhs), "kind", None) + + @property + def meta(self): + """the meta of my field""" + return getattr(self.queryables.get(self.lhs), "meta", None) + + @property + def metadata(self): + """the metadata of my field""" + return getattr(self.queryables.get(self.lhs), "metadata", None) + + def generate(self, v) -> str: + """create and return the op string for this TermValue""" + val = v.tostring(self.encoding) + return f"({self.lhs} {self.op} {val})" + + def convert_value(self, v) -> TermValue: + """ + convert the expression that is in the term to something that is + accepted by pytables + """ + + def stringify(value): + if self.encoding is not None: + return pprint_thing_encoded(value, encoding=self.encoding) + return pprint_thing(value) + + kind = ensure_decoded(self.kind) + meta = ensure_decoded(self.meta) + if kind == "datetime" or (kind and kind.startswith("datetime64")): + if isinstance(v, (int, float)): + v = stringify(v) + v = ensure_decoded(v) + v = Timestamp(v).as_unit("ns") + if v.tz is not None: + v = v.tz_convert("UTC") + return TermValue(v, v._value, kind) + elif kind in ("timedelta64", "timedelta"): + if isinstance(v, str): + v = Timedelta(v) + else: + v = Timedelta(v, unit="s") + v = v.as_unit("ns")._value + return TermValue(int(v), v, kind) + elif meta == "category": + metadata = extract_array(self.metadata, extract_numpy=True) + result: npt.NDArray[np.intp] | np.intp | int + if v not in metadata: + result = -1 + else: + result = metadata.searchsorted(v, side="left") + return TermValue(result, result, "integer") + elif kind == "integer": + try: + v_dec = Decimal(v) + except InvalidOperation: + # GH 54186 + # convert v to float to raise float's ValueError + float(v) + else: + v = int(v_dec.to_integral_exact(rounding="ROUND_HALF_EVEN")) + return TermValue(v, v, kind) + elif kind == "float": + v = float(v) + return TermValue(v, v, kind) + elif kind == "bool": + if isinstance(v, str): + v = v.strip().lower() not in [ + "false", + "f", + "no", + "n", + "none", + "0", + "[]", + "{}", + "", + ] + else: + v = bool(v) + return TermValue(v, v, kind) + elif isinstance(v, str): + # string quoting + return TermValue(v, stringify(v), "string") + else: + raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column") + + def convert_values(self) -> None: + pass + + +class FilterBinOp(BinOp): + filter: tuple[Any, Any, Index] | None = None + + def __repr__(self) -> str: + if self.filter is None: + return "Filter: Not Initialized" + return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]") + + def invert(self) -> Self: + """invert the filter""" + if self.filter is not None: + self.filter = ( + self.filter[0], + self.generate_filter_op(invert=True), + self.filter[2], + ) + return self + + def format(self): + """return the actual filter format""" + return [self.filter] + + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self | None: # type: ignore[override] + if not self.is_valid: + raise ValueError(f"query term is not valid [{self}]") + + rhs = self.conform(self.rhs) + values = list(rhs) + + if self.is_in_table: + # if too many values to create the expression, use a filter instead + if self.op in ["==", "!="] and len(values) > self._max_selectors: + filter_op = self.generate_filter_op() + self.filter = (self.lhs, filter_op, Index(values)) + 
+ return self + return None + + # equality conditions + if self.op in ["==", "!="]: + filter_op = self.generate_filter_op() + self.filter = (self.lhs, filter_op, Index(values)) + + else: + raise TypeError( + f"passing a filterable condition to a non-table indexer [{self}]" + ) + + return self + + def generate_filter_op(self, invert: bool = False): + if (self.op == "!=" and not invert) or (self.op == "==" and invert): + return lambda axis, vals: ~axis.isin(vals) + else: + return lambda axis, vals: axis.isin(vals) + + +class JointFilterBinOp(FilterBinOp): + def format(self): + raise NotImplementedError("unable to collapse Joint Filters") + + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self: # type: ignore[override] + return self + + +class ConditionBinOp(BinOp): + def __repr__(self) -> str: + return pprint_thing(f"[Condition : [{self.condition}]]") + + def invert(self): + """invert the condition""" + # if self.condition is not None: + # self.condition = "~(%s)" % self.condition + # return self + raise NotImplementedError( + "cannot use an invert condition when passing to numexpr" + ) + + def format(self): + """return the actual ne format""" + return self.condition + + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self | None: # type: ignore[override] + if not self.is_valid: + raise ValueError(f"query term is not valid [{self}]") + + # convert values if we are in the table + if not self.is_in_table: + return None + + rhs = self.conform(self.rhs) + values = [self.convert_value(v) for v in rhs] + + # equality conditions + if self.op in ["==", "!="]: + # too many values to create the expression? + if len(values) <= self._max_selectors: + vs = [self.generate(v) for v in values] + self.condition = f"({' | '.join(vs)})" + + # use a filter after reading + else: + return None + else: + self.condition = self.generate(values[0]) + + return self + + +class JointConditionBinOp(ConditionBinOp): + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self: # type: ignore[override] + self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})" + return self + + +class UnaryOp(ops.UnaryOp): + def prune(self, klass): + if self.op != "~": + raise NotImplementedError("UnaryOp only support invert type ops") + + operand = self.operand + operand = operand.prune(klass) + + if operand is not None and ( + issubclass(klass, ConditionBinOp) + and operand.condition is not None + or not issubclass(klass, ConditionBinOp) + and issubclass(klass, FilterBinOp) + and operand.filter is not None + ): + return operand.invert() + return None + + +class PyTablesExprVisitor(BaseExprVisitor): + const_type: ClassVar[type[ops.Term]] = Constant + term_type: ClassVar[type[Term]] = Term + + def __init__(self, env, engine, parser, **kwargs) -> None: + super().__init__(env, engine, parser) + for bin_op in self.binary_ops: + bin_node = self.binary_op_nodes_map[bin_op] + setattr( + self, + f"visit_{bin_node}", + lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs), + ) + + def visit_UnaryOp(self, node, **kwargs) -> ops.Term | UnaryOp | None: + if isinstance(node.op, (ast.Not, ast.Invert)): + return UnaryOp("~", self.visit(node.operand)) + elif isinstance(node.op, ast.USub): + return self.const_type(-self.visit(node.operand).value, self.env) + elif isinstance(node.op, ast.UAdd): + raise NotImplementedError("Unary addition not supported") + # TODO: return None might never be reached + 
return None + + def visit_Index(self, node, **kwargs): + return self.visit(node.value).value + + def visit_Assign(self, node, **kwargs): + cmpr = ast.Compare( + ops=[ast.Eq()], left=node.targets[0], comparators=[node.value] + ) + return self.visit(cmpr) + + def visit_Subscript(self, node, **kwargs) -> ops.Term: + # only allow simple subscripts + + value = self.visit(node.value) + slobj = self.visit(node.slice) + try: + value = value.value + except AttributeError: + pass + + if isinstance(slobj, Term): + # In py39 np.ndarray lookups with Term containing int raise + slobj = slobj.value + + try: + return self.const_type(value[slobj], self.env) + except TypeError as err: + raise ValueError( + f"cannot subscript {repr(value)} with {repr(slobj)}" + ) from err + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = type(node.ctx) + if ctx == ast.Load: + # resolve the value + resolved = self.visit(value) + + # try to get the value to see if we are another expression + try: + resolved = resolved.value + except AttributeError: + pass + + try: + return self.term_type(getattr(resolved, attr), self.env) + except AttributeError: + # something like datetime.datetime where scope is overridden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + + raise ValueError(f"Invalid Attribute context {ctx.__name__}") + + def translate_In(self, op): + return ast.Eq() if isinstance(op, ast.In) else op + + def _rewrite_membership_op(self, node, left, right): + return self.visit(node.op), node.op, left, right + + +def _validate_where(w): + """ + Validate that the where statement is of the right type. + + The type may either be String, Expr, or list-like of Exprs. + + Parameters + ---------- + w : String term expression, Expr, or list-like of Exprs. + + Returns + ------- + where : The original where clause if the check was successful. + + Raises + ------ + TypeError : An invalid data type was passed in for w (e.g. dict). + """ + if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)): + raise TypeError( + "where must be passed as a string, PyTablesExpr, " + "or list-like of PyTablesExpr" + ) + + return w + + +class PyTablesExpr(expr.Expr): + """ + Hold a pytables-like expression, comprised of possibly multiple 'terms'. 
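+    The parsed terms are later pruned into a numexpr condition and/or a
+    post-read filter by :meth:`evaluate`.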
+ + Parameters + ---------- + where : string term expression, PyTablesExpr, or list-like of PyTablesExprs + queryables : a "kinds" map (dict of column name -> kind), or None if column + is non-indexable + encoding : an encoding that will encode the query terms + + Returns + ------- + a PyTablesExpr object + + Examples + -------- + 'index>=date' + "columns=['A', 'D']" + 'columns=A' + 'columns==A' + "~(columns=['A','B'])" + 'index>df.index[3] & string="bar"' + '(index>df.index[3] & index<=df.index[6]) | string="bar"' + "ts>=Timestamp('2012-02-01')" + "major_axis>=20130101" + """ + + _visitor: PyTablesExprVisitor | None + env: PyTablesScope + expr: str + + def __init__( + self, + where, + queryables: dict[str, Any] | None = None, + encoding=None, + scope_level: int = 0, + ) -> None: + where = _validate_where(where) + + self.encoding = encoding + self.condition = None + self.filter = None + self.terms = None + self._visitor = None + + # capture the environment if needed + local_dict: _scope.DeepChainMap[Any, Any] | None = None + + if isinstance(where, PyTablesExpr): + local_dict = where.env.scope + _where = where.expr + + elif is_list_like(where): + where = list(where) + for idx, w in enumerate(where): + if isinstance(w, PyTablesExpr): + local_dict = w.env.scope + else: + where[idx] = _validate_where(w) + _where = " & ".join([f"({w})" for w in com.flatten(where)]) + else: + # _validate_where ensures we otherwise have a string + _where = where + + self.expr = _where + self.env = PyTablesScope(scope_level + 1, local_dict=local_dict) + + if queryables is not None and isinstance(self.expr, str): + self.env.queryables.update(queryables) + self._visitor = PyTablesExprVisitor( + self.env, + queryables=queryables, + parser="pytables", + engine="pytables", + encoding=encoding, + ) + self.terms = self.parse() + + def __repr__(self) -> str: + if self.terms is not None: + return pprint_thing(self.terms) + return pprint_thing(self.expr) + + def evaluate(self): + """create and return the numexpr condition and filter""" + try: + self.condition = self.terms.prune(ConditionBinOp) + except AttributeError as err: + raise ValueError( + f"cannot process expression [{self.expr}], [{self}] " + "is not a valid condition" + ) from err + try: + self.filter = self.terms.prune(FilterBinOp) + except AttributeError as err: + raise ValueError( + f"cannot process expression [{self.expr}], [{self}] " + "is not a valid filter" + ) from err + + return self.condition, self.filter + + +class TermValue: + """hold a term value the we use to construct a condition/filter""" + + def __init__(self, value, converted, kind: str) -> None: + assert isinstance(kind, str), kind + self.value = value + self.converted = converted + self.kind = kind + + def tostring(self, encoding) -> str: + """quote the string if not encoded else encode and return""" + if self.kind == "string": + if encoding is not None: + return str(self.converted) + return f'"{self.converted}"' + elif self.kind == "float": + # python 2 str(float) is not always + # round-trippable so use repr() + return repr(self.converted) + return str(self.converted) + + +def maybe_expression(s) -> bool: + """loose checking if s is a pytables-acceptable expression""" + if not isinstance(s, str): + return False + operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",) + + # make sure we have an op at least + return any(op in s for op in operations) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/scope.py 
b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/scope.py new file mode 100644 index 0000000000000000000000000000000000000000..7e553ca448218435eafe1fd7ca97dce6f739e2a3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/core/computation/scope.py @@ -0,0 +1,355 @@ +""" +Module for scope operations +""" +from __future__ import annotations + +from collections import ChainMap +import datetime +import inspect +from io import StringIO +import itertools +import pprint +import struct +import sys +from typing import TypeVar + +import numpy as np + +from pandas._libs.tslibs import Timestamp +from pandas.errors import UndefinedVariableError + +_KT = TypeVar("_KT") +_VT = TypeVar("_VT") + + +# https://docs.python.org/3/library/collections.html#chainmap-examples-and-recipes +class DeepChainMap(ChainMap[_KT, _VT]): + """ + Variant of ChainMap that allows direct updates to inner scopes. + + Only works when all passed mapping are mutable. + """ + + def __setitem__(self, key: _KT, value: _VT) -> None: + for mapping in self.maps: + if key in mapping: + mapping[key] = value + return + self.maps[0][key] = value + + def __delitem__(self, key: _KT) -> None: + """ + Raises + ------ + KeyError + If `key` doesn't exist. + """ + for mapping in self.maps: + if key in mapping: + del mapping[key] + return + raise KeyError(key) + + +def ensure_scope( + level: int, global_dict=None, local_dict=None, resolvers=(), target=None +) -> Scope: + """Ensure that we are grabbing the correct scope.""" + return Scope( + level + 1, + global_dict=global_dict, + local_dict=local_dict, + resolvers=resolvers, + target=target, + ) + + +def _replacer(x) -> str: + """ + Replace a number with its hexadecimal representation. Used to tag + temporary variables with their calling scope's id. + """ + # get the hex repr of the binary char and remove 0x and pad by pad_size + # zeros + try: + hexin = ord(x) + except TypeError: + # bytes literals masquerade as ints when iterating in py3 + hexin = x + + return hex(hexin) + + +def _raw_hex_id(obj) -> str: + """Return the padded hexadecimal id of ``obj``.""" + # interpret as a pointer since that's what really what id returns + packed = struct.pack("@P", id(obj)) + return "".join([_replacer(x) for x in packed]) + + +DEFAULT_GLOBALS = { + "Timestamp": Timestamp, + "datetime": datetime.datetime, + "True": True, + "False": False, + "list": list, + "tuple": tuple, + "inf": np.inf, + "Inf": np.inf, +} + + +def _get_pretty_string(obj) -> str: + """ + Return a prettier version of obj. + + Parameters + ---------- + obj : object + Object to pretty print + + Returns + ------- + str + Pretty print object repr + """ + sio = StringIO() + pprint.pprint(obj, stream=sio) + return sio.getvalue() + + +class Scope: + """ + Object to hold scope, with a few bells to deal with some custom syntax + and contexts added by pandas. 
+ + Parameters + ---------- + level : int + global_dict : dict or None, optional, default None + local_dict : dict or Scope or None, optional, default None + resolvers : list-like or None, optional, default None + target : object + + Attributes + ---------- + level : int + scope : DeepChainMap + target : object + temps : dict + """ + + __slots__ = ["level", "scope", "target", "resolvers", "temps"] + level: int + scope: DeepChainMap + resolvers: DeepChainMap + temps: dict + + def __init__( + self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None + ) -> None: + self.level = level + 1 + + # shallow copy because we don't want to keep filling this up with what + # was there before if there are multiple calls to Scope/_ensure_scope + self.scope = DeepChainMap(DEFAULT_GLOBALS.copy()) + self.target = target + + if isinstance(local_dict, Scope): + self.scope.update(local_dict.scope) + if local_dict.target is not None: + self.target = local_dict.target + self._update(local_dict.level) + + frame = sys._getframe(self.level) + + try: + # shallow copy here because we don't want to replace what's in + # scope when we align terms (alignment accesses the underlying + # numpy array of pandas objects) + scope_global = self.scope.new_child( + (global_dict if global_dict is not None else frame.f_globals).copy() + ) + self.scope = DeepChainMap(scope_global) + if not isinstance(local_dict, Scope): + scope_local = self.scope.new_child( + (local_dict if local_dict is not None else frame.f_locals).copy() + ) + self.scope = DeepChainMap(scope_local) + finally: + del frame + + # assumes that resolvers are going from outermost scope to inner + if isinstance(local_dict, Scope): + resolvers += tuple(local_dict.resolvers.maps) + self.resolvers = DeepChainMap(*resolvers) + self.temps = {} + + def __repr__(self) -> str: + scope_keys = _get_pretty_string(list(self.scope.keys())) + res_keys = _get_pretty_string(list(self.resolvers.keys())) + return f"{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})" + + @property + def has_resolvers(self) -> bool: + """ + Return whether we have any extra scope. + + For example, DataFrames pass Their columns as resolvers during calls to + ``DataFrame.eval()`` and ``DataFrame.query()``. + + Returns + ------- + hr : bool + """ + return bool(len(self.resolvers)) + + def resolve(self, key: str, is_local: bool): + """ + Resolve a variable name in a possibly local context. + + Parameters + ---------- + key : str + A variable name + is_local : bool + Flag indicating whether the variable is local or not (prefixed with + the '@' symbol) + + Returns + ------- + value : object + The value of a particular variable + """ + try: + # only look for locals in outer scope + if is_local: + return self.scope[key] + + # not a local variable so check in resolvers if we have them + if self.has_resolvers: + return self.resolvers[key] + + # if we're here that means that we have no locals and we also have + # no resolvers + assert not is_local and not self.has_resolvers + return self.scope[key] + except KeyError: + try: + # last ditch effort we look in temporaries + # these are created when parsing indexing expressions + # e.g., df[df > 0] + return self.temps[key] + except KeyError as err: + raise UndefinedVariableError(key, is_local) from err + + def swapkey(self, old_key: str, new_key: str, new_value=None) -> None: + """ + Replace a variable name, with a potentially new value. 
+ + Parameters + ---------- + old_key : str + Current variable name to replace + new_key : str + New variable name to replace `old_key` with + new_value : object + Value to be replaced along with the possible renaming + """ + if self.has_resolvers: + maps = self.resolvers.maps + self.scope.maps + else: + maps = self.scope.maps + + maps.append(self.temps) + + for mapping in maps: + if old_key in mapping: + mapping[new_key] = new_value + return + + def _get_vars(self, stack, scopes: list[str]) -> None: + """ + Get specifically scoped variables from a list of stack frames. + + Parameters + ---------- + stack : list + A list of stack frames as returned by ``inspect.stack()`` + scopes : sequence of strings + A sequence containing valid stack frame attribute names that + evaluate to a dictionary. For example, ('locals', 'globals') + """ + variables = itertools.product(scopes, stack) + for scope, (frame, _, _, _, _, _) in variables: + try: + d = getattr(frame, f"f_{scope}") + self.scope = DeepChainMap(self.scope.new_child(d)) + finally: + # won't remove it, but DECREF it + # in Py3 this probably isn't necessary since frame won't be + # scope after the loop + del frame + + def _update(self, level: int) -> None: + """ + Update the current scope by going back `level` levels. + + Parameters + ---------- + level : int + """ + sl = level + 1 + + # add sl frames to the scope starting with the + # most distant and overwriting with more current + # makes sure that we can capture variable scope + stack = inspect.stack() + + try: + self._get_vars(stack[:sl], scopes=["locals"]) + finally: + del stack[:], stack + + def add_tmp(self, value) -> str: + """ + Add a temporary variable to the scope. + + Parameters + ---------- + value : object + An arbitrary object to be assigned to a temporary variable. + + Returns + ------- + str + The name of the temporary variable created. + """ + name = f"{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}" + + # add to inner most scope + assert name not in self.temps + self.temps[name] = value + assert name in self.temps + + # only increment if the variable gets put in the scope + return name + + @property + def ntemps(self) -> int: + """The number of temporary variables in this scope""" + return len(self.temps) + + @property + def full_scope(self) -> DeepChainMap: + """ + Return the full scope for use with passing to engines transparently + as a mapping. + + Returns + ------- + vars : DeepChainMap + All variables in this scope. + """ + maps = [self.temps] + self.resolvers.maps + self.scope.maps + return DeepChainMap(*maps) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df5abcddfa7f2a5cf0ca7c36e111cb420cf1675a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc differ
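The pytables expression classes above are exercised most directly through the public HDFStore API: the where= strings are parsed by PyTablesExprVisitor and split by PyTablesExpr.evaluate() into a numexpr condition and/or a post-read filter. The following is a minimal usage sketch, not part of the diff; the file name demo.h5 is illustrative and the optional PyTables ('tables') dependency is assumed to be installed.

import pandas as pd

df = pd.DataFrame(
    {"A": range(10), "B": list("abcdefghij")},
    index=pd.date_range("2012-01-01", periods=10),
)

with pd.HDFStore("demo.h5", mode="w") as store:
    # format="table" makes the dataset queryable; data_columns lets "A" and "B"
    # appear on the left-hand side of query terms in addition to the index
    store.put("df", df, format="table", data_columns=["A", "B"])

    # a comparison on a data column is compiled into a numexpr condition
    hits = store.select("df", where="A > 5")

    # selecting along the non-queryable columns axis is applied as a
    # post-read filter (the FilterBinOp path) rather than a condition
    cols = store.select("df", where="columns=['A']")

    # a combined expression, mirroring the PyTablesExpr docstring examples;
    # Timestamp is available inside the query via the scope's DEFAULT_GLOBALS
    both = store.select("df", where="index >= Timestamp('2012-01-05') & A < 8")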
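The small helpers at the bottom of that file can also be exercised directly. A sketch under the assumption that the file lands at pandas/core/computation/pytables.py, consistent with the scope.py sibling added in this diff:

from pandas.core.computation.pytables import TermValue, maybe_expression

# maybe_expression is a loose syntactic check: it only requires that some
# known operator (or "=") appears somewhere in the string
assert maybe_expression("index>5")
assert not maybe_expression("foo")

# TermValue.tostring quotes strings when no encoding is supplied and uses
# repr() for floats so the value round-trips through the query text
assert TermValue("bar", "bar", kind="string").tostring(encoding=None) == '"bar"'
assert TermValue(1.5, 1.5, kind="float").tostring(encoding=None) == "1.5"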
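On the scope.py side, the Scope machinery is what lets DataFrame.query and DataFrame.eval resolve '@'-prefixed local variables and bare column names, and DeepChainMap is the write-through ChainMap variant it builds on. A small sketch, not part of the diff; the variable names are illustrative and DeepChainMap is imported from the internal module this diff adds:

import pandas as pd
from pandas.core.computation.scope import DeepChainMap  # internal module

# DataFrame.query builds a Scope under the hood: bare names resolve through
# the DataFrame's resolvers (its columns), while '@'-prefixed names resolve
# to locals captured from the calling frame via sys._getframe
df = pd.DataFrame({"A": [1, 5, 9], "B": [2, 4, 6]})
threshold = 4
print(df.query("A > @threshold"))  # rows where column A exceeds the local

# DeepChainMap writes through to the first mapping that already holds the
# key, instead of always writing to maps[0] like collections.ChainMap
inner, outer = {"x": 1}, {"y": 2}
dcm = DeepChainMap(inner, outer)
dcm["y"] = 99   # 'y' lives in `outer`, so `outer` is updated in place
dcm["z"] = 0    # unknown keys still land in the first mapping, `inner`
assert outer["y"] == 99 and inner["z"] == 0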