diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fc50f786fc6a6c51f78ef9ebd4ee6ed26a2bab3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py
@@ -0,0 +1,7 @@
+from pandas.core.arrays.arrow.accessors import (
+    ListAccessor,
+    StructAccessor,
+)
+from pandas.core.arrays.arrow.array import ArrowExtensionArray
+
+__all__ = ["ArrowExtensionArray", "StructAccessor", "ListAccessor"]
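For illustration only, outside the patch itself: a minimal sketch of how the ListAccessor, StructAccessor, and ArrowExtensionArray exported by this __init__.py are reached through the public pandas API, assuming a pandas build with ArrowDtype support and pyarrow installed. The sample data mirrors the docstring examples that appear later in this diff.

# Sketch: exercising the Arrow list/struct accessors via Series (assumes pyarrow is installed).
import pyarrow as pa
import pandas as pd

lists = pd.Series([[1, 2, 3], [3]], dtype=pd.ArrowDtype(pa.list_(pa.int64())))
print(lists.list.len())      # length of each list: 3, 1
print(lists.list[0])         # first element of each list: 1, 3
print(lists.list.flatten())  # 1, 2, 3, 3

structs = pd.Series(
    [{"version": 1, "project": "pandas"}, {"version": 2, "project": "numpy"}],
    dtype=pd.ArrowDtype(pa.struct([("version", pa.int64()), ("project", pa.string())])),
)
print(structs.struct.field("project"))  # one child field as a Series
print(structs.struct.explode())         # all child fields as a DataFrame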
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a053fac2985c7d5a311ff896dcce846b6aa6e97
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import warnings
+
+import numpy as np
+import pyarrow
+
+from pandas.errors import PerformanceWarning
+from pandas.util._exceptions import find_stack_level
+
+
+def fallback_performancewarning(version: str | None = None) -> None:
+    """
+    Raise a PerformanceWarning for falling back to ExtensionArray's
+    non-pyarrow method
+    """
+    msg = "Falling back on a non-pyarrow code path which may decrease performance."
+    if version is not None:
+        msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning."
+    warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
+
+
+def pyarrow_array_to_numpy_and_mask(
+    arr, dtype: np.dtype
+) -> tuple[np.ndarray, np.ndarray]:
+    """
+    Convert a primitive pyarrow.Array to a numpy array and boolean mask based
+    on the buffers of the Array.
+
+    At the moment pyarrow.BooleanArray is not supported.
+
+    Parameters
+    ----------
+    arr : pyarrow.Array
+    dtype : numpy.dtype
+
+    Returns
+    -------
+    (data, mask)
+        Tuple of two numpy arrays with the raw data (with specified dtype) and
+        a boolean mask (validity mask, so False means missing)
+    """
+    dtype = np.dtype(dtype)
+
+    if pyarrow.types.is_null(arr.type):
+        # No initialization of data is needed since everything is null
+        data = np.empty(len(arr), dtype=dtype)
+        mask = np.zeros(len(arr), dtype=bool)
+        return data, mask
+    buflist = arr.buffers()
+    # Since Arrow buffers might contain padding and the data might be offset,
+    # the buffer gets sliced here before handing it to numpy.
+ # See also https://github.com/pandas-dev/pandas/issues/40896 + offset = arr.offset * dtype.itemsize + length = len(arr) * dtype.itemsize + data_buf = buflist[1][offset : offset + length] + data = np.frombuffer(data_buf, dtype=dtype) + bitmask = buflist[0] + if bitmask is not None: + mask = pyarrow.BooleanArray.from_buffers( + pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset + ) + mask = np.asarray(mask) + else: + mask = np.ones(len(arr), dtype=bool) + return data, mask diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py new file mode 100644 index 0000000000000000000000000000000000000000..124f8fb6ad8bce4b55703e99d730def1dbff90f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py @@ -0,0 +1,473 @@ +"""Accessors for arrow-backed data.""" + +from __future__ import annotations + +from abc import ( + ABCMeta, + abstractmethod, +) +from typing import ( + TYPE_CHECKING, + cast, +) + +from pandas.compat import ( + pa_version_under10p1, + pa_version_under11p0, +) + +from pandas.core.dtypes.common import is_list_like + +if not pa_version_under10p1: + import pyarrow as pa + import pyarrow.compute as pc + + from pandas.core.dtypes.dtypes import ArrowDtype + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas import ( + DataFrame, + Series, + ) + + +class ArrowAccessor(metaclass=ABCMeta): + @abstractmethod + def __init__(self, data, validation_msg: str) -> None: + self._data = data + self._validation_msg = validation_msg + self._validate(data) + + @abstractmethod + def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool: + pass + + def _validate(self, data): + dtype = data.dtype + if not isinstance(dtype, ArrowDtype): + # Raise AttributeError so that inspect can handle non-struct Series. + raise AttributeError(self._validation_msg.format(dtype=dtype)) + + if not self._is_valid_pyarrow_dtype(dtype.pyarrow_dtype): + # Raise AttributeError so that inspect can handle invalid Series. + raise AttributeError(self._validation_msg.format(dtype=dtype)) + + @property + def _pa_array(self): + return self._data.array._pa_array + + +class ListAccessor(ArrowAccessor): + """ + Accessor object for list data properties of the Series values. + + Parameters + ---------- + data : Series + Series containing Arrow list data. + """ + + def __init__(self, data=None) -> None: + super().__init__( + data, + validation_msg="Can only use the '.list' accessor with " + "'list[pyarrow]' dtype, not {dtype}.", + ) + + def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool: + return ( + pa.types.is_list(pyarrow_dtype) + or pa.types.is_fixed_size_list(pyarrow_dtype) + or pa.types.is_large_list(pyarrow_dtype) + ) + + def len(self) -> Series: + """ + Return the length of each list in the Series. + + Returns + ------- + pandas.Series + The length of each list. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... [1, 2, 3], + ... [3], + ... ], + ... dtype=pd.ArrowDtype(pa.list_( + ... pa.int64() + ... )) + ... ) + >>> s.list.len() + 0 3 + 1 1 + dtype: int32[pyarrow] + """ + from pandas import Series + + value_lengths = pc.list_value_length(self._pa_array) + return Series(value_lengths, dtype=ArrowDtype(value_lengths.type)) + + def __getitem__(self, key: int | slice) -> Series: + """ + Index or slice lists in the Series. 
+ + Parameters + ---------- + key : int | slice + Index or slice of indices to access from each list. + + Returns + ------- + pandas.Series + The list at requested index. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... [1, 2, 3], + ... [3], + ... ], + ... dtype=pd.ArrowDtype(pa.list_( + ... pa.int64() + ... )) + ... ) + >>> s.list[0] + 0 1 + 1 3 + dtype: int64[pyarrow] + """ + from pandas import Series + + if isinstance(key, int): + # TODO: Support negative key but pyarrow does not allow + # element index to be an array. + # if key < 0: + # key = pc.add(key, pc.list_value_length(self._pa_array)) + element = pc.list_element(self._pa_array, key) + return Series(element, dtype=ArrowDtype(element.type)) + elif isinstance(key, slice): + if pa_version_under11p0: + raise NotImplementedError( + f"List slice not supported by pyarrow {pa.__version__}." + ) + + # TODO: Support negative start/stop/step, ideally this would be added + # upstream in pyarrow. + start, stop, step = key.start, key.stop, key.step + if start is None: + # TODO: When adding negative step support + # this should be setto last element of array + # when step is negative. + start = 0 + if step is None: + step = 1 + sliced = pc.list_slice(self._pa_array, start, stop, step) + return Series(sliced, dtype=ArrowDtype(sliced.type)) + else: + raise ValueError(f"key must be an int or slice, got {type(key).__name__}") + + def __iter__(self) -> Iterator: + raise TypeError(f"'{type(self).__name__}' object is not iterable") + + def flatten(self) -> Series: + """ + Flatten list values. + + Returns + ------- + pandas.Series + The data from all lists in the series flattened. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... [1, 2, 3], + ... [3], + ... ], + ... dtype=pd.ArrowDtype(pa.list_( + ... pa.int64() + ... )) + ... ) + >>> s.list.flatten() + 0 1 + 1 2 + 2 3 + 3 3 + dtype: int64[pyarrow] + """ + from pandas import Series + + flattened = pc.list_flatten(self._pa_array) + return Series(flattened, dtype=ArrowDtype(flattened.type)) + + +class StructAccessor(ArrowAccessor): + """ + Accessor object for structured data properties of the Series values. + + Parameters + ---------- + data : Series + Series containing Arrow struct data. + """ + + def __init__(self, data=None) -> None: + super().__init__( + data, + validation_msg=( + "Can only use the '.struct' accessor with 'struct[pyarrow]' " + "dtype, not {dtype}." + ), + ) + + def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool: + return pa.types.is_struct(pyarrow_dtype) + + @property + def dtypes(self) -> Series: + """ + Return the dtype object of each child field of the struct. + + Returns + ------- + pandas.Series + The data type of each child field. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... {"version": 1, "project": "pandas"}, + ... {"version": 2, "project": "pandas"}, + ... {"version": 1, "project": "numpy"}, + ... ], + ... dtype=pd.ArrowDtype(pa.struct( + ... [("version", pa.int64()), ("project", pa.string())] + ... )) + ... 
) + >>> s.struct.dtypes + version int64[pyarrow] + project string[pyarrow] + dtype: object + """ + from pandas import ( + Index, + Series, + ) + + pa_type = self._data.dtype.pyarrow_dtype + types = [ArrowDtype(struct.type) for struct in pa_type] + names = [struct.name for struct in pa_type] + return Series(types, index=Index(names)) + + def field( + self, + name_or_index: list[str] + | list[bytes] + | list[int] + | pc.Expression + | bytes + | str + | int, + ) -> Series: + """ + Extract a child field of a struct as a Series. + + Parameters + ---------- + name_or_index : str | bytes | int | expression | list + Name or index of the child field to extract. + + For list-like inputs, this will index into a nested + struct. + + Returns + ------- + pandas.Series + The data corresponding to the selected child field. + + See Also + -------- + Series.struct.explode : Return all child fields as a DataFrame. + + Notes + ----- + The name of the resulting Series will be set using the following + rules: + + - For string, bytes, or integer `name_or_index` (or a list of these, for + a nested selection), the Series name is set to the selected + field's name. + - For a :class:`pyarrow.compute.Expression`, this is set to + the string form of the expression. + - For list-like `name_or_index`, the name will be set to the + name of the final field selected. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... {"version": 1, "project": "pandas"}, + ... {"version": 2, "project": "pandas"}, + ... {"version": 1, "project": "numpy"}, + ... ], + ... dtype=pd.ArrowDtype(pa.struct( + ... [("version", pa.int64()), ("project", pa.string())] + ... )) + ... ) + + Extract by field name. + + >>> s.struct.field("project") + 0 pandas + 1 pandas + 2 numpy + Name: project, dtype: string[pyarrow] + + Extract by field index. + + >>> s.struct.field(0) + 0 1 + 1 2 + 2 1 + Name: version, dtype: int64[pyarrow] + + Or an expression + + >>> import pyarrow.compute as pc + >>> s.struct.field(pc.field("project")) + 0 pandas + 1 pandas + 2 numpy + Name: project, dtype: string[pyarrow] + + For nested struct types, you can pass a list of values to index + multiple levels: + + >>> version_type = pa.struct([ + ... ("major", pa.int64()), + ... ("minor", pa.int64()), + ... ]) + >>> s = pd.Series( + ... [ + ... {"version": {"major": 1, "minor": 5}, "project": "pandas"}, + ... {"version": {"major": 2, "minor": 1}, "project": "pandas"}, + ... {"version": {"major": 1, "minor": 26}, "project": "numpy"}, + ... ], + ... dtype=pd.ArrowDtype(pa.struct( + ... [("version", version_type), ("project", pa.string())] + ... )) + ... ) + >>> s.struct.field(["version", "minor"]) + 0 5 + 1 1 + 2 26 + Name: minor, dtype: int64[pyarrow] + >>> s.struct.field([0, 0]) + 0 1 + 1 2 + 2 1 + Name: major, dtype: int64[pyarrow] + """ + from pandas import Series + + def get_name( + level_name_or_index: list[str] + | list[bytes] + | list[int] + | pc.Expression + | bytes + | str + | int, + data: pa.ChunkedArray, + ): + if isinstance(level_name_or_index, int): + name = data.type.field(level_name_or_index).name + elif isinstance(level_name_or_index, (str, bytes)): + name = level_name_or_index + elif isinstance(level_name_or_index, pc.Expression): + name = str(level_name_or_index) + elif is_list_like(level_name_or_index): + # For nested input like [2, 1, 2] + # iteratively get the struct and field name. The last + # one is used for the name of the index. 
+ level_name_or_index = list(reversed(level_name_or_index)) + selected = data + while level_name_or_index: + # we need the cast, otherwise mypy complains about + # getting ints, bytes, or str here, which isn't possible. + level_name_or_index = cast(list, level_name_or_index) + name_or_index = level_name_or_index.pop() + name = get_name(name_or_index, selected) + selected = selected.type.field(selected.type.get_field_index(name)) + name = selected.name + else: + raise ValueError( + "name_or_index must be an int, str, bytes, " + "pyarrow.compute.Expression, or list of those" + ) + return name + + pa_arr = self._data.array._pa_array + name = get_name(name_or_index, pa_arr) + field_arr = pc.struct_field(pa_arr, name_or_index) + + return Series( + field_arr, + dtype=ArrowDtype(field_arr.type), + index=self._data.index, + name=name, + ) + + def explode(self) -> DataFrame: + """ + Extract all child fields of a struct as a DataFrame. + + Returns + ------- + pandas.DataFrame + The data corresponding to all child fields. + + See Also + -------- + Series.struct.field : Return a single child field as a Series. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... {"version": 1, "project": "pandas"}, + ... {"version": 2, "project": "pandas"}, + ... {"version": 1, "project": "numpy"}, + ... ], + ... dtype=pd.ArrowDtype(pa.struct( + ... [("version", pa.int64()), ("project", pa.string())] + ... )) + ... ) + + >>> s.struct.explode() + version project + 0 1 pandas + 1 2 pandas + 2 1 numpy + """ + from pandas import concat + + pa_type = self._pa_array.type + return concat( + [self.field(i) for i in range(pa_type.num_fields)], axis="columns" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b8aa75ca5bfa0a860fe6709539cb19b316758d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py @@ -0,0 +1,2942 @@ +from __future__ import annotations + +import functools +import operator +import re +import textwrap +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, +) +import unicodedata + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import ( + NaT, + Timedelta, + Timestamp, + timezones, +) +from pandas.compat import ( + pa_version_under10p1, + pa_version_under11p0, + pa_version_under13p0, +) +from pandas.util._decorators import doc +from pandas.util._validators import validate_fillna_kwargs + +from pandas.core.dtypes.cast import ( + can_hold_element, + infer_dtype_from_scalar, +) +from pandas.core.dtypes.common import ( + CategoricalDtype, + is_array_like, + is_bool_dtype, + is_float_dtype, + is_integer, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + algorithms as algos, + missing, + ops, + roperator, +) +from pandas.core.algorithms import map_array +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin +from pandas.core.arrays._utils import to_numpy_dtype_inference +from pandas.core.arrays.base import ( + ExtensionArray, + ExtensionArraySupportsAnyAll, +) +from pandas.core.arrays.masked import BaseMaskedArray +from pandas.core.arrays.string_ import StringDtype +import pandas.core.common as com +from 
pandas.core.indexers import ( + check_array_indexer, + unpack_tuple_and_ellipses, + validate_indices, +) +from pandas.core.strings.base import BaseStringArrayMethods + +from pandas.io._util import _arrow_dtype_mapping +from pandas.tseries.frequencies import to_offset + +if not pa_version_under10p1: + import pyarrow as pa + import pyarrow.compute as pc + + from pandas.core.dtypes.dtypes import ArrowDtype + + ARROW_CMP_FUNCS = { + "eq": pc.equal, + "ne": pc.not_equal, + "lt": pc.less, + "gt": pc.greater, + "le": pc.less_equal, + "ge": pc.greater_equal, + } + + ARROW_LOGICAL_FUNCS = { + "and_": pc.and_kleene, + "rand_": lambda x, y: pc.and_kleene(y, x), + "or_": pc.or_kleene, + "ror_": lambda x, y: pc.or_kleene(y, x), + "xor": pc.xor, + "rxor": lambda x, y: pc.xor(y, x), + } + + ARROW_BIT_WISE_FUNCS = { + "and_": pc.bit_wise_and, + "rand_": lambda x, y: pc.bit_wise_and(y, x), + "or_": pc.bit_wise_or, + "ror_": lambda x, y: pc.bit_wise_or(y, x), + "xor": pc.bit_wise_xor, + "rxor": lambda x, y: pc.bit_wise_xor(y, x), + } + + def cast_for_truediv( + arrow_array: pa.ChunkedArray, pa_object: pa.Array | pa.Scalar + ) -> tuple[pa.ChunkedArray, pa.Array | pa.Scalar]: + # Ensure int / int -> float mirroring Python/Numpy behavior + # as pc.divide_checked(int, int) -> int + if pa.types.is_integer(arrow_array.type) and pa.types.is_integer( + pa_object.type + ): + # GH: 56645. + # https://github.com/apache/arrow/issues/35563 + return pc.cast(arrow_array, pa.float64(), safe=False), pc.cast( + pa_object, pa.float64(), safe=False + ) + + return arrow_array, pa_object + + def floordiv_compat( + left: pa.ChunkedArray | pa.Array | pa.Scalar, + right: pa.ChunkedArray | pa.Array | pa.Scalar, + ) -> pa.ChunkedArray: + # TODO: Replace with pyarrow floordiv kernel. + # https://github.com/apache/arrow/issues/39386 + if pa.types.is_integer(left.type) and pa.types.is_integer(right.type): + divided = pc.divide_checked(left, right) + if pa.types.is_signed_integer(divided.type): + # GH 56676 + has_remainder = pc.not_equal(pc.multiply(divided, right), left) + has_one_negative_operand = pc.less( + pc.bit_wise_xor(left, right), + pa.scalar(0, type=divided.type), + ) + result = pc.if_else( + pc.and_( + has_remainder, + has_one_negative_operand, + ), + # GH: 55561 + pc.subtract(divided, pa.scalar(1, type=divided.type)), + divided, + ) + else: + result = divided + result = result.cast(left.type) + else: + divided = pc.divide(left, right) + result = pc.floor(divided) + return result + + ARROW_ARITHMETIC_FUNCS = { + "add": pc.add_checked, + "radd": lambda x, y: pc.add_checked(y, x), + "sub": pc.subtract_checked, + "rsub": lambda x, y: pc.subtract_checked(y, x), + "mul": pc.multiply_checked, + "rmul": lambda x, y: pc.multiply_checked(y, x), + "truediv": lambda x, y: pc.divide(*cast_for_truediv(x, y)), + "rtruediv": lambda x, y: pc.divide(*cast_for_truediv(y, x)), + "floordiv": lambda x, y: floordiv_compat(x, y), + "rfloordiv": lambda x, y: floordiv_compat(y, x), + "mod": NotImplemented, + "rmod": NotImplemented, + "divmod": NotImplemented, + "rdivmod": NotImplemented, + "pow": pc.power_checked, + "rpow": lambda x, y: pc.power_checked(y, x), + } + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + ArrayLike, + AxisInt, + Dtype, + FillnaOptions, + InterpolateOptions, + Iterator, + NpDtype, + NumpySorter, + NumpyValueArrayLike, + PositionalIndexer, + Scalar, + Self, + SortKind, + TakeIndexer, + TimeAmbiguous, + TimeNonexistent, + npt, + ) + + from pandas import Series + from 
pandas.core.arrays.datetimes import DatetimeArray + from pandas.core.arrays.timedeltas import TimedeltaArray + + +def get_unit_from_pa_dtype(pa_dtype): + # https://github.com/pandas-dev/pandas/pull/50998#discussion_r1100344804 + if pa_version_under11p0: + unit = str(pa_dtype).split("[", 1)[-1][:-1] + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError(pa_dtype) + return unit + return pa_dtype.unit + + +def to_pyarrow_type( + dtype: ArrowDtype | pa.DataType | Dtype | None, +) -> pa.DataType | None: + """ + Convert dtype to a pyarrow type instance. + """ + if isinstance(dtype, ArrowDtype): + return dtype.pyarrow_dtype + elif isinstance(dtype, pa.DataType): + return dtype + elif isinstance(dtype, DatetimeTZDtype): + return pa.timestamp(dtype.unit, dtype.tz) + elif dtype: + try: + # Accepts python types too + # Doesn't handle all numpy types + return pa.from_numpy_dtype(dtype) + except pa.ArrowNotImplementedError: + pass + return None + + +class ArrowExtensionArray( + OpsMixin, + ExtensionArraySupportsAnyAll, + ArrowStringArrayMixin, + BaseStringArrayMethods, +): + """ + Pandas ExtensionArray backed by a PyArrow ChunkedArray. + + .. warning:: + + ArrowExtensionArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : pyarrow.Array or pyarrow.ChunkedArray + + Attributes + ---------- + None + + Methods + ------- + None + + Returns + ------- + ArrowExtensionArray + + Notes + ----- + Most methods are implemented using `pyarrow compute functions. `__ + Some methods may either raise an exception or raise a ``PerformanceWarning`` if an + associated compute function is not available based on the installed version of PyArrow. + + Please install the latest version of PyArrow to enable the best functionality and avoid + potential bugs in prior versions of PyArrow. + + Examples + -------- + Create an ArrowExtensionArray with :func:`pandas.array`: + + >>> pd.array([1, 1, None], dtype="int64[pyarrow]") + + [1, 1, ] + Length: 3, dtype: int64[pyarrow] + """ # noqa: E501 (http link too long) + + _pa_array: pa.ChunkedArray + _dtype: ArrowDtype + + def __init__(self, values: pa.Array | pa.ChunkedArray) -> None: + if pa_version_under10p1: + msg = "pyarrow>=10.0.1 is required for PyArrow backed ArrowExtensionArray." + raise ImportError(msg) + if isinstance(values, pa.Array): + self._pa_array = pa.chunked_array([values]) + elif isinstance(values, pa.ChunkedArray): + self._pa_array = values + else: + raise ValueError( + f"Unsupported type '{type(values)}' for ArrowExtensionArray" + ) + self._dtype = ArrowDtype(self._pa_array.type) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + """ + Construct a new ExtensionArray from a sequence of scalars. + """ + pa_type = to_pyarrow_type(dtype) + pa_array = cls._box_pa_array(scalars, pa_type=pa_type, copy=copy) + arr = cls(pa_array) + return arr + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ): + """ + Construct a new ExtensionArray from a sequence of strings. 
+ """ + pa_type = to_pyarrow_type(dtype) + if ( + pa_type is None + or pa.types.is_binary(pa_type) + or pa.types.is_string(pa_type) + or pa.types.is_large_string(pa_type) + ): + # pa_type is None: Let pa.array infer + # pa_type is string/binary: scalars already correct type + scalars = strings + elif pa.types.is_timestamp(pa_type): + from pandas.core.tools.datetimes import to_datetime + + scalars = to_datetime(strings, errors="raise") + elif pa.types.is_date(pa_type): + from pandas.core.tools.datetimes import to_datetime + + scalars = to_datetime(strings, errors="raise").date + elif pa.types.is_duration(pa_type): + from pandas.core.tools.timedeltas import to_timedelta + + scalars = to_timedelta(strings, errors="raise") + if pa_type.unit != "ns": + # GH51175: test_from_sequence_of_strings_pa_array + # attempt to parse as int64 reflecting pyarrow's + # duration to string casting behavior + mask = isna(scalars) + if not isinstance(strings, (pa.Array, pa.ChunkedArray)): + strings = pa.array(strings, type=pa.string(), from_pandas=True) + strings = pc.if_else(mask, None, strings) + try: + scalars = strings.cast(pa.int64()) + except pa.ArrowInvalid: + pass + elif pa.types.is_time(pa_type): + from pandas.core.tools.times import to_time + + # "coerce" to allow "null times" (None) to not raise + scalars = to_time(strings, errors="coerce") + elif pa.types.is_boolean(pa_type): + # pyarrow string->bool casting is case-insensitive: + # "true" or "1" -> True + # "false" or "0" -> False + # Note: BooleanArray was previously used to parse these strings + # and allows "1.0" and "0.0". Pyarrow casting does not support + # this, but we allow it here. + if isinstance(strings, (pa.Array, pa.ChunkedArray)): + scalars = strings + else: + scalars = pa.array(strings, type=pa.string(), from_pandas=True) + scalars = pc.if_else(pc.equal(scalars, "1.0"), "1", scalars) + scalars = pc.if_else(pc.equal(scalars, "0.0"), "0", scalars) + scalars = scalars.cast(pa.bool_()) + elif ( + pa.types.is_integer(pa_type) + or pa.types.is_floating(pa_type) + or pa.types.is_decimal(pa_type) + ): + from pandas.core.tools.numeric import to_numeric + + scalars = to_numeric(strings, errors="raise") + else: + raise NotImplementedError( + f"Converting strings to {pa_type} is not implemented." + ) + return cls._from_sequence(scalars, dtype=pa_type, copy=copy) + + @classmethod + def _box_pa( + cls, value, pa_type: pa.DataType | None = None + ) -> pa.Array | pa.ChunkedArray | pa.Scalar: + """ + Box value into a pyarrow Array, ChunkedArray or Scalar. + + Parameters + ---------- + value : any + pa_type : pa.DataType | None + + Returns + ------- + pa.Array or pa.ChunkedArray or pa.Scalar + """ + if isinstance(value, pa.Scalar) or not is_list_like(value): + return cls._box_pa_scalar(value, pa_type) + return cls._box_pa_array(value, pa_type) + + @classmethod + def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar: + """ + Box value into a pyarrow Scalar. 
+ + Parameters + ---------- + value : any + pa_type : pa.DataType | None + + Returns + ------- + pa.Scalar + """ + if isinstance(value, pa.Scalar): + pa_scalar = value + elif isna(value): + pa_scalar = pa.scalar(None, type=pa_type) + else: + # Workaround https://github.com/apache/arrow/issues/37291 + if isinstance(value, Timedelta): + if pa_type is None: + pa_type = pa.duration(value.unit) + elif value.unit != pa_type.unit: + value = value.as_unit(pa_type.unit) + value = value._value + elif isinstance(value, Timestamp): + if pa_type is None: + pa_type = pa.timestamp(value.unit, tz=value.tz) + elif value.unit != pa_type.unit: + value = value.as_unit(pa_type.unit) + value = value._value + + pa_scalar = pa.scalar(value, type=pa_type, from_pandas=True) + + if pa_type is not None and pa_scalar.type != pa_type: + pa_scalar = pa_scalar.cast(pa_type) + + return pa_scalar + + @classmethod + def _box_pa_array( + cls, value, pa_type: pa.DataType | None = None, copy: bool = False + ) -> pa.Array | pa.ChunkedArray: + """ + Box value into a pyarrow Array or ChunkedArray. + + Parameters + ---------- + value : Sequence + pa_type : pa.DataType | None + + Returns + ------- + pa.Array or pa.ChunkedArray + """ + if isinstance(value, cls): + pa_array = value._pa_array + elif isinstance(value, (pa.Array, pa.ChunkedArray)): + pa_array = value + elif isinstance(value, BaseMaskedArray): + # GH 52625 + if copy: + value = value.copy() + pa_array = value.__arrow_array__() + else: + if ( + isinstance(value, np.ndarray) + and pa_type is not None + and ( + pa.types.is_large_binary(pa_type) + or pa.types.is_large_string(pa_type) + ) + ): + # See https://github.com/apache/arrow/issues/35289 + value = value.tolist() + elif copy and is_array_like(value): + # pa array should not get updated when numpy array is updated + value = value.copy() + + if ( + pa_type is not None + and pa.types.is_duration(pa_type) + and (not isinstance(value, np.ndarray) or value.dtype.kind not in "mi") + ): + # Workaround https://github.com/apache/arrow/issues/37291 + from pandas.core.tools.timedeltas import to_timedelta + + value = to_timedelta(value, unit=pa_type.unit).as_unit(pa_type.unit) + value = value.to_numpy() + + try: + pa_array = pa.array(value, type=pa_type, from_pandas=True) + except (pa.ArrowInvalid, pa.ArrowTypeError): + # GH50430: let pyarrow infer type, then cast + pa_array = pa.array(value, from_pandas=True) + + if pa_type is None and pa.types.is_duration(pa_array.type): + # Workaround https://github.com/apache/arrow/issues/37291 + from pandas.core.tools.timedeltas import to_timedelta + + value = to_timedelta(value) + value = value.to_numpy() + pa_array = pa.array(value, type=pa_type, from_pandas=True) + + if pa.types.is_duration(pa_array.type) and pa_array.null_count > 0: + # GH52843: upstream bug for duration types when originally + # constructed with data containing numpy NaT. 
+ # https://github.com/apache/arrow/issues/35088 + arr = cls(pa_array) + arr = arr.fillna(arr.dtype.na_value) + pa_array = arr._pa_array + + if pa_type is not None and pa_array.type != pa_type: + if pa.types.is_dictionary(pa_type): + pa_array = pa_array.dictionary_encode() + else: + try: + pa_array = pa_array.cast(pa_type) + except ( + pa.ArrowInvalid, + pa.ArrowTypeError, + pa.ArrowNotImplementedError, + ): + if pa.types.is_string(pa_array.type) or pa.types.is_large_string( + pa_array.type + ): + # TODO: Move logic in _from_sequence_of_strings into + # _box_pa_array + return cls._from_sequence_of_strings( + value, dtype=pa_type + )._pa_array + else: + raise + + return pa_array + + def __getitem__(self, item: PositionalIndexer): + """Select a subset of self. + + Parameters + ---------- + item : int, slice, or ndarray + * int: The position in 'self' to get. + * slice: A slice object, where 'start', 'stop', and 'step' are + integers or None + * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' + + Returns + ------- + item : scalar or ExtensionArray + + Notes + ----- + For scalar ``item``, return a scalar value suitable for the array's + type. This should be an instance of ``self.dtype.type``. + For slice ``key``, return an instance of ``ExtensionArray``, even + if the slice is length 0 or 1. + For a boolean mask, return an instance of ``ExtensionArray``, filtered + to the values where ``item`` is True. + """ + item = check_array_indexer(self, item) + + if isinstance(item, np.ndarray): + if not len(item): + # Removable once we migrate StringDtype[pyarrow] to ArrowDtype[string] + if self._dtype.name == "string" and self._dtype.storage in ( + "pyarrow", + "pyarrow_numpy", + ): + pa_dtype = pa.string() + else: + pa_dtype = self._dtype.pyarrow_dtype + return type(self)(pa.chunked_array([], type=pa_dtype)) + elif item.dtype.kind in "iu": + return self.take(item) + elif item.dtype.kind == "b": + return type(self)(self._pa_array.filter(item)) + else: + raise IndexError( + "Only integers, slices and integer or " + "boolean arrays are valid indices." + ) + elif isinstance(item, tuple): + item = unpack_tuple_and_ellipses(item) + + if item is Ellipsis: + # TODO: should be handled by pyarrow? + item = slice(None) + + if is_scalar(item) and not is_integer(item): + # e.g. "foo" or 2.5 + # exception message copied from numpy + raise IndexError( + r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " + r"(`None`) and integer or boolean arrays are valid indices" + ) + # We are not an array indexer, so maybe e.g. a slice or integer + # indexer. We dispatch to pyarrow. + if isinstance(item, slice): + # Arrow bug https://github.com/apache/arrow/issues/38768 + if item.start == item.stop: + pass + elif ( + item.stop is not None + and item.stop < -len(self) + and item.step is not None + and item.step < 0 + ): + item = slice(item.start, None, item.step) + + value = self._pa_array[item] + if isinstance(value, pa.ChunkedArray): + return type(self)(value) + else: + pa_type = self._pa_array.type + scalar = value.as_py() + if scalar is None: + return self._dtype.na_value + elif pa.types.is_timestamp(pa_type) and pa_type.unit != "ns": + # GH 53326 + return Timestamp(scalar).as_unit(pa_type.unit) + elif pa.types.is_duration(pa_type) and pa_type.unit != "ns": + # GH 53326 + return Timedelta(scalar).as_unit(pa_type.unit) + else: + return scalar + + def __iter__(self) -> Iterator[Any]: + """ + Iterate over elements of the array. 
+ """ + na_value = self._dtype.na_value + # GH 53326 + pa_type = self._pa_array.type + box_timestamp = pa.types.is_timestamp(pa_type) and pa_type.unit != "ns" + box_timedelta = pa.types.is_duration(pa_type) and pa_type.unit != "ns" + for value in self._pa_array: + val = value.as_py() + if val is None: + yield na_value + elif box_timestamp: + yield Timestamp(val).as_unit(pa_type.unit) + elif box_timedelta: + yield Timedelta(val).as_unit(pa_type.unit) + else: + yield val + + def __arrow_array__(self, type=None): + """Convert myself to a pyarrow ChunkedArray.""" + return self._pa_array + + def __array__( + self, dtype: NpDtype | None = None, copy: bool | None = None + ) -> np.ndarray: + """Correctly construct numpy arrays when passed to `np.asarray()`.""" + return self.to_numpy(dtype=dtype) + + def __invert__(self) -> Self: + # This is a bit wise op for integer types + if pa.types.is_integer(self._pa_array.type): + return type(self)(pc.bit_wise_not(self._pa_array)) + elif pa.types.is_string(self._pa_array.type) or pa.types.is_large_string( + self._pa_array.type + ): + # Raise TypeError instead of pa.ArrowNotImplementedError + raise TypeError("__invert__ is not supported for string dtypes") + else: + return type(self)(pc.invert(self._pa_array)) + + def __neg__(self) -> Self: + return type(self)(pc.negate_checked(self._pa_array)) + + def __pos__(self) -> Self: + return type(self)(self._pa_array) + + def __abs__(self) -> Self: + return type(self)(pc.abs_checked(self._pa_array)) + + # GH 42600: __getstate__/__setstate__ not necessary once + # https://issues.apache.org/jira/browse/ARROW-10739 is addressed + def __getstate__(self): + state = self.__dict__.copy() + state["_pa_array"] = self._pa_array.combine_chunks() + return state + + def __setstate__(self, state) -> None: + if "_data" in state: + data = state.pop("_data") + else: + data = state["_pa_array"] + state["_pa_array"] = pa.chunked_array(data) + self.__dict__.update(state) + + def _cmp_method(self, other, op): + pc_func = ARROW_CMP_FUNCS[op.__name__] + if isinstance( + other, (ArrowExtensionArray, np.ndarray, list, BaseMaskedArray) + ) or isinstance(getattr(other, "dtype", None), CategoricalDtype): + result = pc_func(self._pa_array, self._box_pa(other)) + elif is_scalar(other): + try: + result = pc_func(self._pa_array, self._box_pa(other)) + except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid): + mask = isna(self) | isna(other) + valid = ~mask + result = np.zeros(len(self), dtype="bool") + np_array = np.array(self) + try: + result[valid] = op(np_array[valid], other) + except TypeError: + result = ops.invalid_comparison(np_array, other, op) + result = pa.array(result, type=pa.bool_()) + result = pc.if_else(valid, result, None) + else: + raise NotImplementedError( + f"{op.__name__} not implemented for {type(other)}" + ) + return ArrowExtensionArray(result) + + def _evaluate_op_method(self, other, op, arrow_funcs): + pa_type = self._pa_array.type + other = self._box_pa(other) + + if ( + pa.types.is_string(pa_type) + or pa.types.is_large_string(pa_type) + or pa.types.is_binary(pa_type) + ): + if op in [operator.add, roperator.radd]: + sep = pa.scalar("", type=pa_type) + if op is operator.add: + result = pc.binary_join_element_wise(self._pa_array, other, sep) + elif op is roperator.radd: + result = pc.binary_join_element_wise(other, self._pa_array, sep) + return type(self)(result) + elif op in [operator.mul, roperator.rmul]: + binary = self._pa_array + integral = other + if not pa.types.is_integer(integral.type): + raise 
TypeError("Can only string multiply by an integer.") + pa_integral = pc.if_else(pc.less(integral, 0), 0, integral) + result = pc.binary_repeat(binary, pa_integral) + return type(self)(result) + elif ( + pa.types.is_string(other.type) + or pa.types.is_binary(other.type) + or pa.types.is_large_string(other.type) + ) and op in [operator.mul, roperator.rmul]: + binary = other + integral = self._pa_array + if not pa.types.is_integer(integral.type): + raise TypeError("Can only string multiply by an integer.") + pa_integral = pc.if_else(pc.less(integral, 0), 0, integral) + result = pc.binary_repeat(binary, pa_integral) + return type(self)(result) + if ( + isinstance(other, pa.Scalar) + and pc.is_null(other).as_py() + and op.__name__ in ARROW_LOGICAL_FUNCS + ): + # pyarrow kleene ops require null to be typed + other = other.cast(pa_type) + + pc_func = arrow_funcs[op.__name__] + if pc_func is NotImplemented: + raise NotImplementedError(f"{op.__name__} not implemented.") + + result = pc_func(self._pa_array, other) + return type(self)(result) + + def _logical_method(self, other, op): + # For integer types `^`, `|`, `&` are bitwise operators and return + # integer types. Otherwise these are boolean ops. + if pa.types.is_integer(self._pa_array.type): + return self._evaluate_op_method(other, op, ARROW_BIT_WISE_FUNCS) + else: + return self._evaluate_op_method(other, op, ARROW_LOGICAL_FUNCS) + + def _arith_method(self, other, op): + return self._evaluate_op_method(other, op, ARROW_ARITHMETIC_FUNCS) + + def equals(self, other) -> bool: + if not isinstance(other, ArrowExtensionArray): + return False + # I'm told that pyarrow makes __eq__ behave like pandas' equals; + # TODO: is this documented somewhere? + return self._pa_array == other._pa_array + + @property + def dtype(self) -> ArrowDtype: + """ + An instance of 'ExtensionDtype'. + """ + return self._dtype + + @property + def nbytes(self) -> int: + """ + The number of bytes needed to store this object in memory. + """ + return self._pa_array.nbytes + + def __len__(self) -> int: + """ + Length of this array. + + Returns + ------- + length : int + """ + return len(self._pa_array) + + def __contains__(self, key) -> bool: + # https://github.com/pandas-dev/pandas/pull/51307#issuecomment-1426372604 + if isna(key) and key is not self.dtype.na_value: + if self.dtype.kind == "f" and lib.is_float(key): + return pc.any(pc.is_nan(self._pa_array)).as_py() + + # e.g. date or timestamp types we do not allow None here to match pd.NA + return False + # TODO: maybe complex? object? + + return bool(super().__contains__(key)) + + @property + def _hasna(self) -> bool: + return self._pa_array.null_count > 0 + + def isna(self) -> npt.NDArray[np.bool_]: + """ + Boolean NumPy array indicating if each value is missing. + + This should return a 1-D array the same length as 'self'. + """ + # GH51630: fast paths + null_count = self._pa_array.null_count + if null_count == 0: + return np.zeros(len(self), dtype=np.bool_) + elif null_count == len(self): + return np.ones(len(self), dtype=np.bool_) + + return self._pa_array.is_null().to_numpy() + + def any(self, *, skipna: bool = True, **kwargs): + """ + Return whether any element is truthy. + + Returns False unless there is at least one element that is truthy. + By default, NAs are skipped. If ``skipna=False`` is specified and + missing values are present, similar :ref:`Kleene logic ` + is used as for logical operations. + + Parameters + ---------- + skipna : bool, default True + Exclude NA values. 
If the entire array is NA and `skipna` is + True, then the result will be False, as for an empty array. + If `skipna` is False, the result will still be True if there is + at least one element that is truthy, otherwise NA will be returned + if there are NA's present. + + Returns + ------- + bool or :attr:`pandas.NA` + + See Also + -------- + ArrowExtensionArray.all : Return whether all elements are truthy. + + Examples + -------- + The result indicates whether any element is truthy (and by default + skips NAs): + + >>> pd.array([True, False, True], dtype="boolean[pyarrow]").any() + True + >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").any() + True + >>> pd.array([False, False, pd.NA], dtype="boolean[pyarrow]").any() + False + >>> pd.array([], dtype="boolean[pyarrow]").any() + False + >>> pd.array([pd.NA], dtype="boolean[pyarrow]").any() + False + >>> pd.array([pd.NA], dtype="float64[pyarrow]").any() + False + + With ``skipna=False``, the result can be NA if this is logically + required (whether ``pd.NA`` is True or False influences the result): + + >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").any(skipna=False) + True + >>> pd.array([1, 0, pd.NA], dtype="boolean[pyarrow]").any(skipna=False) + True + >>> pd.array([False, False, pd.NA], dtype="boolean[pyarrow]").any(skipna=False) + + >>> pd.array([0, 0, pd.NA], dtype="boolean[pyarrow]").any(skipna=False) + + """ + return self._reduce("any", skipna=skipna, **kwargs) + + def all(self, *, skipna: bool = True, **kwargs): + """ + Return whether all elements are truthy. + + Returns True unless there is at least one element that is falsey. + By default, NAs are skipped. If ``skipna=False`` is specified and + missing values are present, similar :ref:`Kleene logic ` + is used as for logical operations. + + Parameters + ---------- + skipna : bool, default True + Exclude NA values. If the entire array is NA and `skipna` is + True, then the result will be True, as for an empty array. + If `skipna` is False, the result will still be False if there is + at least one element that is falsey, otherwise NA will be returned + if there are NA's present. + + Returns + ------- + bool or :attr:`pandas.NA` + + See Also + -------- + ArrowExtensionArray.any : Return whether any element is truthy. 
+ + Examples + -------- + The result indicates whether all elements are truthy (and by default + skips NAs): + + >>> pd.array([True, True, pd.NA], dtype="boolean[pyarrow]").all() + True + >>> pd.array([1, 1, pd.NA], dtype="boolean[pyarrow]").all() + True + >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").all() + False + >>> pd.array([], dtype="boolean[pyarrow]").all() + True + >>> pd.array([pd.NA], dtype="boolean[pyarrow]").all() + True + >>> pd.array([pd.NA], dtype="float64[pyarrow]").all() + True + + With ``skipna=False``, the result can be NA if this is logically + required (whether ``pd.NA`` is True or False influences the result): + + >>> pd.array([True, True, pd.NA], dtype="boolean[pyarrow]").all(skipna=False) + + >>> pd.array([1, 1, pd.NA], dtype="boolean[pyarrow]").all(skipna=False) + + >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").all(skipna=False) + False + >>> pd.array([1, 0, pd.NA], dtype="boolean[pyarrow]").all(skipna=False) + False + """ + return self._reduce("all", skipna=skipna, **kwargs) + + def argsort( + self, + *, + ascending: bool = True, + kind: SortKind = "quicksort", + na_position: str = "last", + **kwargs, + ) -> np.ndarray: + order = "ascending" if ascending else "descending" + null_placement = {"last": "at_end", "first": "at_start"}.get(na_position, None) + if null_placement is None: + raise ValueError(f"invalid na_position: {na_position}") + + result = pc.array_sort_indices( + self._pa_array, order=order, null_placement=null_placement + ) + np_result = result.to_numpy() + return np_result.astype(np.intp, copy=False) + + def _argmin_max(self, skipna: bool, method: str) -> int: + if self._pa_array.length() in (0, self._pa_array.null_count) or ( + self._hasna and not skipna + ): + # For empty or all null, pyarrow returns -1 but pandas expects TypeError + # For skipna=False and data w/ null, pandas expects NotImplementedError + # let ExtensionArray.arg{max|min} raise + return getattr(super(), f"arg{method}")(skipna=skipna) + + data = self._pa_array + if pa.types.is_duration(data.type): + data = data.cast(pa.int64()) + + value = getattr(pc, method)(data, skip_nulls=skipna) + return pc.index(data, value).as_py() + + def argmin(self, skipna: bool = True) -> int: + return self._argmin_max(skipna, "min") + + def argmax(self, skipna: bool = True) -> int: + return self._argmin_max(skipna, "max") + + def copy(self) -> Self: + """ + Return a shallow copy of the array. + + Underlying ChunkedArray is immutable, so a deep copy is unnecessary. + + Returns + ------- + type(self) + """ + return type(self)(self._pa_array) + + def dropna(self) -> Self: + """ + Return ArrowExtensionArray without NA values. 
+ + Returns + ------- + ArrowExtensionArray + """ + return type(self)(pc.drop_null(self._pa_array)) + + def _pad_or_backfill( + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + if not self._hasna: + # TODO(CoW): Not necessary anymore when CoW is the default + return self.copy() + + if limit is None and limit_area is None: + method = missing.clean_fill_method(method) + try: + if method == "pad": + return type(self)(pc.fill_null_forward(self._pa_array)) + elif method == "backfill": + return type(self)(pc.fill_null_backward(self._pa_array)) + except pa.ArrowNotImplementedError: + # ArrowNotImplementedError: Function 'coalesce' has no kernel + # matching input types (duration[ns], duration[ns]) + # TODO: remove try/except wrapper if/when pyarrow implements + # a kernel for duration types. + pass + + # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove + # this method entirely. + return super()._pad_or_backfill( + method=method, limit=limit, limit_area=limit_area, copy=copy + ) + + @doc(ExtensionArray.fillna) + def fillna( + self, + value: object | ArrayLike | None = None, + method: FillnaOptions | None = None, + limit: int | None = None, + copy: bool = True, + ) -> Self: + value, method = validate_fillna_kwargs(value, method) + + if not self._hasna: + # TODO(CoW): Not necessary anymore when CoW is the default + return self.copy() + + if limit is not None: + return super().fillna(value=value, method=method, limit=limit, copy=copy) + + if method is not None: + return super().fillna(method=method, limit=limit, copy=copy) + + if isinstance(value, (np.ndarray, ExtensionArray)): + # Similar to check_value_size, but we do not mask here since we may + # end up passing it to the super() method. + if len(value) != len(self): + raise ValueError( + f"Length of 'value' does not match. Got ({len(value)}) " + f" expected {len(self)}" + ) + + try: + fill_value = self._box_pa(value, pa_type=self._pa_array.type) + except pa.ArrowTypeError as err: + msg = f"Invalid value '{str(value)}' for dtype {self.dtype}" + raise TypeError(msg) from err + + try: + return type(self)(pc.fill_null(self._pa_array, fill_value=fill_value)) + except pa.ArrowNotImplementedError: + # ArrowNotImplementedError: Function 'coalesce' has no kernel + # matching input types (duration[ns], duration[ns]) + # TODO: remove try/except wrapper if/when pyarrow implements + # a kernel for duration types. + pass + + return super().fillna(value=value, method=method, limit=limit, copy=copy) + + def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: + # short-circuit to return all False array. + if not len(values): + return np.zeros(len(self), dtype=bool) + + result = pc.is_in(self._pa_array, value_set=pa.array(values, from_pandas=True)) + # pyarrow 2.0.0 returned nulls, so we explicitly specify dtype to convert nulls + # to False + return np.array(result, dtype=np.bool_) + + def _values_for_factorize(self) -> tuple[np.ndarray, Any]: + """ + Return an array and missing value suitable for factorization. + + Returns + ------- + values : ndarray + na_value : pd.NA + + Notes + ----- + The values returned by this method are also used in + :func:`pandas.util.hash_pandas_object`. 
+ """ + values = self._pa_array.to_numpy() + return values, self.dtype.na_value + + @doc(ExtensionArray.factorize) + def factorize( + self, + use_na_sentinel: bool = True, + ) -> tuple[np.ndarray, ExtensionArray]: + null_encoding = "mask" if use_na_sentinel else "encode" + + data = self._pa_array + pa_type = data.type + if pa_version_under11p0 and pa.types.is_duration(pa_type): + # https://github.com/apache/arrow/issues/15226#issuecomment-1376578323 + data = data.cast(pa.int64()) + + if pa.types.is_dictionary(data.type): + encoded = data + else: + encoded = data.dictionary_encode(null_encoding=null_encoding) + if encoded.length() == 0: + indices = np.array([], dtype=np.intp) + uniques = type(self)(pa.chunked_array([], type=encoded.type.value_type)) + else: + # GH 54844 + combined = encoded.combine_chunks() + pa_indices = combined.indices + if pa_indices.null_count > 0: + pa_indices = pc.fill_null(pa_indices, -1) + indices = pa_indices.to_numpy(zero_copy_only=False, writable=True).astype( + np.intp, copy=False + ) + uniques = type(self)(combined.dictionary) + + if pa_version_under11p0 and pa.types.is_duration(pa_type): + uniques = cast(ArrowExtensionArray, uniques.astype(self.dtype)) + return indices, uniques + + def reshape(self, *args, **kwargs): + raise NotImplementedError( + f"{type(self)} does not support reshape " + f"as backed by a 1D pyarrow.ChunkedArray." + ) + + def round(self, decimals: int = 0, *args, **kwargs) -> Self: + """ + Round each value in the array a to the given number of decimals. + + Parameters + ---------- + decimals : int, default 0 + Number of decimal places to round to. If decimals is negative, + it specifies the number of positions to the left of the decimal point. + *args, **kwargs + Additional arguments and keywords have no effect. + + Returns + ------- + ArrowExtensionArray + Rounded values of the ArrowExtensionArray. + + See Also + -------- + DataFrame.round : Round values of a DataFrame. + Series.round : Round values of a Series. + """ + return type(self)(pc.round(self._pa_array, ndigits=decimals)) + + @doc(ExtensionArray.searchsorted) + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + if self._hasna: + raise ValueError( + "searchsorted requires array to be sorted, which is impossible " + "with NAs present." + ) + if isinstance(value, ExtensionArray): + value = value.astype(object) + # Base class searchsorted would cast to object, which is *much* slower. + dtype = None + if isinstance(self.dtype, ArrowDtype): + pa_dtype = self.dtype.pyarrow_dtype + if ( + pa.types.is_timestamp(pa_dtype) or pa.types.is_duration(pa_dtype) + ) and pa_dtype.unit == "ns": + # np.array[datetime/timedelta].searchsorted(datetime/timedelta) + # erroneously fails when numpy type resolution is nanoseconds + dtype = object + return self.to_numpy(dtype=dtype).searchsorted(value, side=side, sorter=sorter) + + def take( + self, + indices: TakeIndexer, + allow_fill: bool = False, + fill_value: Any = None, + ) -> ArrowExtensionArray: + """ + Take elements from an array. + + Parameters + ---------- + indices : sequence of int or one-dimensional np.ndarray of int + Indices to be taken. + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to + :func:`numpy.take`. 
+ + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + other negative values raise a ``ValueError``. + + fill_value : any, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type, ``self.dtype.na_value``, is used. + + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + Returns + ------- + ExtensionArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. + ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. + + See Also + -------- + numpy.take + api.extensions.take + + Notes + ----- + ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, + ``iloc``, when `indices` is a sequence of values. Additionally, + it's called by :meth:`Series.reindex`, or any other method + that causes realignment, with a `fill_value`. + """ + indices_array = np.asanyarray(indices) + + if len(self._pa_array) == 0 and (indices_array >= 0).any(): + raise IndexError("cannot do a non-empty take") + if indices_array.size > 0 and indices_array.max() >= len(self._pa_array): + raise IndexError("out of bounds value in 'indices'.") + + if allow_fill: + fill_mask = indices_array < 0 + if fill_mask.any(): + validate_indices(indices_array, len(self._pa_array)) + # TODO(ARROW-9433): Treat negative indices as NULL + indices_array = pa.array(indices_array, mask=fill_mask) + result = self._pa_array.take(indices_array) + if isna(fill_value): + return type(self)(result) + # TODO: ArrowNotImplementedError: Function fill_null has no + # kernel matching input types (array[string], scalar[string]) + result = type(self)(result) + result[fill_mask] = fill_value + return result + # return type(self)(pc.fill_null(result, pa.scalar(fill_value))) + else: + # Nothing to fill + return type(self)(self._pa_array.take(indices)) + else: # allow_fill=False + # TODO(ARROW-9432): Treat negative indices as indices from the right. 
+ if (indices_array < 0).any(): + # Don't modify in-place + indices_array = np.copy(indices_array) + indices_array[indices_array < 0] += len(self._pa_array) + return type(self)(self._pa_array.take(indices_array)) + + def _maybe_convert_datelike_array(self): + """Maybe convert to a datelike array.""" + pa_type = self._pa_array.type + if pa.types.is_timestamp(pa_type): + return self._to_datetimearray() + elif pa.types.is_duration(pa_type): + return self._to_timedeltaarray() + return self + + def _to_datetimearray(self) -> DatetimeArray: + """Convert a pyarrow timestamp typed array to a DatetimeArray.""" + from pandas.core.arrays.datetimes import ( + DatetimeArray, + tz_to_dtype, + ) + + pa_type = self._pa_array.type + assert pa.types.is_timestamp(pa_type) + np_dtype = np.dtype(f"M8[{pa_type.unit}]") + dtype = tz_to_dtype(pa_type.tz, pa_type.unit) + np_array = self._pa_array.to_numpy() + np_array = np_array.astype(np_dtype) + return DatetimeArray._simple_new(np_array, dtype=dtype) + + def _to_timedeltaarray(self) -> TimedeltaArray: + """Convert a pyarrow duration typed array to a TimedeltaArray.""" + from pandas.core.arrays.timedeltas import TimedeltaArray + + pa_type = self._pa_array.type + assert pa.types.is_duration(pa_type) + np_dtype = np.dtype(f"m8[{pa_type.unit}]") + np_array = self._pa_array.to_numpy() + np_array = np_array.astype(np_dtype) + return TimedeltaArray._simple_new(np_array, dtype=np_dtype) + + def _values_for_json(self) -> np.ndarray: + if is_numeric_dtype(self.dtype): + return np.asarray(self, dtype=object) + return super()._values_for_json() + + @doc(ExtensionArray.to_numpy) + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + original_na_value = na_value + dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, self._hasna) + pa_type = self._pa_array.type + if not self._hasna or isna(na_value) or pa.types.is_null(pa_type): + data = self + else: + data = self.fillna(na_value) + copy = False + + if pa.types.is_timestamp(pa_type) or pa.types.is_duration(pa_type): + # GH 55997 + if dtype != object and na_value is self.dtype.na_value: + na_value = lib.no_default + result = data._maybe_convert_datelike_array().to_numpy( + dtype=dtype, na_value=na_value + ) + elif pa.types.is_time(pa_type) or pa.types.is_date(pa_type): + # convert to list of python datetime.time objects before + # wrapping in ndarray + result = np.array(list(data), dtype=dtype) + if data._hasna: + result[data.isna()] = na_value + elif pa.types.is_null(pa_type): + if dtype is not None and isna(na_value): + na_value = None + result = np.full(len(data), fill_value=na_value, dtype=dtype) + elif not data._hasna or ( + pa.types.is_floating(pa_type) + and ( + na_value is np.nan + or original_na_value is lib.no_default + and is_float_dtype(dtype) + ) + ): + result = data._pa_array.to_numpy() + if dtype is not None: + result = result.astype(dtype, copy=False) + if copy: + result = result.copy() + else: + if dtype is None: + empty = pa.array([], type=pa_type).to_numpy(zero_copy_only=False) + if can_hold_element(empty, na_value): + dtype = empty.dtype + else: + dtype = np.object_ + result = np.empty(len(data), dtype=dtype) + mask = data.isna() + result[mask] = na_value + result[~mask] = data[~mask]._pa_array.to_numpy() + return result + + def map(self, mapper, na_action=None): + if is_numeric_dtype(self.dtype): + return map_array(self.to_numpy(), mapper, na_action=na_action) + else: + return super().map(mapper, na_action) + 
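[Editor's note, not part of the diff] The `take`/`to_numpy` machinery added above is easiest to see end to end with a small illustrative snippet. This is a sketch only, assuming a pandas build with pyarrow installed; the sample values and variable names are made up for illustration and are not taken from the patch.

    # Illustrative only: exercising ArrowExtensionArray.take and .to_numpy
    # through the public pandas API. Assumes pandas with pyarrow installed.
    import numpy as np
    import pandas as pd

    arr = pd.array([1, 2, None, 4], dtype="int64[pyarrow]")  # Arrow-backed array

    # With allow_fill=True, -1 marks a missing position and is filled with the
    # dtype's NA value (pd.NA here), matching the take() docstring above.
    taken = arr.take([0, -1, 3], allow_fill=True)
    print(list(taken))  # roughly [1, <NA>, 4]

    # to_numpy lets the caller choose both the target dtype and the NA sentinel.
    print(arr.to_numpy(dtype="float64", na_value=np.nan))  # roughly [ 1.  2. nan  4.]
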
+ @doc(ExtensionArray.duplicated) + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + pa_type = self._pa_array.type + if pa.types.is_floating(pa_type) or pa.types.is_integer(pa_type): + values = self.to_numpy(na_value=0) + elif pa.types.is_boolean(pa_type): + values = self.to_numpy(na_value=False) + elif pa.types.is_temporal(pa_type): + if pa_type.bit_width == 32: + pa_type = pa.int32() + else: + pa_type = pa.int64() + arr = self.astype(ArrowDtype(pa_type)) + values = arr.to_numpy(na_value=0) + else: + # factorize the values to avoid the performance penalty of + # converting to object dtype + values = self.factorize()[0] + + mask = self.isna() if self._hasna else None + return algos.duplicated(values, keep=keep, mask=mask) + + def unique(self) -> Self: + """ + Compute the ArrowExtensionArray of unique values. + + Returns + ------- + ArrowExtensionArray + """ + pa_type = self._pa_array.type + + if pa_version_under11p0 and pa.types.is_duration(pa_type): + # https://github.com/apache/arrow/issues/15226#issuecomment-1376578323 + data = self._pa_array.cast(pa.int64()) + else: + data = self._pa_array + + pa_result = pc.unique(data) + + if pa_version_under11p0 and pa.types.is_duration(pa_type): + pa_result = pa_result.cast(pa_type) + + return type(self)(pa_result) + + def value_counts(self, dropna: bool = True) -> Series: + """ + Return a Series containing counts of each unique value. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of missing values. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + pa_type = self._pa_array.type + if pa_version_under11p0 and pa.types.is_duration(pa_type): + # https://github.com/apache/arrow/issues/15226#issuecomment-1376578323 + data = self._pa_array.cast(pa.int64()) + else: + data = self._pa_array + + from pandas import ( + Index, + Series, + ) + + vc = data.value_counts() + + values = vc.field(0) + counts = vc.field(1) + if dropna and data.null_count > 0: + mask = values.is_valid() + values = values.filter(mask) + counts = counts.filter(mask) + + if pa_version_under11p0 and pa.types.is_duration(pa_type): + values = values.cast(pa_type) + + counts = ArrowExtensionArray(counts) + + index = Index(type(self)(values)) + + return Series(counts, index=index, name="count", copy=False) + + @classmethod + def _concat_same_type(cls, to_concat) -> Self: + """ + Concatenate multiple ArrowExtensionArrays. + + Parameters + ---------- + to_concat : sequence of ArrowExtensionArrays + + Returns + ------- + ArrowExtensionArray + """ + chunks = [array for ea in to_concat for array in ea._pa_array.iterchunks()] + if to_concat[0].dtype == "string": + # StringDtype has no attribute pyarrow_dtype + pa_dtype = pa.large_string() + else: + pa_dtype = to_concat[0].dtype.pyarrow_dtype + arr = pa.chunked_array(chunks, type=pa_dtype) + return cls(arr) + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> ArrowExtensionArray | ExtensionArray: + """ + Return an ExtensionArray performing an accumulation operation. + + The underlying data type might change. + + Parameters + ---------- + name : str + Name of the function, supported values are: + - cummin + - cummax + - cumsum + - cumprod + skipna : bool, default True + If True, skip NA values. + **kwargs + Additional keyword arguments passed to the accumulation function. + Currently, there is no supported kwarg. 
+ + Returns + ------- + array + + Raises + ------ + NotImplementedError : subclass does not define accumulations + """ + pyarrow_name = { + "cummax": "cumulative_max", + "cummin": "cumulative_min", + "cumprod": "cumulative_prod_checked", + "cumsum": "cumulative_sum_checked", + }.get(name, name) + pyarrow_meth = getattr(pc, pyarrow_name, None) + if pyarrow_meth is None: + return super()._accumulate(name, skipna=skipna, **kwargs) + + data_to_accum = self._pa_array + + pa_dtype = data_to_accum.type + + convert_to_int = ( + pa.types.is_temporal(pa_dtype) and name in ["cummax", "cummin"] + ) or (pa.types.is_duration(pa_dtype) and name == "cumsum") + + if convert_to_int: + if pa_dtype.bit_width == 32: + data_to_accum = data_to_accum.cast(pa.int32()) + else: + data_to_accum = data_to_accum.cast(pa.int64()) + + result = pyarrow_meth(data_to_accum, skip_nulls=skipna, **kwargs) + + if convert_to_int: + result = result.cast(pa_dtype) + + return type(self)(result) + + def _reduce_pyarrow(self, name: str, *, skipna: bool = True, **kwargs) -> pa.Scalar: + """ + Return a pyarrow scalar result of performing the reduction operation. + + Parameters + ---------- + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. + skipna : bool, default True + If True, skip NaN values. + **kwargs + Additional keyword arguments passed to the reduction function. + Currently, `ddof` is the only supported kwarg. + + Returns + ------- + pyarrow scalar + + Raises + ------ + TypeError : subclass does not define reductions + """ + pa_type = self._pa_array.type + + data_to_reduce = self._pa_array + + cast_kwargs = {} if pa_version_under13p0 else {"safe": False} + + if name in ["any", "all"] and ( + pa.types.is_integer(pa_type) + or pa.types.is_floating(pa_type) + or pa.types.is_duration(pa_type) + or pa.types.is_decimal(pa_type) + ): + # pyarrow only supports any/all for boolean dtype, we allow + # for other dtypes, matching our non-pyarrow behavior + + if pa.types.is_duration(pa_type): + data_to_cmp = self._pa_array.cast(pa.int64()) + else: + data_to_cmp = self._pa_array + + not_eq = pc.not_equal(data_to_cmp, 0) + data_to_reduce = not_eq + + elif name in ["min", "max", "sum"] and pa.types.is_duration(pa_type): + data_to_reduce = self._pa_array.cast(pa.int64()) + + elif name in ["median", "mean", "std", "sem"] and pa.types.is_temporal(pa_type): + nbits = pa_type.bit_width + if nbits == 32: + data_to_reduce = self._pa_array.cast(pa.int32()) + else: + data_to_reduce = self._pa_array.cast(pa.int64()) + + if name == "sem": + + def pyarrow_meth(data, skip_nulls, **kwargs): + numerator = pc.stddev(data, skip_nulls=skip_nulls, **kwargs) + denominator = pc.sqrt_checked(pc.count(self._pa_array)) + return pc.divide_checked(numerator, denominator) + + else: + pyarrow_name = { + "median": "quantile", + "prod": "product", + "std": "stddev", + "var": "variance", + }.get(name, name) + # error: Incompatible types in assignment + # (expression has type "Optional[Any]", variable has type + # "Callable[[Any, Any, KwArg(Any)], Any]") + pyarrow_meth = getattr(pc, pyarrow_name, None) # type: ignore[assignment] + if pyarrow_meth is None: + # Let ExtensionArray._reduce raise the TypeError + return super()._reduce(name, skipna=skipna, **kwargs) + + # GH51624: pyarrow defaults to min_count=1, pandas behavior is min_count=0 + if name in ["any", "all"] and "min_count" not in kwargs: + kwargs["min_count"] = 0 + elif name == "median": + # GH 52679: Use quantile instead of 
approximate_median + kwargs["q"] = 0.5 + + try: + result = pyarrow_meth(data_to_reduce, skip_nulls=skipna, **kwargs) + except (AttributeError, NotImplementedError, TypeError) as err: + msg = ( + f"'{type(self).__name__}' with dtype {self.dtype} " + f"does not support reduction '{name}' with pyarrow " + f"version {pa.__version__}. '{name}' may be supported by " + f"upgrading pyarrow." + ) + raise TypeError(msg) from err + if name == "median": + # GH 52679: Use quantile instead of approximate_median; returns array + result = result[0] + if pc.is_null(result).as_py(): + return result + + if name in ["min", "max", "sum"] and pa.types.is_duration(pa_type): + result = result.cast(pa_type) + if name in ["median", "mean"] and pa.types.is_temporal(pa_type): + if not pa_version_under13p0: + nbits = pa_type.bit_width + if nbits == 32: + result = result.cast(pa.int32(), **cast_kwargs) + else: + result = result.cast(pa.int64(), **cast_kwargs) + result = result.cast(pa_type) + if name in ["std", "sem"] and pa.types.is_temporal(pa_type): + result = result.cast(pa.int64(), **cast_kwargs) + if pa.types.is_duration(pa_type): + result = result.cast(pa_type) + elif pa.types.is_time(pa_type): + unit = get_unit_from_pa_dtype(pa_type) + result = result.cast(pa.duration(unit)) + elif pa.types.is_date(pa_type): + # go with closest available unit, i.e. "s" + result = result.cast(pa.duration("s")) + else: + # i.e. timestamp + result = result.cast(pa.duration(pa_type.unit)) + + return result + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + """ + Return a scalar result of performing the reduction operation. + + Parameters + ---------- + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. + skipna : bool, default True + If True, skip NaN values. + **kwargs + Additional keyword arguments passed to the reduction function. + Currently, `ddof` is the only supported kwarg. + + Returns + ------- + scalar + + Raises + ------ + TypeError : subclass does not define reductions + """ + result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs) + if isinstance(result, pa.Array): + return type(self)(result) + else: + return result + + def _reduce_calc( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + pa_result = self._reduce_pyarrow(name, skipna=skipna, **kwargs) + + if keepdims: + if isinstance(pa_result, pa.Scalar): + result = pa.array([pa_result.as_py()], type=pa_result.type) + else: + result = pa.array( + [pa_result], + type=to_pyarrow_type(infer_dtype_from_scalar(pa_result)[0]), + ) + return result + + if pc.is_null(pa_result).as_py(): + return self.dtype.na_value + elif isinstance(pa_result, pa.Scalar): + return pa_result.as_py() + else: + return pa_result + + def _explode(self): + """ + See Series.explode.__doc__. + """ + # child class explode method supports only list types; return + # default implementation for non list types. 
+ if not pa.types.is_list(self.dtype.pyarrow_dtype): + return super()._explode() + values = self + counts = pa.compute.list_value_length(values._pa_array) + counts = counts.fill_null(1).to_numpy() + fill_value = pa.scalar([None], type=self._pa_array.type) + mask = counts == 0 + if mask.any(): + values = values.copy() + values[mask] = fill_value + counts = counts.copy() + counts[mask] = 1 + values = values.fillna(fill_value) + values = type(self)(pa.compute.list_flatten(values._pa_array)) + return values, counts + + def __setitem__(self, key, value) -> None: + """Set one or more values inplace. + + Parameters + ---------- + key : int, ndarray, or slice + When called from, e.g. ``Series.__setitem__``, ``key`` will be + one of + + * scalar int + * ndarray of integers. + * boolean ndarray + * slice object + + value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object + value or values to be set of ``key``. + + Returns + ------- + None + """ + # GH50085: unwrap 1D indexers + if isinstance(key, tuple) and len(key) == 1: + key = key[0] + + key = check_array_indexer(self, key) + value = self._maybe_convert_setitem_value(value) + + if com.is_null_slice(key): + # fast path (GH50248) + data = self._if_else(True, value, self._pa_array) + + elif is_integer(key): + # fast path + key = cast(int, key) + n = len(self) + if key < 0: + key += n + if not 0 <= key < n: + raise IndexError( + f"index {key} is out of bounds for axis 0 with size {n}" + ) + if isinstance(value, pa.Scalar): + value = value.as_py() + elif is_list_like(value): + raise ValueError("Length of indexer and values mismatch") + chunks = [ + *self._pa_array[:key].chunks, + pa.array([value], type=self._pa_array.type, from_pandas=True), + *self._pa_array[key + 1 :].chunks, + ] + data = pa.chunked_array(chunks).combine_chunks() + + elif is_bool_dtype(key): + key = np.asarray(key, dtype=np.bool_) + data = self._replace_with_mask(self._pa_array, key, value) + + elif is_scalar(value) or isinstance(value, pa.Scalar): + mask = np.zeros(len(self), dtype=np.bool_) + mask[key] = True + data = self._if_else(mask, value, self._pa_array) + + else: + indices = np.arange(len(self))[key] + if len(indices) != len(value): + raise ValueError("Length of indexer and values mismatch") + if len(indices) == 0: + return + argsort = np.argsort(indices) + indices = indices[argsort] + value = value.take(argsort) + mask = np.zeros(len(self), dtype=np.bool_) + mask[indices] = True + data = self._replace_with_mask(self._pa_array, mask, value) + + if isinstance(data, pa.Array): + data = pa.chunked_array([data]) + self._pa_array = data + + def _rank_calc( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + if axis != 0: + ranked = super()._rank( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + # keep dtypes consistent with the implementation below + if method == "average" or pct: + pa_type = pa.float64() + else: + pa_type = pa.uint64() + result = pa.array(ranked, type=pa_type, from_pandas=True) + return result + + data = self._pa_array.combine_chunks() + sort_keys = "ascending" if ascending else "descending" + null_placement = "at_start" if na_option == "top" else "at_end" + tiebreaker = "min" if method == "average" else method + + result = pc.rank( + data, + sort_keys=sort_keys, + null_placement=null_placement, + tiebreaker=tiebreaker, + ) + + if na_option == "keep": + mask = pc.is_null(self._pa_array) + null = pa.scalar(None, 
type=result.type) + result = pc.if_else(mask, null, result) + + if method == "average": + result_max = pc.rank( + data, + sort_keys=sort_keys, + null_placement=null_placement, + tiebreaker="max", + ) + result_max = result_max.cast(pa.float64()) + result_min = result.cast(pa.float64()) + result = pc.divide(pc.add(result_min, result_max), 2) + + if pct: + if not pa.types.is_floating(result.type): + result = result.cast(pa.float64()) + if method == "dense": + divisor = pc.max(result) + else: + divisor = pc.count(result) + result = pc.divide(result, divisor) + + return result + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + return type(self)( + self._rank_calc( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + ) + + def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: + """ + Compute the quantiles of self for each quantile in `qs`. + + Parameters + ---------- + qs : np.ndarray[float64] + interpolation: str + + Returns + ------- + same type as self + """ + pa_dtype = self._pa_array.type + + data = self._pa_array + if pa.types.is_temporal(pa_dtype): + # https://github.com/apache/arrow/issues/33769 in these cases + # we can cast to ints and back + nbits = pa_dtype.bit_width + if nbits == 32: + data = data.cast(pa.int32()) + else: + data = data.cast(pa.int64()) + + result = pc.quantile(data, q=qs, interpolation=interpolation) + + if pa.types.is_temporal(pa_dtype): + if pa.types.is_floating(result.type): + result = pc.floor(result) + nbits = pa_dtype.bit_width + if nbits == 32: + result = result.cast(pa.int32()) + else: + result = result.cast(pa.int64()) + result = result.cast(pa_dtype) + + return type(self)(result) + + def _mode(self, dropna: bool = True) -> Self: + """ + Returns the mode(s) of the ExtensionArray. + + Always returns `ExtensionArray` even if only one value. + + Parameters + ---------- + dropna : bool, default True + Don't consider counts of NA values. + + Returns + ------- + same type as self + Sorted, if possible. + """ + pa_type = self._pa_array.type + if pa.types.is_temporal(pa_type): + nbits = pa_type.bit_width + if nbits == 32: + data = self._pa_array.cast(pa.int32()) + elif nbits == 64: + data = self._pa_array.cast(pa.int64()) + else: + raise NotImplementedError(pa_type) + else: + data = self._pa_array + + if dropna: + data = data.drop_null() + + res = pc.value_counts(data) + most_common = res.field("values").filter( + pc.equal(res.field("counts"), pc.max(res.field("counts"))) + ) + + if pa.types.is_temporal(pa_type): + most_common = most_common.cast(pa_type) + + most_common = most_common.take(pc.array_sort_indices(most_common)) + return type(self)(most_common) + + def _maybe_convert_setitem_value(self, value): + """Maybe convert value to be pyarrow compatible.""" + try: + value = self._box_pa(value, self._pa_array.type) + except pa.ArrowTypeError as err: + msg = f"Invalid value '{str(value)}' for dtype {self.dtype}" + raise TypeError(msg) from err + return value + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> Self: + """ + See NDFrame.interpolate.__doc__. 
+ """ + # NB: we return type(self) even if copy=False + mask = self.isna() + if self.dtype.kind == "f": + data = self._pa_array.to_numpy() + elif self.dtype.kind in "iu": + data = self.to_numpy(dtype="f8", na_value=0.0) + else: + raise NotImplementedError( + f"interpolate is not implemented for dtype={self.dtype}" + ) + + missing.interpolate_2d_inplace( + data, + method=method, + axis=0, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + mask=mask, + **kwargs, + ) + return type(self)(self._box_pa_array(pa.array(data, mask=mask))) + + @classmethod + def _if_else( + cls, + cond: npt.NDArray[np.bool_] | bool, + left: ArrayLike | Scalar, + right: ArrayLike | Scalar, + ): + """ + Choose values based on a condition. + + Analogous to pyarrow.compute.if_else, with logic + to fallback to numpy for unsupported types. + + Parameters + ---------- + cond : npt.NDArray[np.bool_] or bool + left : ArrayLike | Scalar + right : ArrayLike | Scalar + + Returns + ------- + pa.Array + """ + try: + return pc.if_else(cond, left, right) + except pa.ArrowNotImplementedError: + pass + + def _to_numpy_and_type(value) -> tuple[np.ndarray, pa.DataType | None]: + if isinstance(value, (pa.Array, pa.ChunkedArray)): + pa_type = value.type + elif isinstance(value, pa.Scalar): + pa_type = value.type + value = value.as_py() + else: + pa_type = None + return np.array(value, dtype=object), pa_type + + left, left_type = _to_numpy_and_type(left) + right, right_type = _to_numpy_and_type(right) + pa_type = left_type or right_type + result = np.where(cond, left, right) + return pa.array(result, type=pa_type, from_pandas=True) + + @classmethod + def _replace_with_mask( + cls, + values: pa.Array | pa.ChunkedArray, + mask: npt.NDArray[np.bool_] | bool, + replacements: ArrayLike | Scalar, + ): + """ + Replace items selected with a mask. + + Analogous to pyarrow.compute.replace_with_mask, with logic + to fallback to numpy for unsupported types. 
+ + Parameters + ---------- + values : pa.Array or pa.ChunkedArray + mask : npt.NDArray[np.bool_] or bool + replacements : ArrayLike or Scalar + Replacement value(s) + + Returns + ------- + pa.Array or pa.ChunkedArray + """ + if isinstance(replacements, pa.ChunkedArray): + # replacements must be array or scalar, not ChunkedArray + replacements = replacements.combine_chunks() + if isinstance(values, pa.ChunkedArray) and pa.types.is_boolean(values.type): + # GH#52059 replace_with_mask segfaults for chunked array + # https://github.com/apache/arrow/issues/34634 + values = values.combine_chunks() + try: + return pc.replace_with_mask(values, mask, replacements) + except pa.ArrowNotImplementedError: + pass + if isinstance(replacements, pa.Array): + replacements = np.array(replacements, dtype=object) + elif isinstance(replacements, pa.Scalar): + replacements = replacements.as_py() + result = np.array(values, dtype=object) + result[mask] = replacements + return pa.array(result, type=values.type, from_pandas=True) + + # ------------------------------------------------------------------ + # GroupBy Methods + + def _to_masked(self): + pa_dtype = self._pa_array.type + + if pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype): + na_value = 1 + elif pa.types.is_boolean(pa_dtype): + na_value = True + else: + raise NotImplementedError + + dtype = _arrow_dtype_mapping()[pa_dtype] + mask = self.isna() + arr = self.to_numpy(dtype=dtype.numpy_dtype, na_value=na_value) + return dtype.construct_array_type()(arr, mask) + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ): + if isinstance(self.dtype, StringDtype): + return super()._groupby_op( + how=how, + has_dropped_na=has_dropped_na, + min_count=min_count, + ngroups=ngroups, + ids=ids, + **kwargs, + ) + + # maybe convert to a compatible dtype optimized for groupby + values: ExtensionArray + pa_type = self._pa_array.type + if pa.types.is_timestamp(pa_type): + values = self._to_datetimearray() + elif pa.types.is_duration(pa_type): + values = self._to_timedeltaarray() + else: + values = self._to_masked() + + result = values._groupby_op( + how=how, + has_dropped_na=has_dropped_na, + min_count=min_count, + ngroups=ngroups, + ids=ids, + **kwargs, + ) + if isinstance(result, np.ndarray): + return result + return type(self)._from_sequence(result, copy=False) + + def _apply_elementwise(self, func: Callable) -> list[list[Any]]: + """Apply a callable to each element while maintaining the chunking structure.""" + return [ + [ + None if val is None else func(val) + for val in chunk.to_numpy(zero_copy_only=False) + ] + for chunk in self._pa_array.iterchunks() + ] + + def _str_count(self, pat: str, flags: int = 0): + if flags: + raise NotImplementedError(f"count not implemented with {flags=}") + return type(self)(pc.count_substring_regex(self._pa_array, pat)) + + def _str_contains( + self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True + ): + if flags: + raise NotImplementedError(f"contains not implemented with {flags=}") + + if regex: + pa_contains = pc.match_substring_regex + else: + pa_contains = pc.match_substring + result = pa_contains(self._pa_array, pat, ignore_case=not case) + if not isna(na): + result = result.fill_null(na) + return type(self)(result) + + def _str_startswith(self, pat: str | tuple[str, ...], na=None): + if isinstance(pat, str): + result = pc.starts_with(self._pa_array, pattern=pat) + else: + if len(pat) == 0: + # For empty 
tuple, pd.StringDtype() returns null for missing values + # and false for valid values. + result = pc.if_else(pc.is_null(self._pa_array), None, False) + else: + result = pc.starts_with(self._pa_array, pattern=pat[0]) + + for p in pat[1:]: + result = pc.or_(result, pc.starts_with(self._pa_array, pattern=p)) + if not isna(na): + result = result.fill_null(na) + return type(self)(result) + + def _str_endswith(self, pat: str | tuple[str, ...], na=None): + if isinstance(pat, str): + result = pc.ends_with(self._pa_array, pattern=pat) + else: + if len(pat) == 0: + # For empty tuple, pd.StringDtype() returns null for missing values + # and false for valid values. + result = pc.if_else(pc.is_null(self._pa_array), None, False) + else: + result = pc.ends_with(self._pa_array, pattern=pat[0]) + + for p in pat[1:]: + result = pc.or_(result, pc.ends_with(self._pa_array, pattern=p)) + if not isna(na): + result = result.fill_null(na) + return type(self)(result) + + def _str_replace( + self, + pat: str | re.Pattern, + repl: str | Callable, + n: int = -1, + case: bool = True, + flags: int = 0, + regex: bool = True, + ): + if isinstance(pat, re.Pattern) or callable(repl) or not case or flags: + raise NotImplementedError( + "replace is not supported with a re.Pattern, callable repl, " + "case=False, or flags!=0" + ) + + func = pc.replace_substring_regex if regex else pc.replace_substring + # https://github.com/apache/arrow/issues/39149 + # GH 56404, unexpected behavior with negative max_replacements with pyarrow. + pa_max_replacements = None if n < 0 else n + result = func( + self._pa_array, + pattern=pat, + replacement=repl, + max_replacements=pa_max_replacements, + ) + return type(self)(result) + + def _str_repeat(self, repeats: int | Sequence[int]): + if not isinstance(repeats, int): + raise NotImplementedError( + f"repeat is not implemented when repeats is {type(repeats).__name__}" + ) + else: + return type(self)(pc.binary_repeat(self._pa_array, repeats)) + + def _str_match( + self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None + ): + if not pat.startswith("^"): + pat = f"^{pat}" + return self._str_contains(pat, case, flags, na, regex=True) + + def _str_fullmatch( + self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None + ): + if not pat.endswith("$") or pat.endswith("\\$"): + pat = f"{pat}$" + return self._str_match(pat, case, flags, na) + + def _str_find(self, sub: str, start: int = 0, end: int | None = None): + if start != 0 and end is not None: + slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end) + result = pc.find_substring(slices, sub) + not_found = pc.equal(result, -1) + start_offset = max(0, start) + offset_result = pc.add(result, start_offset) + result = pc.if_else(not_found, result, offset_result) + elif start == 0 and end is None: + slices = self._pa_array + result = pc.find_substring(slices, sub) + else: + raise NotImplementedError( + f"find not implemented with {sub=}, {start=}, {end=}" + ) + return type(self)(result) + + def _str_join(self, sep: str): + if pa.types.is_string(self._pa_array.type) or pa.types.is_large_string( + self._pa_array.type + ): + result = self._apply_elementwise(list) + result = pa.chunked_array(result, type=pa.list_(pa.string())) + else: + result = self._pa_array + return type(self)(pc.binary_join(result, sep)) + + def _str_partition(self, sep: str, expand: bool): + predicate = lambda val: val.partition(sep) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def 
_str_rpartition(self, sep: str, expand: bool): + predicate = lambda val: val.rpartition(sep) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_slice( + self, start: int | None = None, stop: int | None = None, step: int | None = None + ): + if start is None: + start = 0 + if step is None: + step = 1 + return type(self)( + pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step) + ) + + def _str_isalnum(self): + return type(self)(pc.utf8_is_alnum(self._pa_array)) + + def _str_isalpha(self): + return type(self)(pc.utf8_is_alpha(self._pa_array)) + + def _str_isdecimal(self): + return type(self)(pc.utf8_is_decimal(self._pa_array)) + + def _str_isdigit(self): + return type(self)(pc.utf8_is_digit(self._pa_array)) + + def _str_islower(self): + return type(self)(pc.utf8_is_lower(self._pa_array)) + + def _str_isnumeric(self): + return type(self)(pc.utf8_is_numeric(self._pa_array)) + + def _str_isspace(self): + return type(self)(pc.utf8_is_space(self._pa_array)) + + def _str_istitle(self): + return type(self)(pc.utf8_is_title(self._pa_array)) + + def _str_isupper(self): + return type(self)(pc.utf8_is_upper(self._pa_array)) + + def _str_len(self): + return type(self)(pc.utf8_length(self._pa_array)) + + def _str_lower(self): + return type(self)(pc.utf8_lower(self._pa_array)) + + def _str_upper(self): + return type(self)(pc.utf8_upper(self._pa_array)) + + def _str_strip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_trim_whitespace(self._pa_array) + else: + result = pc.utf8_trim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_lstrip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_ltrim_whitespace(self._pa_array) + else: + result = pc.utf8_ltrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_rstrip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_rtrim_whitespace(self._pa_array) + else: + result = pc.utf8_rtrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_removeprefix(self, prefix: str): + if not pa_version_under13p0: + starts_with = pc.starts_with(self._pa_array, pattern=prefix) + removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix)) + result = pc.if_else(starts_with, removed, self._pa_array) + return type(self)(result) + predicate = lambda val: val.removeprefix(prefix) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_casefold(self): + predicate = lambda val: val.casefold() + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_encode(self, encoding: str, errors: str = "strict"): + predicate = lambda val: val.encode(encoding, errors) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + if flags: + raise NotImplementedError("Only flags=0 is implemented.") + groups = re.compile(pat).groupindex.keys() + if len(groups) == 0: + raise ValueError(f"{pat=} must contain a symbolic group name.") + result = pc.extract_regex(self._pa_array, pat) + if expand: + return { + col: type(self)(pc.struct_field(result, [i])) + for col, i in zip(groups, range(result.type.num_fields)) + } + else: + return type(self)(pc.struct_field(result, [0])) + + def _str_findall(self, pat: str, flags: int = 0): + regex = re.compile(pat, flags=flags) + predicate = lambda val: regex.findall(val) + 
result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_get_dummies(self, sep: str = "|"): + split = pc.split_pattern(self._pa_array, sep) + flattened_values = pc.list_flatten(split) + uniques = flattened_values.unique() + uniques_sorted = uniques.take(pa.compute.array_sort_indices(uniques)) + lengths = pc.list_value_length(split).fill_null(0).to_numpy() + n_rows = len(self) + n_cols = len(uniques) + indices = pc.index_in(flattened_values, uniques_sorted).to_numpy() + indices = indices + np.arange(n_rows).repeat(lengths) * n_cols + dummies = np.zeros(n_rows * n_cols, dtype=np.bool_) + dummies[indices] = True + dummies = dummies.reshape((n_rows, n_cols)) + result = type(self)(pa.array(list(dummies))) + return result, uniques_sorted.to_pylist() + + def _str_index(self, sub: str, start: int = 0, end: int | None = None): + predicate = lambda val: val.index(sub, start, end) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_rindex(self, sub: str, start: int = 0, end: int | None = None): + predicate = lambda val: val.rindex(sub, start, end) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_normalize(self, form: str): + predicate = lambda val: unicodedata.normalize(form, val) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_rfind(self, sub: str, start: int = 0, end=None): + predicate = lambda val: val.rfind(sub, start, end) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_split( + self, + pat: str | None = None, + n: int | None = -1, + expand: bool = False, + regex: bool | None = None, + ): + if n in {-1, 0}: + n = None + if pat is None: + split_func = pc.utf8_split_whitespace + elif regex: + split_func = functools.partial(pc.split_pattern_regex, pattern=pat) + else: + split_func = functools.partial(pc.split_pattern, pattern=pat) + return type(self)(split_func(self._pa_array, max_splits=n)) + + def _str_rsplit(self, pat: str | None = None, n: int | None = -1): + if n in {-1, 0}: + n = None + if pat is None: + return type(self)( + pc.utf8_split_whitespace(self._pa_array, max_splits=n, reverse=True) + ) + else: + return type(self)( + pc.split_pattern(self._pa_array, pat, max_splits=n, reverse=True) + ) + + def _str_translate(self, table: dict[int, str]): + predicate = lambda val: val.translate(table) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_wrap(self, width: int, **kwargs): + kwargs["width"] = width + tw = textwrap.TextWrapper(**kwargs) + predicate = lambda val: "\n".join(tw.wrap(val)) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + @property + def _dt_days(self): + return type(self)( + pa.array(self._to_timedeltaarray().days, from_pandas=True, type=pa.int32()) + ) + + @property + def _dt_hours(self): + return type(self)( + pa.array( + [ + td.components.hours if td is not NaT else None + for td in self._to_timedeltaarray() + ], + type=pa.int32(), + ) + ) + + @property + def _dt_minutes(self): + return type(self)( + pa.array( + [ + td.components.minutes if td is not NaT else None + for td in self._to_timedeltaarray() + ], + type=pa.int32(), + ) + ) + + @property + def _dt_seconds(self): + return type(self)( + pa.array( + self._to_timedeltaarray().seconds, from_pandas=True, type=pa.int32() + ) + ) + + @property + def 
_dt_milliseconds(self): + return type(self)( + pa.array( + [ + td.components.milliseconds if td is not NaT else None + for td in self._to_timedeltaarray() + ], + type=pa.int32(), + ) + ) + + @property + def _dt_microseconds(self): + return type(self)( + pa.array( + self._to_timedeltaarray().microseconds, + from_pandas=True, + type=pa.int32(), + ) + ) + + @property + def _dt_nanoseconds(self): + return type(self)( + pa.array( + self._to_timedeltaarray().nanoseconds, from_pandas=True, type=pa.int32() + ) + ) + + def _dt_to_pytimedelta(self): + data = self._pa_array.to_pylist() + if self._dtype.pyarrow_dtype.unit == "ns": + data = [None if ts is None else ts.to_pytimedelta() for ts in data] + return np.array(data, dtype=object) + + def _dt_total_seconds(self): + return type(self)( + pa.array(self._to_timedeltaarray().total_seconds(), from_pandas=True) + ) + + def _dt_as_unit(self, unit: str): + if pa.types.is_date(self.dtype.pyarrow_dtype): + raise NotImplementedError("as_unit not implemented for date types") + pd_array = self._maybe_convert_datelike_array() + # Don't just cast _pa_array in order to follow pandas unit conversion rules + return type(self)(pa.array(pd_array.as_unit(unit), from_pandas=True)) + + @property + def _dt_year(self): + return type(self)(pc.year(self._pa_array)) + + @property + def _dt_day(self): + return type(self)(pc.day(self._pa_array)) + + @property + def _dt_day_of_week(self): + return type(self)(pc.day_of_week(self._pa_array)) + + _dt_dayofweek = _dt_day_of_week + _dt_weekday = _dt_day_of_week + + @property + def _dt_day_of_year(self): + return type(self)(pc.day_of_year(self._pa_array)) + + _dt_dayofyear = _dt_day_of_year + + @property + def _dt_hour(self): + return type(self)(pc.hour(self._pa_array)) + + def _dt_isocalendar(self): + return type(self)(pc.iso_calendar(self._pa_array)) + + @property + def _dt_is_leap_year(self): + return type(self)(pc.is_leap_year(self._pa_array)) + + @property + def _dt_is_month_start(self): + return type(self)(pc.equal(pc.day(self._pa_array), 1)) + + @property + def _dt_is_month_end(self): + result = pc.equal( + pc.days_between( + pc.floor_temporal(self._pa_array, unit="day"), + pc.ceil_temporal(self._pa_array, unit="month"), + ), + 1, + ) + return type(self)(result) + + @property + def _dt_is_year_start(self): + return type(self)( + pc.and_( + pc.equal(pc.month(self._pa_array), 1), + pc.equal(pc.day(self._pa_array), 1), + ) + ) + + @property + def _dt_is_year_end(self): + return type(self)( + pc.and_( + pc.equal(pc.month(self._pa_array), 12), + pc.equal(pc.day(self._pa_array), 31), + ) + ) + + @property + def _dt_is_quarter_start(self): + result = pc.equal( + pc.floor_temporal(self._pa_array, unit="quarter"), + pc.floor_temporal(self._pa_array, unit="day"), + ) + return type(self)(result) + + @property + def _dt_is_quarter_end(self): + result = pc.equal( + pc.days_between( + pc.floor_temporal(self._pa_array, unit="day"), + pc.ceil_temporal(self._pa_array, unit="quarter"), + ), + 1, + ) + return type(self)(result) + + @property + def _dt_days_in_month(self): + result = pc.days_between( + pc.floor_temporal(self._pa_array, unit="month"), + pc.ceil_temporal(self._pa_array, unit="month"), + ) + return type(self)(result) + + _dt_daysinmonth = _dt_days_in_month + + @property + def _dt_microsecond(self): + return type(self)(pc.microsecond(self._pa_array)) + + @property + def _dt_minute(self): + return type(self)(pc.minute(self._pa_array)) + + @property + def _dt_month(self): + return type(self)(pc.month(self._pa_array)) + + @property + 
def _dt_nanosecond(self): + return type(self)(pc.nanosecond(self._pa_array)) + + @property + def _dt_quarter(self): + return type(self)(pc.quarter(self._pa_array)) + + @property + def _dt_second(self): + return type(self)(pc.second(self._pa_array)) + + @property + def _dt_date(self): + return type(self)(self._pa_array.cast(pa.date32())) + + @property + def _dt_time(self): + unit = ( + self.dtype.pyarrow_dtype.unit + if self.dtype.pyarrow_dtype.unit in {"us", "ns"} + else "ns" + ) + return type(self)(self._pa_array.cast(pa.time64(unit))) + + @property + def _dt_tz(self): + return timezones.maybe_get_tz(self.dtype.pyarrow_dtype.tz) + + @property + def _dt_unit(self): + return self.dtype.pyarrow_dtype.unit + + def _dt_normalize(self): + return type(self)(pc.floor_temporal(self._pa_array, 1, "day")) + + def _dt_strftime(self, format: str): + return type(self)(pc.strftime(self._pa_array, format=format)) + + def _round_temporally( + self, + method: Literal["ceil", "floor", "round"], + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + if ambiguous != "raise": + raise NotImplementedError("ambiguous is not supported.") + if nonexistent != "raise": + raise NotImplementedError("nonexistent is not supported.") + offset = to_offset(freq) + if offset is None: + raise ValueError(f"Must specify a valid frequency: {freq}") + pa_supported_unit = { + "Y": "year", + "YS": "year", + "Q": "quarter", + "QS": "quarter", + "M": "month", + "MS": "month", + "W": "week", + "D": "day", + "h": "hour", + "min": "minute", + "s": "second", + "ms": "millisecond", + "us": "microsecond", + "ns": "nanosecond", + } + unit = pa_supported_unit.get(offset._prefix, None) + if unit is None: + raise ValueError(f"{freq=} is not supported") + multiple = offset.n + rounding_method = getattr(pc, f"{method}_temporal") + return type(self)(rounding_method(self._pa_array, multiple=multiple, unit=unit)) + + def _dt_ceil( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + return self._round_temporally("ceil", freq, ambiguous, nonexistent) + + def _dt_floor( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + return self._round_temporally("floor", freq, ambiguous, nonexistent) + + def _dt_round( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + return self._round_temporally("round", freq, ambiguous, nonexistent) + + def _dt_day_name(self, locale: str | None = None): + if locale is None: + locale = "C" + return type(self)(pc.strftime(self._pa_array, format="%A", locale=locale)) + + def _dt_month_name(self, locale: str | None = None): + if locale is None: + locale = "C" + return type(self)(pc.strftime(self._pa_array, format="%B", locale=locale)) + + def _dt_to_pydatetime(self): + if pa.types.is_date(self.dtype.pyarrow_dtype): + raise ValueError( + f"to_pydatetime cannot be called with {self.dtype.pyarrow_dtype} type. " + "Convert to pyarrow timestamp type." 
+ ) + data = self._pa_array.to_pylist() + if self._dtype.pyarrow_dtype.unit == "ns": + data = [None if ts is None else ts.to_pydatetime(warn=False) for ts in data] + return np.array(data, dtype=object) + + def _dt_tz_localize( + self, + tz, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + if ambiguous != "raise": + raise NotImplementedError(f"{ambiguous=} is not supported") + nonexistent_pa = { + "raise": "raise", + "shift_backward": "earliest", + "shift_forward": "latest", + }.get( + nonexistent, None # type: ignore[arg-type] + ) + if nonexistent_pa is None: + raise NotImplementedError(f"{nonexistent=} is not supported") + if tz is None: + result = self._pa_array.cast(pa.timestamp(self.dtype.pyarrow_dtype.unit)) + else: + result = pc.assume_timezone( + self._pa_array, str(tz), ambiguous=ambiguous, nonexistent=nonexistent_pa + ) + return type(self)(result) + + def _dt_tz_convert(self, tz): + if self.dtype.pyarrow_dtype.tz is None: + raise TypeError( + "Cannot convert tz-naive timestamps, use tz_localize to localize" + ) + current_unit = self.dtype.pyarrow_dtype.unit + result = self._pa_array.cast(pa.timestamp(current_unit, tz)) + return type(self)(result) + + +def transpose_homogeneous_pyarrow( + arrays: Sequence[ArrowExtensionArray], +) -> list[ArrowExtensionArray]: + """Transpose arrow extension arrays in a list, but faster. + + Input should be a list of arrays of equal length and all have the same + dtype. The caller is responsible for ensuring validity of input data. + """ + arrays = list(arrays) + nrows, ncols = len(arrays[0]), len(arrays) + indices = np.arange(nrows * ncols).reshape(ncols, nrows).T.flatten() + arr = pa.chunked_array([chunk for arr in arrays for chunk in arr._pa_array.chunks]) + arr = arr.take(indices) + return [ArrowExtensionArray(arr.slice(i * ncols, ncols)) for i in range(nrows)] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py new file mode 100644 index 0000000000000000000000000000000000000000..72bfd6f2212f8fae6ea7786599de44beaeb3f902 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py @@ -0,0 +1,174 @@ +from __future__ import annotations + +import json +from typing import TYPE_CHECKING + +import pyarrow + +from pandas.compat import pa_version_under14p1 + +from pandas.core.dtypes.dtypes import ( + IntervalDtype, + PeriodDtype, +) + +from pandas.core.arrays.interval import VALID_CLOSED + +if TYPE_CHECKING: + from pandas._typing import IntervalClosedType + + +class ArrowPeriodType(pyarrow.ExtensionType): + def __init__(self, freq) -> None: + # attributes need to be set first before calling + # super init (as that calls serialize) + self._freq = freq + pyarrow.ExtensionType.__init__(self, pyarrow.int64(), "pandas.period") + + @property + def freq(self): + return self._freq + + def __arrow_ext_serialize__(self) -> bytes: + metadata = {"freq": self.freq} + return json.dumps(metadata).encode() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType: + metadata = json.loads(serialized.decode()) + return ArrowPeriodType(metadata["freq"]) + + def __eq__(self, other): + if isinstance(other, pyarrow.BaseExtensionType): + return type(self) == type(other) and self.freq == other.freq + else: + return NotImplemented + + def __ne__(self, other) -> bool: + return not self == other + + def __hash__(self) -> int: + 
return hash((str(self), self.freq)) + + def to_pandas_dtype(self) -> PeriodDtype: + return PeriodDtype(freq=self.freq) + + +# register the type with a dummy instance +_period_type = ArrowPeriodType("D") +pyarrow.register_extension_type(_period_type) + + +class ArrowIntervalType(pyarrow.ExtensionType): + def __init__(self, subtype, closed: IntervalClosedType) -> None: + # attributes need to be set first before calling + # super init (as that calls serialize) + assert closed in VALID_CLOSED + self._closed: IntervalClosedType = closed + if not isinstance(subtype, pyarrow.DataType): + subtype = pyarrow.type_for_alias(str(subtype)) + self._subtype = subtype + + storage_type = pyarrow.struct([("left", subtype), ("right", subtype)]) + pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval") + + @property + def subtype(self): + return self._subtype + + @property + def closed(self) -> IntervalClosedType: + return self._closed + + def __arrow_ext_serialize__(self) -> bytes: + metadata = {"subtype": str(self.subtype), "closed": self.closed} + return json.dumps(metadata).encode() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType: + metadata = json.loads(serialized.decode()) + subtype = pyarrow.type_for_alias(metadata["subtype"]) + closed = metadata["closed"] + return ArrowIntervalType(subtype, closed) + + def __eq__(self, other): + if isinstance(other, pyarrow.BaseExtensionType): + return ( + type(self) == type(other) + and self.subtype == other.subtype + and self.closed == other.closed + ) + else: + return NotImplemented + + def __ne__(self, other) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((str(self), str(self.subtype), self.closed)) + + def to_pandas_dtype(self) -> IntervalDtype: + return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed) + + +# register the type with a dummy instance +_interval_type = ArrowIntervalType(pyarrow.int64(), "left") +pyarrow.register_extension_type(_interval_type) + + +_ERROR_MSG = """\ +Disallowed deserialization of 'arrow.py_extension_type': +storage_type = {storage_type} +serialized = {serialized} +pickle disassembly:\n{pickle_disassembly} + +Reading of untrusted Parquet or Feather files with a PyExtensionType column +allows arbitrary code execution. +If you trust this file, you can enable reading the extension type by one of: + +- upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)` +- install pyarrow-hotfix (`pip install pyarrow-hotfix`) and disable it by running + `import pyarrow_hotfix; pyarrow_hotfix.uninstall()` + +We strongly recommend updating your Parquet/Feather files to use extension types +derived from `pyarrow.ExtensionType` instead, and register this type explicitly. 
+""" + + +def patch_pyarrow(): + # starting from pyarrow 14.0.1, it has its own mechanism + if not pa_version_under14p1: + return + + # if https://github.com/pitrou/pyarrow-hotfix was installed and enabled + if getattr(pyarrow, "_hotfix_installed", False): + return + + class ForbiddenExtensionType(pyarrow.ExtensionType): + def __arrow_ext_serialize__(self): + return b"" + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + import io + import pickletools + + out = io.StringIO() + pickletools.dis(serialized, out) + raise RuntimeError( + _ERROR_MSG.format( + storage_type=storage_type, + serialized=serialized, + pickle_disassembly=out.getvalue(), + ) + ) + + pyarrow.unregister_extension_type("arrow.py_extension_type") + pyarrow.register_extension_type( + ForbiddenExtensionType(pyarrow.null(), "arrow.py_extension_type") + ) + + pyarrow._hotfix_installed = True + + +patch_pyarrow() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..adf83963aca39e7d2ec2da55d21fc69aaca48977 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py @@ -0,0 +1,19 @@ +from pandas.core.arrays.sparse.accessor import ( + SparseAccessor, + SparseFrameAccessor, +) +from pandas.core.arrays.sparse.array import ( + BlockIndex, + IntIndex, + SparseArray, + make_sparse_index, +) + +__all__ = [ + "BlockIndex", + "IntIndex", + "make_sparse_index", + "SparseAccessor", + "SparseArray", + "SparseFrameAccessor", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5f492b2bd7d90d6960ef775c76621af163fda9f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9d70e8b18ee44731a37b9096a0442927f680ea4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8c0b247313feb0baa97f8affab9c43353b06dd0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90a514406b03428359a1b00aeea19d7621235df8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc differ 
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..fc7debb1f31e4e3afe8c86e57e1f5b8bf1e343be --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py @@ -0,0 +1,414 @@ +"""Sparse accessor""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.core.accessor import ( + PandasDelegate, + delegate_names, +) +from pandas.core.arrays.sparse.array import SparseArray + +if TYPE_CHECKING: + from pandas import ( + DataFrame, + Series, + ) + + +class BaseAccessor: + _validation_msg = "Can only use the '.sparse' accessor with Sparse data." + + def __init__(self, data=None) -> None: + self._parent = data + self._validate(data) + + def _validate(self, data): + raise NotImplementedError + + +@delegate_names( + SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property" +) +class SparseAccessor(BaseAccessor, PandasDelegate): + """ + Accessor for SparseSparse from other sparse matrix data types. + + Examples + -------- + >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]") + >>> ser.sparse.density + 0.6 + >>> ser.sparse.sp_values + array([2, 2, 2]) + """ + + def _validate(self, data): + if not isinstance(data.dtype, SparseDtype): + raise AttributeError(self._validation_msg) + + def _delegate_property_get(self, name: str, *args, **kwargs): + return getattr(self._parent.array, name) + + def _delegate_method(self, name: str, *args, **kwargs): + if name == "from_coo": + return self.from_coo(*args, **kwargs) + elif name == "to_coo": + return self.to_coo(*args, **kwargs) + else: + raise ValueError + + @classmethod + def from_coo(cls, A, dense_index: bool = False) -> Series: + """ + Create a Series with sparse values from a scipy.sparse.coo_matrix. + + Parameters + ---------- + A : scipy.sparse.coo_matrix + dense_index : bool, default False + If False (default), the index consists of only the + coords of the non-null entries of the original coo_matrix. + If True, the index consists of the full sorted + (row, col) coordinates of the coo_matrix. + + Returns + ------- + s : Series + A Series with sparse values. + + Examples + -------- + >>> from scipy import sparse + + >>> A = sparse.coo_matrix( + ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4) + ... ) + >>> A + <3x4 sparse matrix of type '' + with 3 stored elements in COOrdinate format> + + >>> A.todense() + matrix([[0., 0., 1., 2.], + [3., 0., 0., 0.], + [0., 0., 0., 0.]]) + + >>> ss = pd.Series.sparse.from_coo(A) + >>> ss + 0 2 1.0 + 3 2.0 + 1 0 3.0 + dtype: Sparse[float64, nan] + """ + from pandas import Series + from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series + + result = coo_to_sparse_series(A, dense_index=dense_index) + result = Series(result.array, index=result.index, copy=False) + + return result + + def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False): + """ + Create a scipy.sparse.coo_matrix from a Series with MultiIndex. + + Use row_levels and column_levels to determine the row and column + coordinates respectively. row_levels and column_levels are the names + (labels) or numbers of the levels. 
{row_levels, column_levels} must be + a partition of the MultiIndex level names (or numbers). + + Parameters + ---------- + row_levels : tuple/list + column_levels : tuple/list + sort_labels : bool, default False + Sort the row and column labels before forming the sparse matrix. + When `row_levels` and/or `column_levels` refer to a single level, + set to `True` for a faster execution. + + Returns + ------- + y : scipy.sparse.coo_matrix + rows : list (row labels) + columns : list (column labels) + + Examples + -------- + >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan]) + >>> s.index = pd.MultiIndex.from_tuples( + ... [ + ... (1, 2, "a", 0), + ... (1, 2, "a", 1), + ... (1, 1, "b", 0), + ... (1, 1, "b", 1), + ... (2, 1, "b", 0), + ... (2, 1, "b", 1) + ... ], + ... names=["A", "B", "C", "D"], + ... ) + >>> s + A B C D + 1 2 a 0 3.0 + 1 NaN + 1 b 0 1.0 + 1 3.0 + 2 1 b 0 NaN + 1 NaN + dtype: float64 + + >>> ss = s.astype("Sparse") + >>> ss + A B C D + 1 2 a 0 3.0 + 1 NaN + 1 b 0 1.0 + 1 3.0 + 2 1 b 0 NaN + 1 NaN + dtype: Sparse[float64, nan] + + >>> A, rows, columns = ss.sparse.to_coo( + ... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True + ... ) + >>> A + <3x4 sparse matrix of type '' + with 3 stored elements in COOrdinate format> + >>> A.todense() + matrix([[0., 0., 1., 3.], + [3., 0., 0., 0.], + [0., 0., 0., 0.]]) + + >>> rows + [(1, 1), (1, 2), (2, 1)] + >>> columns + [('a', 0), ('a', 1), ('b', 0), ('b', 1)] + """ + from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo + + A, rows, columns = sparse_series_to_coo( + self._parent, row_levels, column_levels, sort_labels=sort_labels + ) + return A, rows, columns + + def to_dense(self) -> Series: + """ + Convert a Series from sparse values to dense. + + Returns + ------- + Series: + A Series with the same values, stored as a dense array. + + Examples + -------- + >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0])) + >>> series + 0 0 + 1 1 + 2 0 + dtype: Sparse[int64, 0] + + >>> series.sparse.to_dense() + 0 0 + 1 1 + 2 0 + dtype: int64 + """ + from pandas import Series + + return Series( + self._parent.array.to_dense(), + index=self._parent.index, + name=self._parent.name, + copy=False, + ) + + +class SparseFrameAccessor(BaseAccessor, PandasDelegate): + """ + DataFrame accessor for sparse data. + + Examples + -------- + >>> df = pd.DataFrame({"a": [1, 2, 0, 0], + ... "b": [3, 0, 0, 4]}, dtype="Sparse[int]") + >>> df.sparse.density + 0.5 + """ + + def _validate(self, data): + dtypes = data.dtypes + if not all(isinstance(t, SparseDtype) for t in dtypes): + raise AttributeError(self._validation_msg) + + @classmethod + def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame: + """ + Create a new DataFrame from a scipy sparse matrix. + + Parameters + ---------- + data : scipy.sparse.spmatrix + Must be convertible to csc format. + index, columns : Index, optional + Row and column labels to use for the resulting DataFrame. + Defaults to a RangeIndex. + + Returns + ------- + DataFrame + Each column of the DataFrame is stored as a + :class:`arrays.SparseArray`. 
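A short, hedged usage sketch of the Series-level accessor documented above (assumes scipy is installed): build a sparse Series with a two-level MultiIndex, export it with to_coo, then rebuild a Series from the COO matrix with from_coo.

import numpy as np
import pandas as pd

s = pd.Series(
    [3.0, np.nan, 1.0, 2.0],
    index=pd.MultiIndex.from_tuples(
        [(0, "a"), (0, "b"), (1, "a"), (1, "b")], names=["row", "col"]
    ),
).astype("Sparse[float64]")

# Level names (or positions) must partition the MultiIndex levels.
A, rows, cols = s.sparse.to_coo(row_levels=["row"], column_levels=["col"])
round_tripped = pd.Series.sparse.from_coo(A)  # sparse Series indexed by (row, col)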
+ + Examples + -------- + >>> import scipy.sparse + >>> mat = scipy.sparse.eye(3, dtype=float) + >>> pd.DataFrame.sparse.from_spmatrix(mat) + 0 1 2 + 0 1.0 0 0 + 1 0 1.0 0 + 2 0 0 1.0 + """ + from pandas._libs.sparse import IntIndex + + from pandas import DataFrame + + data = data.tocsc() + index, columns = cls._prep_index(data, index, columns) + n_rows, n_columns = data.shape + # We need to make sure indices are sorted, as we create + # IntIndex with no input validation (i.e. check_integrity=False ). + # Indices may already be sorted in scipy in which case this adds + # a small overhead. + data.sort_indices() + indices = data.indices + indptr = data.indptr + array_data = data.data + dtype = SparseDtype(array_data.dtype, 0) + arrays = [] + for i in range(n_columns): + sl = slice(indptr[i], indptr[i + 1]) + idx = IntIndex(n_rows, indices[sl], check_integrity=False) + arr = SparseArray._simple_new(array_data[sl], idx, dtype) + arrays.append(arr) + return DataFrame._from_arrays( + arrays, columns=columns, index=index, verify_integrity=False + ) + + def to_dense(self) -> DataFrame: + """ + Convert a DataFrame with sparse values to dense. + + Returns + ------- + DataFrame + A DataFrame with the same values stored as dense arrays. + + Examples + -------- + >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])}) + >>> df.sparse.to_dense() + A + 0 0 + 1 1 + 2 0 + """ + from pandas import DataFrame + + data = {k: v.array.to_dense() for k, v in self._parent.items()} + return DataFrame(data, index=self._parent.index, columns=self._parent.columns) + + def to_coo(self): + """ + Return the contents of the frame as a sparse SciPy COO matrix. + + Returns + ------- + scipy.sparse.spmatrix + If the caller is heterogeneous and contains booleans or objects, + the result will be of dtype=object. See Notes. + + Notes + ----- + The dtype will be the lowest-common-denominator type (implicit + upcasting); that is to say if the dtypes (even of numeric types) + are mixed, the one that accommodates all will be chosen. + + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. By numpy.find_common_type convention, mixing int64 and + and uint64 will result in a float64 dtype. + + Examples + -------- + >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])}) + >>> df.sparse.to_coo() + <4x1 sparse matrix of type '' + with 2 stored elements in COOrdinate format> + """ + import_optional_dependency("scipy") + from scipy.sparse import coo_matrix + + dtype = find_common_type(self._parent.dtypes.to_list()) + if isinstance(dtype, SparseDtype): + dtype = dtype.subtype + + cols, rows, data = [], [], [] + for col, (_, ser) in enumerate(self._parent.items()): + sp_arr = ser.array + if sp_arr.fill_value != 0: + raise ValueError("fill value must be 0 when converting to COO matrix") + + row = sp_arr.sp_index.indices + cols.append(np.repeat(col, len(row))) + rows.append(row) + data.append(sp_arr.sp_values.astype(dtype, copy=False)) + + cols = np.concatenate(cols) + rows = np.concatenate(rows) + data = np.concatenate(data) + return coo_matrix((data, (rows, cols)), shape=self._parent.shape) + + @property + def density(self) -> float: + """ + Ratio of non-sparse points to total (dense) data points. 
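The frame-level accessor composes the same machinery; a brief sketch (again assuming scipy is installed) that builds a sparse DataFrame from a SciPy matrix and converts it back:

import pandas as pd
import scipy.sparse

mat = scipy.sparse.random(5, 3, density=0.4, format="csc", random_state=0)
df = pd.DataFrame.sparse.from_spmatrix(mat, columns=["x", "y", "z"])

print(df.sparse.density)      # fraction of explicitly stored values
coo = df.sparse.to_coo()      # back to a scipy COO matrix (fill value is 0)
dense = df.sparse.to_dense()  # plain float64 DataFrame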
+ + Examples + -------- + >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])}) + >>> df.sparse.density + 0.5 + """ + tmp = np.mean([column.array.density for _, column in self._parent.items()]) + return tmp + + @staticmethod + def _prep_index(data, index, columns): + from pandas.core.indexes.api import ( + default_index, + ensure_index, + ) + + N, K = data.shape + if index is None: + index = default_index(N) + else: + index = ensure_index(index) + if columns is None: + columns = default_index(K) + else: + columns = ensure_index(columns) + + if len(columns) != K: + raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}") + if len(index) != N: + raise ValueError(f"Index length mismatch: {len(index)} vs. {N}") + return index, columns diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..71b71a9779da5c1a584e0ef98bc8320d81bc2a35 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py @@ -0,0 +1,207 @@ +""" +Interaction with scipy.sparse matrices. + +Currently only includes to_coo helpers. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas._libs import lib + +from pandas.core.dtypes.missing import notna + +from pandas.core.algorithms import factorize +from pandas.core.indexes.api import MultiIndex +from pandas.core.series import Series + +if TYPE_CHECKING: + from collections.abc import Iterable + + import numpy as np + import scipy.sparse + + from pandas._typing import ( + IndexLabel, + npt, + ) + + +def _check_is_partition(parts: Iterable, whole: Iterable): + whole = set(whole) + parts = [set(x) for x in parts] + if set.intersection(*parts) != set(): + raise ValueError("Is not a partition because intersection is not null.") + if set.union(*parts) != whole: + raise ValueError("Is not a partition because union is not the whole.") + + +def _levels_to_axis( + ss, + levels: tuple[int] | list[int], + valid_ilocs: npt.NDArray[np.intp], + sort_labels: bool = False, +) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]: + """ + For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`, + where `ax_coords` are the coordinates along one of the two axes of the + destination sparse matrix, and `ax_labels` are the labels from `ss`' Index + which correspond to these coordinates. + + Parameters + ---------- + ss : Series + levels : tuple/list + valid_ilocs : numpy.ndarray + Array of integer positions of valid values for the sparse matrix in ss. + sort_labels : bool, default False + Sort the axis labels before forming the sparse matrix. When `levels` + refers to a single level, set to True for a faster execution. + + Returns + ------- + ax_coords : numpy.ndarray (axis coordinates) + ax_labels : list (axis labels) + """ + # Since the labels are sorted in `Index.levels`, when we wish to sort and + # there is only one level of the MultiIndex for this axis, the desired + # output can be obtained in the following simpler, more efficient way. 
+ if sort_labels and len(levels) == 1: + ax_coords = ss.index.codes[levels[0]][valid_ilocs] + ax_labels = ss.index.levels[levels[0]] + + else: + levels_values = lib.fast_zip( + [ss.index.get_level_values(lvl).to_numpy() for lvl in levels] + ) + codes, ax_labels = factorize(levels_values, sort=sort_labels) + ax_coords = codes[valid_ilocs] + + ax_labels = ax_labels.tolist() + return ax_coords, ax_labels + + +def _to_ijv( + ss, + row_levels: tuple[int] | list[int] = (0,), + column_levels: tuple[int] | list[int] = (1,), + sort_labels: bool = False, +) -> tuple[ + np.ndarray, + npt.NDArray[np.intp], + npt.NDArray[np.intp], + list[IndexLabel], + list[IndexLabel], +]: + """ + For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels, + jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo + constructor, and ilabels and jlabels are the row and column labels + respectively. + + Parameters + ---------- + ss : Series + row_levels : tuple/list + column_levels : tuple/list + sort_labels : bool, default False + Sort the row and column labels before forming the sparse matrix. + When `row_levels` and/or `column_levels` refer to a single level, + set to `True` for a faster execution. + + Returns + ------- + values : numpy.ndarray + Valid values to populate a sparse matrix, extracted from + ss. + i_coords : numpy.ndarray (row coordinates of the values) + j_coords : numpy.ndarray (column coordinates of the values) + i_labels : list (row labels) + j_labels : list (column labels) + """ + # index and column levels must be a partition of the index + _check_is_partition([row_levels, column_levels], range(ss.index.nlevels)) + # From the sparse Series, get the integer indices and data for valid sparse + # entries. + sp_vals = ss.array.sp_values + na_mask = notna(sp_vals) + values = sp_vals[na_mask] + valid_ilocs = ss.array.sp_index.indices[na_mask] + + i_coords, i_labels = _levels_to_axis( + ss, row_levels, valid_ilocs, sort_labels=sort_labels + ) + + j_coords, j_labels = _levels_to_axis( + ss, column_levels, valid_ilocs, sort_labels=sort_labels + ) + + return values, i_coords, j_coords, i_labels, j_labels + + +def sparse_series_to_coo( + ss: Series, + row_levels: Iterable[int] = (0,), + column_levels: Iterable[int] = (1,), + sort_labels: bool = False, +) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]: + """ + Convert a sparse Series to a scipy.sparse.coo_matrix using index + levels row_levels, column_levels as the row and column + labels respectively. Returns the sparse_matrix, row and column labels. + """ + import scipy.sparse + + if ss.index.nlevels < 2: + raise ValueError("to_coo requires MultiIndex with nlevels >= 2.") + if not ss.index.is_unique: + raise ValueError( + "Duplicate index entries are not allowed in to_coo transformation." + ) + + # to keep things simple, only rely on integer indexing (not labels) + row_levels = [ss.index._get_level_number(x) for x in row_levels] + column_levels = [ss.index._get_level_number(x) for x in column_levels] + + v, i, j, rows, columns = _to_ijv( + ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels + ) + sparse_matrix = scipy.sparse.coo_matrix( + (v, (i, j)), shape=(len(rows), len(columns)) + ) + return sparse_matrix, rows, columns + + +def coo_to_sparse_series( + A: scipy.sparse.coo_matrix, dense_index: bool = False +) -> Series: + """ + Convert a scipy.sparse.coo_matrix to a Series with type sparse. 
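As `_check_is_partition` above enforces, `row_levels` and `column_levels` must together cover every MultiIndex level exactly once with no overlap; a small illustration of the failure mode (a sketch; scipy is assumed to be installed):

import numpy as np
import pandas as pd

s = pd.Series(
    [1.0, np.nan, 2.0],
    index=pd.MultiIndex.from_tuples([(0, 0, "a"), (0, 1, "a"), (1, 0, "b")]),
).astype("Sparse[float64]")

try:
    # Levels {0, 1} and {1, 2} overlap, so they do not partition {0, 1, 2}.
    s.sparse.to_coo(row_levels=[0, 1], column_levels=[1, 2])
except ValueError as err:
    print(err)  # "Is not a partition because intersection is not null."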
+ + Parameters + ---------- + A : scipy.sparse.coo_matrix + dense_index : bool, default False + + Returns + ------- + Series + + Raises + ------ + TypeError if A is not a coo_matrix + """ + from pandas import SparseDtype + + try: + ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False) + except AttributeError as err: + raise TypeError( + f"Expected coo_matrix. Got {type(A).__name__} instead." + ) from err + ser = ser.sort_index() + ser = ser.astype(SparseDtype(ser.dtype)) + if dense_index: + ind = MultiIndex.from_product([A.row, A.col]) + ser = ser.reindex(ind) + return ser diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d845435979fdcc1779bacb549104311bfc5da61a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2469187e31686e078845d0eccf50b914ebc6fad8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e31120d15ef5dc4ad5002d10bebbcdb2c8c0ea8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a76a999ad6cb70623aee16db45a79f8b90fb74c0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddd868098fd5c29b892b022323d3524cf8f2ded5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2eae9eac7f5dae39ab1c96adf37e0994989432ce Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ed790a964b7e9d913f8b939662b02d83a95327a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/buffer.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..5d24325e67f62a5da0fa3863d81576dd61f86869 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/buffer.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.core.interchange.dataframe_protocol import ( + Buffer, + DlpackDeviceType, +) + +if TYPE_CHECKING: + import numpy as np + import pyarrow as pa + + +class PandasBuffer(Buffer): + """ + Data in the buffer is guaranteed to be contiguous in memory. + """ + + def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None: + """ + Handle only regular columns (= numpy arrays) for now. + """ + if x.strides[0] and not x.strides == (x.dtype.itemsize,): + # The protocol does not support strided buffers, so a copy is + # necessary. If that's not allowed, we need to raise an exception. + if allow_copy: + x = x.copy() + else: + raise RuntimeError( + "Exports cannot be zero-copy in the case " + "of a non-contiguous buffer" + ) + + # Store the numpy array in which the data resides as a private + # attribute, so we can use it to retrieve the public attributes + self._x = x + + @property + def bufsize(self) -> int: + """ + Buffer size in bytes. + """ + return self._x.size * self._x.dtype.itemsize + + @property + def ptr(self) -> int: + """ + Pointer to start of the buffer as an integer. + """ + return self._x.__array_interface__["data"][0] + + def __dlpack__(self) -> Any: + """ + Represent this structure as DLPack interface. + """ + return self._x.__dlpack__() + + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: + """ + Device type and device ID for where the data in the buffer resides. + """ + return (DlpackDeviceType.CPU, None) + + def __repr__(self) -> str: + return ( + "PandasBuffer(" + + str( + { + "bufsize": self.bufsize, + "ptr": self.ptr, + "device": self.__dlpack_device__()[0].name, + } + ) + + ")" + ) + + +class PandasBufferPyarrow(Buffer): + """ + Data in the buffer is guaranteed to be contiguous in memory. + """ + + def __init__( + self, + buffer: pa.Buffer, + *, + length: int, + ) -> None: + """ + Handle pyarrow chunked arrays. + """ + self._buffer = buffer + self._length = length + + @property + def bufsize(self) -> int: + """ + Buffer size in bytes. + """ + return self._buffer.size + + @property + def ptr(self) -> int: + """ + Pointer to start of the buffer as an integer. + """ + return self._buffer.address + + def __dlpack__(self) -> Any: + """ + Represent this structure as DLPack interface. 
+ """ + raise NotImplementedError() + + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: + """ + Device type and device ID for where the data in the buffer resides. + """ + return (DlpackDeviceType.CPU, None) + + def __repr__(self) -> str: + return ( + "PandasBuffer[pyarrow](" + + str( + { + "bufsize": self.bufsize, + "ptr": self.ptr, + "device": "CPU", + } + ) + + ")" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/column.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/column.py new file mode 100644 index 0000000000000000000000000000000000000000..d59a3df694bb3f6ab5716c25592704d49737a215 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/column.py @@ -0,0 +1,461 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs.lib import infer_dtype +from pandas._libs.tslibs import iNaT +from pandas.errors import NoBufferPresent +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.dtypes import BaseMaskedDtype + +import pandas as pd +from pandas import ( + ArrowDtype, + DatetimeTZDtype, +) +from pandas.api.types import is_string_dtype +from pandas.core.interchange.buffer import ( + PandasBuffer, + PandasBufferPyarrow, +) +from pandas.core.interchange.dataframe_protocol import ( + Column, + ColumnBuffers, + ColumnNullType, + DtypeKind, +) +from pandas.core.interchange.utils import ( + ArrowCTypes, + Endianness, + dtype_to_arrow_c_fmt, +) + +if TYPE_CHECKING: + from pandas.core.interchange.dataframe_protocol import Buffer + +_NP_KINDS = { + "i": DtypeKind.INT, + "u": DtypeKind.UINT, + "f": DtypeKind.FLOAT, + "b": DtypeKind.BOOL, + "U": DtypeKind.STRING, + "M": DtypeKind.DATETIME, + "m": DtypeKind.DATETIME, +} + +_NULL_DESCRIPTION = { + DtypeKind.FLOAT: (ColumnNullType.USE_NAN, None), + DtypeKind.DATETIME: (ColumnNullType.USE_SENTINEL, iNaT), + DtypeKind.INT: (ColumnNullType.NON_NULLABLE, None), + DtypeKind.UINT: (ColumnNullType.NON_NULLABLE, None), + DtypeKind.BOOL: (ColumnNullType.NON_NULLABLE, None), + # Null values for categoricals are stored as `-1` sentinel values + # in the category date (e.g., `col.values.codes` is int8 np.ndarray) + DtypeKind.CATEGORICAL: (ColumnNullType.USE_SENTINEL, -1), + # follow Arrow in using 1 as valid value and 0 for missing/null value + DtypeKind.STRING: (ColumnNullType.USE_BYTEMASK, 0), +} + +_NO_VALIDITY_BUFFER = { + ColumnNullType.NON_NULLABLE: "This column is non-nullable", + ColumnNullType.USE_NAN: "This column uses NaN as null", + ColumnNullType.USE_SENTINEL: "This column uses a sentinel value", +} + + +class PandasColumn(Column): + """ + A column object, with only the methods and properties required by the + interchange protocol defined. + A column can contain one or more chunks. Each chunk can contain up to three + buffers - a data buffer, a mask buffer (depending on null representation), + and an offsets buffer (if variable-size binary; e.g., variable-length + strings). + Note: this Column object can only be produced by ``__dataframe__``, so + doesn't need its own version or ``__column__`` protocol. + """ + + def __init__(self, column: pd.Series, allow_copy: bool = True) -> None: + """ + Note: doesn't deal with extension arrays yet, just assume a regular + Series/ndarray for now. + """ + if isinstance(column, pd.DataFrame): + raise TypeError( + "Expected a Series, got a DataFrame. 
This likely happened " + "because you called __dataframe__ on a DataFrame which, " + "after converting column names to string, resulted in duplicated " + f"names: {column.columns}. Please rename these columns before " + "using the interchange protocol." + ) + if not isinstance(column, pd.Series): + raise NotImplementedError(f"Columns of type {type(column)} not handled yet") + + # Store the column as a private attribute + self._col = column + self._allow_copy = allow_copy + + def size(self) -> int: + """ + Size of the column, in elements. + """ + return self._col.size + + @property + def offset(self) -> int: + """ + Offset of first element. Always zero. + """ + # TODO: chunks are implemented now, probably this should return something + return 0 + + @cache_readonly + def dtype(self) -> tuple[DtypeKind, int, str, str]: + dtype = self._col.dtype + + if isinstance(dtype, pd.CategoricalDtype): + codes = self._col.values.codes + ( + _, + bitwidth, + c_arrow_dtype_f_str, + _, + ) = self._dtype_from_pandasdtype(codes.dtype) + return ( + DtypeKind.CATEGORICAL, + bitwidth, + c_arrow_dtype_f_str, + Endianness.NATIVE, + ) + elif is_string_dtype(dtype): + if infer_dtype(self._col) in ("string", "empty"): + return ( + DtypeKind.STRING, + 8, + dtype_to_arrow_c_fmt(dtype), + Endianness.NATIVE, + ) + raise NotImplementedError("Non-string object dtypes are not supported yet") + else: + return self._dtype_from_pandasdtype(dtype) + + def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]: + """ + See `self.dtype` for details. + """ + # Note: 'c' (complex) not handled yet (not in array spec v1). + # 'b', 'B' (bytes), 'S', 'a', (old-style string) 'V' (void) not handled + # datetime and timedelta both map to datetime (is timedelta handled?) + + kind = _NP_KINDS.get(dtype.kind, None) + if kind is None: + # Not a NumPy dtype. Check if it's a categorical maybe + raise ValueError(f"Data type {dtype} not supported by interchange protocol") + if isinstance(dtype, ArrowDtype): + byteorder = dtype.numpy_dtype.byteorder + elif isinstance(dtype, DatetimeTZDtype): + byteorder = dtype.base.byteorder # type: ignore[union-attr] + elif isinstance(dtype, BaseMaskedDtype): + byteorder = dtype.numpy_dtype.byteorder + else: + byteorder = dtype.byteorder + + if dtype == "bool[pyarrow]": + # return early to avoid the `* 8` below, as this is a bitmask + # rather than a bytemask + return ( + kind, + dtype.itemsize, # pyright: ignore[reportGeneralTypeIssues] + ArrowCTypes.BOOL, + byteorder, + ) + + return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder + + @property + def describe_categorical(self): + """ + If the dtype is categorical, there are two options: + - There are only values in the data buffer. + - There is a separate non-categorical Column encoding for categorical values. + + Raises TypeError if the dtype is not categorical + + Content of returned dict: + - "is_ordered" : bool, whether the ordering of dictionary indices is + semantically meaningful. + - "is_dictionary" : bool, whether a dictionary-style mapping of + categorical values to other objects exists + - "categories" : Column representing the (implicit) mapping of indices to + category values (e.g. an array of cat1, cat2, ...). + None if not a dictionary-style categorical. + """ + if not self.dtype[0] == DtypeKind.CATEGORICAL: + raise TypeError( + "describe_categorical only works on a column with categorical dtype!" 
+ ) + + return { + "is_ordered": self._col.cat.ordered, + "is_dictionary": True, + "categories": PandasColumn(pd.Series(self._col.cat.categories)), + } + + @property + def describe_null(self): + if isinstance(self._col.dtype, BaseMaskedDtype): + column_null_dtype = ColumnNullType.USE_BYTEMASK + null_value = 1 + return column_null_dtype, null_value + if isinstance(self._col.dtype, ArrowDtype): + # We already rechunk (if necessary / allowed) upon initialization, so this + # is already single-chunk by the time we get here. + if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined] + return ColumnNullType.NON_NULLABLE, None + return ColumnNullType.USE_BITMASK, 0 + kind = self.dtype[0] + try: + null, value = _NULL_DESCRIPTION[kind] + except KeyError: + raise NotImplementedError(f"Data type {kind} not yet supported") + + return null, value + + @cache_readonly + def null_count(self) -> int: + """ + Number of null elements. Should always be known. + """ + return self._col.isna().sum().item() + + @property + def metadata(self) -> dict[str, pd.Index]: + """ + Store specific metadata of the column. + """ + return {"pandas.index": self._col.index} + + def num_chunks(self) -> int: + """ + Return the number of chunks the column consists of. + """ + return 1 + + def get_chunks(self, n_chunks: int | None = None): + """ + Return an iterator yielding the chunks. + See `DataFrame.get_chunks` for details on ``n_chunks``. + """ + if n_chunks and n_chunks > 1: + size = len(self._col) + step = size // n_chunks + if size % n_chunks != 0: + step += 1 + for start in range(0, step * n_chunks, step): + yield PandasColumn( + self._col.iloc[start : start + step], self._allow_copy + ) + else: + yield self + + def get_buffers(self) -> ColumnBuffers: + """ + Return a dictionary containing the underlying buffers. + The returned dictionary has the following contents: + - "data": a two-element tuple whose first element is a buffer + containing the data and whose second element is the data + buffer's associated dtype. + - "validity": a two-element tuple whose first element is a buffer + containing mask values indicating missing data and + whose second element is the mask value buffer's + associated dtype. None if the null representation is + not a bit or byte mask. + - "offsets": a two-element tuple whose first element is a buffer + containing the offset values for variable-size binary + data (e.g., variable-length strings) and whose second + element is the offsets buffer's associated dtype. None + if the data buffer does not have an associated offsets + buffer. + """ + buffers: ColumnBuffers = { + "data": self._get_data_buffer(), + "validity": None, + "offsets": None, + } + + try: + buffers["validity"] = self._get_validity_buffer() + except NoBufferPresent: + pass + + try: + buffers["offsets"] = self._get_offsets_buffer() + except NoBufferPresent: + pass + + return buffers + + def _get_data_buffer( + self, + ) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]: + """ + Return the buffer containing the data and the buffer's associated dtype. 
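A small consumer-side sketch of the buffer layout described above, using the public `__dataframe__` hook rather than constructing `PandasColumn` directly:

import pandas as pd

col = pd.DataFrame({"a": [1.0, 2.0, 3.0]}).__dataframe__().get_column(0)
buffers = col.get_buffers()

data_buf, data_dtype = buffers["data"]  # (PandasBuffer, (kind, bit width, format, endianness))
print(data_buf.bufsize, data_buf.ptr)   # 24 bytes (three float64 values) and a raw address
print(buffers["validity"], buffers["offsets"])  # both None for a plain float64 column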
+ """ + buffer: Buffer + if self.dtype[0] in ( + DtypeKind.INT, + DtypeKind.UINT, + DtypeKind.FLOAT, + DtypeKind.BOOL, + DtypeKind.DATETIME, + ): + # self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make + # it longer than 4 characters + dtype = self.dtype + if self.dtype[0] == DtypeKind.DATETIME and len(self.dtype[2]) > 4: + np_arr = self._col.dt.tz_convert(None).to_numpy() + else: + arr = self._col.array + if isinstance(self._col.dtype, BaseMaskedDtype): + np_arr = arr._data # type: ignore[attr-defined] + elif isinstance(self._col.dtype, ArrowDtype): + # We already rechunk (if necessary / allowed) upon initialization, + # so this is already single-chunk by the time we get here. + arr = arr._pa_array.chunks[0] # type: ignore[attr-defined] + buffer = PandasBufferPyarrow( + arr.buffers()[1], # type: ignore[attr-defined] + length=len(arr), + ) + return buffer, dtype + else: + np_arr = arr._ndarray # type: ignore[attr-defined] + buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy) + elif self.dtype[0] == DtypeKind.CATEGORICAL: + codes = self._col.values._codes + buffer = PandasBuffer(codes, allow_copy=self._allow_copy) + dtype = self._dtype_from_pandasdtype(codes.dtype) + elif self.dtype[0] == DtypeKind.STRING: + # Marshal the strings from a NumPy object array into a byte array + buf = self._col.to_numpy() + b = bytearray() + + # TODO: this for-loop is slow; can be implemented in Cython/C/C++ later + for obj in buf: + if isinstance(obj, str): + b.extend(obj.encode(encoding="utf-8")) + + # Convert the byte array to a Pandas "buffer" using + # a NumPy array as the backing store + buffer = PandasBuffer(np.frombuffer(b, dtype="uint8")) + + # Define the dtype for the returned buffer + # TODO: this will need correcting + # https://github.com/pandas-dev/pandas/issues/54781 + dtype = self.dtype + else: + raise NotImplementedError(f"Data type {self._col.dtype} not handled yet") + + return buffer, dtype + + def _get_validity_buffer(self) -> tuple[Buffer, Any] | None: + """ + Return the buffer containing the mask values indicating missing data and + the buffer's associated dtype. + Raises NoBufferPresent if null representation is not a bit or byte mask. + """ + null, invalid = self.describe_null + buffer: Buffer + if isinstance(self._col.dtype, ArrowDtype): + # We already rechunk (if necessary / allowed) upon initialization, so this + # is already single-chunk by the time we get here. + arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined] + dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE) + if arr.buffers()[0] is None: + return None + buffer = PandasBufferPyarrow( + arr.buffers()[0], + length=len(arr), + ) + return buffer, dtype + + if isinstance(self._col.dtype, BaseMaskedDtype): + mask = self._col.array._mask # type: ignore[attr-defined] + buffer = PandasBuffer(mask) + dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE) + return buffer, dtype + + if self.dtype[0] == DtypeKind.STRING: + # For now, use byte array as the mask. + # TODO: maybe store as bit array to save space?.. 
+ buf = self._col.to_numpy() + + # Determine the encoding for valid values + valid = invalid == 0 + invalid = not valid + + mask = np.zeros(shape=(len(buf),), dtype=np.bool_) + for i, obj in enumerate(buf): + mask[i] = valid if isinstance(obj, str) else invalid + + # Convert the mask array to a Pandas "buffer" using + # a NumPy array as the backing store + buffer = PandasBuffer(mask) + + # Define the dtype of the returned buffer + dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE) + + return buffer, dtype + + try: + msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask" + except KeyError: + # TODO: implement for other bit/byte masks? + raise NotImplementedError("See self.describe_null") + + raise NoBufferPresent(msg) + + def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]: + """ + Return the buffer containing the offset values for variable-size binary + data (e.g., variable-length strings) and the buffer's associated dtype. + Raises NoBufferPresent if the data buffer does not have an associated + offsets buffer. + """ + if self.dtype[0] == DtypeKind.STRING: + # For each string, we need to manually determine the next offset + values = self._col.to_numpy() + ptr = 0 + offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) + for i, v in enumerate(values): + # For missing values (in this case, `np.nan` values) + # we don't increment the pointer + if isinstance(v, str): + b = v.encode(encoding="utf-8") + ptr += len(b) + + offsets[i + 1] = ptr + + # Convert the offsets to a Pandas "buffer" using + # the NumPy array as the backing store + buffer = PandasBuffer(offsets) + + # Assemble the buffer dtype info + dtype = ( + DtypeKind.INT, + 64, + ArrowCTypes.INT64, + Endianness.NATIVE, + ) # note: currently only support native endianness + else: + raise NoBufferPresent( + "This column has a fixed-length dtype so " + "it does not have an offsets buffer" + ) + + return buffer, dtype diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..1abacddfc7e3b7035a9446cd4118097c6accd385 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +from collections import abc +from typing import TYPE_CHECKING + +from pandas.core.interchange.column import PandasColumn +from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg +from pandas.core.interchange.utils import maybe_rechunk + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from pandas import ( + DataFrame, + Index, + ) + + +class PandasDataFrameXchg(DataFrameXchg): + """ + A data frame class, with only the methods required by the interchange + protocol defined. + Instances of this (private) class are returned from + ``pd.DataFrame.__dataframe__`` as objects with the methods and + attributes defined on this class. + """ + + def __init__(self, df: DataFrame, allow_copy: bool = True) -> None: + """ + Constructor - an instance of this (private) class is returned from + `pd.DataFrame.__dataframe__`. 
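For object-dtype string columns, the code above produces a UTF-8 data buffer, int64 offsets, and a byte mask; a hedged sketch of what a consumer sees:

import pandas as pd

col = pd.DataFrame({"s": ["ab", None, "cde"]}).__dataframe__().get_column(0)
bufs = col.get_buffers()

offsets_buf, offsets_dtype = bufs["offsets"]     # int64 offsets, here [0, 2, 2, 5]
validity_buf, validity_dtype = bufs["validity"]  # byte mask, here [1, 0, 1] (1 = valid)
print(col.null_count)                            # 1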
+ """ + self._df = df.rename(columns=str, copy=False) + self._allow_copy = allow_copy + for i, _col in enumerate(self._df.columns): + rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy) + if rechunked is not None: + self._df.isetitem(i, rechunked) + + def __dataframe__( + self, nan_as_null: bool = False, allow_copy: bool = True + ) -> PandasDataFrameXchg: + # `nan_as_null` can be removed here once it's removed from + # Dataframe.__dataframe__ + return PandasDataFrameXchg(self._df, allow_copy) + + @property + def metadata(self) -> dict[str, Index]: + # `index` isn't a regular column, and the protocol doesn't support row + # labels - so we export it as Pandas-specific metadata here. + return {"pandas.index": self._df.index} + + def num_columns(self) -> int: + return len(self._df.columns) + + def num_rows(self) -> int: + return len(self._df) + + def num_chunks(self) -> int: + return 1 + + def column_names(self) -> Index: + return self._df.columns + + def get_column(self, i: int) -> PandasColumn: + return PandasColumn(self._df.iloc[:, i], allow_copy=self._allow_copy) + + def get_column_by_name(self, name: str) -> PandasColumn: + return PandasColumn(self._df[name], allow_copy=self._allow_copy) + + def get_columns(self) -> list[PandasColumn]: + return [ + PandasColumn(self._df[name], allow_copy=self._allow_copy) + for name in self._df.columns + ] + + def select_columns(self, indices: Sequence[int]) -> PandasDataFrameXchg: + if not isinstance(indices, abc.Sequence): + raise ValueError("`indices` is not a sequence") + if not isinstance(indices, list): + indices = list(indices) + + return PandasDataFrameXchg( + self._df.iloc[:, indices], allow_copy=self._allow_copy + ) + + def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: # type: ignore[override] + if not isinstance(names, abc.Sequence): + raise ValueError("`names` is not a sequence") + if not isinstance(names, list): + names = list(names) + + return PandasDataFrameXchg(self._df.loc[:, names], allow_copy=self._allow_copy) + + def get_chunks(self, n_chunks: int | None = None) -> Iterable[PandasDataFrameXchg]: + """ + Return an iterator yielding the chunks. + """ + if n_chunks and n_chunks > 1: + size = len(self._df) + step = size // n_chunks + if size % n_chunks != 0: + step += 1 + for start in range(0, step * n_chunks, step): + yield PandasDataFrameXchg( + self._df.iloc[start : start + step, :], + allow_copy=self._allow_copy, + ) + else: + yield self diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe_protocol.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..95e7b6a26f93a8cd10048076bd6906190e04d2ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe_protocol.py @@ -0,0 +1,465 @@ +""" +A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api +""" + +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +import enum +from typing import ( + TYPE_CHECKING, + Any, + TypedDict, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + +class DlpackDeviceType(enum.IntEnum): + """Integer enum for device type codes matching DLPack.""" + + CPU = 1 + CUDA = 2 + CPU_PINNED = 3 + OPENCL = 4 + VULKAN = 7 + METAL = 8 + VPI = 9 + ROCM = 10 + + +class DtypeKind(enum.IntEnum): + """ + Integer enum for data types. 
+ + Attributes + ---------- + INT : int + Matches to signed integer data type. + UINT : int + Matches to unsigned integer data type. + FLOAT : int + Matches to floating point data type. + BOOL : int + Matches to boolean data type. + STRING : int + Matches to string data type (UTF-8 encoded). + DATETIME : int + Matches to datetime data type. + CATEGORICAL : int + Matches to categorical data type. + """ + + INT = 0 + UINT = 1 + FLOAT = 2 + BOOL = 20 + STRING = 21 # UTF-8 + DATETIME = 22 + CATEGORICAL = 23 + + +class ColumnNullType(enum.IntEnum): + """ + Integer enum for null type representation. + + Attributes + ---------- + NON_NULLABLE : int + Non-nullable column. + USE_NAN : int + Use explicit float NaN value. + USE_SENTINEL : int + Sentinel value besides NaN/NaT. + USE_BITMASK : int + The bit is set/unset representing a null on a certain position. + USE_BYTEMASK : int + The byte is set/unset representing a null on a certain position. + """ + + NON_NULLABLE = 0 + USE_NAN = 1 + USE_SENTINEL = 2 + USE_BITMASK = 3 + USE_BYTEMASK = 4 + + +class ColumnBuffers(TypedDict): + # first element is a buffer containing the column data; + # second element is the data buffer's associated dtype + data: tuple[Buffer, Any] + + # first element is a buffer containing mask values indicating missing data; + # second element is the mask value buffer's associated dtype. + # None if the null representation is not a bit or byte mask + validity: tuple[Buffer, Any] | None + + # first element is a buffer containing the offset values for + # variable-size binary data (e.g., variable-length strings); + # second element is the offsets buffer's associated dtype. + # None if the data buffer does not have an associated offsets buffer + offsets: tuple[Buffer, Any] | None + + +class CategoricalDescription(TypedDict): + # whether the ordering of dictionary indices is semantically meaningful + is_ordered: bool + # whether a dictionary-style mapping of categorical values to other objects exists + is_dictionary: bool + # Python-level only (e.g. ``{int: str}``). + # None if not a dictionary-style categorical. + categories: Column | None + + +class Buffer(ABC): + """ + Data in the buffer is guaranteed to be contiguous in memory. + + Note that there is no dtype attribute present, a buffer can be thought of + as simply a block of memory. However, if the column that the buffer is + attached to has a dtype that's supported by DLPack and ``__dlpack__`` is + implemented, then that dtype information will be contained in the return + value from ``__dlpack__``. + + This distinction is useful to support both data exchange via DLPack on a + buffer and (b) dtypes like variable-length strings which do not have a + fixed number of bytes per element. + """ + + @property + @abstractmethod + def bufsize(self) -> int: + """ + Buffer size in bytes. + """ + + @property + @abstractmethod + def ptr(self) -> int: + """ + Pointer to start of the buffer as an integer. + """ + + @abstractmethod + def __dlpack__(self): + """ + Produce DLPack capsule (see array API standard). + + Raises: + + - TypeError : if the buffer contains unsupported dtypes. + - NotImplementedError : if DLPack support is not implemented + + Useful to have to connect to array libraries. Support optional because + it's not completely trivial to implement for a Python-only library. 
+ """ + raise NotImplementedError("__dlpack__") + + @abstractmethod + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: + """ + Device type and device ID for where the data in the buffer resides. + Uses device type codes matching DLPack. + Note: must be implemented even if ``__dlpack__`` is not. + """ + + +class Column(ABC): + """ + A column object, with only the methods and properties required by the + interchange protocol defined. + + A column can contain one or more chunks. Each chunk can contain up to three + buffers - a data buffer, a mask buffer (depending on null representation), + and an offsets buffer (if variable-size binary; e.g., variable-length + strings). + + TBD: Arrow has a separate "null" dtype, and has no separate mask concept. + Instead, it seems to use "children" for both columns with a bit mask, + and for nested dtypes. Unclear whether this is elegant or confusing. + This design requires checking the null representation explicitly. + + The Arrow design requires checking: + 1. the ARROW_FLAG_NULLABLE (for sentinel values) + 2. if a column has two children, combined with one of those children + having a null dtype. + + Making the mask concept explicit seems useful. One null dtype would + not be enough to cover both bit and byte masks, so that would mean + even more checking if we did it the Arrow way. + + TBD: there's also the "chunk" concept here, which is implicit in Arrow as + multiple buffers per array (= column here). Semantically it may make + sense to have both: chunks were meant for example for lazy evaluation + of data which doesn't fit in memory, while multiple buffers per column + could also come from doing a selection operation on a single + contiguous buffer. + + Given these concepts, one would expect chunks to be all of the same + size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows), + while multiple buffers could have data-dependent lengths. Not an issue + in pandas if one column is backed by a single NumPy array, but in + Arrow it seems possible. + Are multiple chunks *and* multiple buffers per column necessary for + the purposes of this interchange protocol, or must producers either + reuse the chunk concept for this or copy the data? + + Note: this Column object can only be produced by ``__dataframe__``, so + doesn't need its own version or ``__column__`` protocol. + """ + + @abstractmethod + def size(self) -> int: + """ + Size of the column, in elements. + + Corresponds to DataFrame.num_rows() if column is a single chunk; + equal to size of this current chunk otherwise. + """ + + @property + @abstractmethod + def offset(self) -> int: + """ + Offset of first element. + + May be > 0 if using chunks; for example for a column with N chunks of + equal size M (only the last chunk may be shorter), + ``offset = n * M``, ``n = 0 .. N-1``. + """ + + @property + @abstractmethod + def dtype(self) -> tuple[DtypeKind, int, str, str]: + """ + Dtype description as a tuple ``(kind, bit-width, format string, endianness)``. + + Bit-width : the number of bits as an integer + Format string : data type description format string in Apache Arrow C + Data Interface format. + Endianness : current only native endianness (``=``) is supported + + Notes: + - Kind specifiers are aligned with DLPack where possible (hence the + jump to 20, leave enough room for future extension) + - Masks must be specified as boolean with either bit width 1 (for bit + masks) or 8 (for byte masks). 
+ - Dtype width in bits was preferred over bytes + - Endianness isn't too useful, but included now in case in the future + we need to support non-native endianness + - Went with Apache Arrow format strings over NumPy format strings + because they're more complete from a dataframe perspective + - Format strings are mostly useful for datetime specification, and + for categoricals. + - For categoricals, the format string describes the type of the + categorical in the data buffer. In case of a separate encoding of + the categorical (e.g. an integer to string mapping), this can + be derived from ``self.describe_categorical``. + - Data types not included: complex, Arrow-style null, binary, decimal, + and nested (list, struct, map, union) dtypes. + """ + + @property + @abstractmethod + def describe_categorical(self) -> CategoricalDescription: + """ + If the dtype is categorical, there are two options: + - There are only values in the data buffer. + - There is a separate non-categorical Column encoding for categorical values. + + Raises TypeError if the dtype is not categorical + + Returns the dictionary with description on how to interpret the data buffer: + - "is_ordered" : bool, whether the ordering of dictionary indices is + semantically meaningful. + - "is_dictionary" : bool, whether a mapping of + categorical values to other objects exists + - "categories" : Column representing the (implicit) mapping of indices to + category values (e.g. an array of cat1, cat2, ...). + None if not a dictionary-style categorical. + + TBD: are there any other in-memory representations that are needed? + """ + + @property + @abstractmethod + def describe_null(self) -> tuple[ColumnNullType, Any]: + """ + Return the missing value (or "null") representation the column dtype + uses, as a tuple ``(kind, value)``. + + Value : if kind is "sentinel value", the actual value. If kind is a bit + mask or a byte mask, the value (0 or 1) indicating a missing value. None + otherwise. + """ + + @property + @abstractmethod + def null_count(self) -> int | None: + """ + Number of null elements, if known. + + Note: Arrow uses -1 to indicate "unknown", but None seems cleaner. + """ + + @property + @abstractmethod + def metadata(self) -> dict[str, Any]: + """ + The metadata for the column. See `DataFrame.metadata` for more details. + """ + + @abstractmethod + def num_chunks(self) -> int: + """ + Return the number of chunks the column consists of. + """ + + @abstractmethod + def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]: + """ + Return an iterator yielding the chunks. + + See `DataFrame.get_chunks` for details on ``n_chunks``. + """ + + @abstractmethod + def get_buffers(self) -> ColumnBuffers: + """ + Return a dictionary containing the underlying buffers. + + The returned dictionary has the following contents: + + - "data": a two-element tuple whose first element is a buffer + containing the data and whose second element is the data + buffer's associated dtype. + - "validity": a two-element tuple whose first element is a buffer + containing mask values indicating missing data and + whose second element is the mask value buffer's + associated dtype. None if the null representation is + not a bit or byte mask. + - "offsets": a two-element tuple whose first element is a buffer + containing the offset values for variable-size binary + data (e.g., variable-length strings) and whose second + element is the offsets buffer's associated dtype. None + if the data buffer does not have an associated offsets + buffer. 
+ """ + + +# def get_children(self) -> Iterable[Column]: +# """ +# Children columns underneath the column, each object in this iterator +# must adhere to the column specification. +# """ +# pass + + +class DataFrame(ABC): + """ + A data frame class, with only the methods required by the interchange + protocol defined. + + A "data frame" represents an ordered collection of named columns. + A column's "name" must be a unique string. + Columns may be accessed by name or by position. + + This could be a public data frame class, or an object with the methods and + attributes defined on this DataFrame class could be returned from the + ``__dataframe__`` method of a public data frame class in a library adhering + to the dataframe interchange protocol specification. + """ + + version = 0 # version of the protocol + + @abstractmethod + def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True): + """Construct a new interchange object, potentially changing the parameters.""" + + @property + @abstractmethod + def metadata(self) -> dict[str, Any]: + """ + The metadata for the data frame, as a dictionary with string keys. The + contents of `metadata` may be anything, they are meant for a library + to store information that it needs to, e.g., roundtrip losslessly or + for two implementations to share data that is not (yet) part of the + interchange protocol specification. For avoiding collisions with other + entries, please add name the keys with the name of the library + followed by a period and the desired name, e.g, ``pandas.indexcol``. + """ + + @abstractmethod + def num_columns(self) -> int: + """ + Return the number of columns in the DataFrame. + """ + + @abstractmethod + def num_rows(self) -> int | None: + # TODO: not happy with Optional, but need to flag it may be expensive + # why include it if it may be None - what do we expect consumers + # to do here? + """ + Return the number of rows in the DataFrame, if available. + """ + + @abstractmethod + def num_chunks(self) -> int: + """ + Return the number of chunks the DataFrame consists of. + """ + + @abstractmethod + def column_names(self) -> Iterable[str]: + """ + Return an iterator yielding the column names. + """ + + @abstractmethod + def get_column(self, i: int) -> Column: + """ + Return the column at the indicated position. + """ + + @abstractmethod + def get_column_by_name(self, name: str) -> Column: + """ + Return the column whose name is the indicated name. + """ + + @abstractmethod + def get_columns(self) -> Iterable[Column]: + """ + Return an iterator yielding the columns. + """ + + @abstractmethod + def select_columns(self, indices: Sequence[int]) -> DataFrame: + """ + Create a new DataFrame by selecting a subset of columns by index. + """ + + @abstractmethod + def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: + """ + Create a new DataFrame by selecting a subset of columns by name. + """ + + @abstractmethod + def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]: + """ + Return an iterator yielding the chunks. + + By default (None), yields the chunks that the data is stored as by the + producer. If given, ``n_chunks`` must be a multiple of + ``self.num_chunks()``, meaning the producer must subdivide each chunk + before yielding it. 
+ """ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/from_dataframe.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/from_dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..4162ebc33f0d6bff8a1caff23b1db1dca20e4f3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/from_dataframe.py @@ -0,0 +1,526 @@ +from __future__ import annotations + +import ctypes +import re +from typing import Any + +import numpy as np + +from pandas.compat._optional import import_optional_dependency +from pandas.errors import SettingWithCopyError + +import pandas as pd +from pandas.core.interchange.dataframe_protocol import ( + Buffer, + Column, + ColumnNullType, + DataFrame as DataFrameXchg, + DtypeKind, +) +from pandas.core.interchange.utils import ( + ArrowCTypes, + Endianness, +) + +_NP_DTYPES: dict[DtypeKind, dict[int, Any]] = { + DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64}, + DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64}, + DtypeKind.FLOAT: {32: np.float32, 64: np.float64}, + DtypeKind.BOOL: {1: bool, 8: bool}, +} + + +def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame: + """ + Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol. + + Parameters + ---------- + df : DataFrameXchg + Object supporting the interchange protocol, i.e. `__dataframe__` method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pd.DataFrame + + Examples + -------- + >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> interchange_object = df_not_necessarily_pandas.__dataframe__() + >>> interchange_object.column_names() + Index(['A', 'B'], dtype='object') + >>> df_pandas = (pd.api.interchange.from_dataframe + ... (interchange_object.select_columns_by_name(['A']))) + >>> df_pandas + A + 0 1 + 1 2 + + These methods (``column_names``, ``select_columns_by_name``) should work + for any dataframe library which implements the interchange protocol. + """ + if isinstance(df, pd.DataFrame): + return df + + if not hasattr(df, "__dataframe__"): + raise ValueError("`df` does not support __dataframe__") + + return _from_dataframe( + df.__dataframe__(allow_copy=allow_copy), allow_copy=allow_copy + ) + + +def _from_dataframe(df: DataFrameXchg, allow_copy: bool = True): + """ + Build a ``pd.DataFrame`` from the DataFrame interchange object. + + Parameters + ---------- + df : DataFrameXchg + Object supporting the interchange protocol, i.e. `__dataframe__` method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). 
+ + Returns + ------- + pd.DataFrame + """ + pandas_dfs = [] + for chunk in df.get_chunks(): + pandas_df = protocol_df_chunk_to_pandas(chunk) + pandas_dfs.append(pandas_df) + + if not allow_copy and len(pandas_dfs) > 1: + raise RuntimeError( + "To join chunks a copy is required which is forbidden by allow_copy=False" + ) + if not pandas_dfs: + pandas_df = protocol_df_chunk_to_pandas(df) + elif len(pandas_dfs) == 1: + pandas_df = pandas_dfs[0] + else: + pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False) + + index_obj = df.metadata.get("pandas.index", None) + if index_obj is not None: + pandas_df.index = index_obj + + return pandas_df + + +def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame: + """ + Convert interchange protocol chunk to ``pd.DataFrame``. + + Parameters + ---------- + df : DataFrameXchg + + Returns + ------- + pd.DataFrame + """ + # We need a dict of columns here, with each column being a NumPy array (at + # least for now, deal with non-NumPy dtypes later). + columns: dict[str, Any] = {} + buffers = [] # hold on to buffers, keeps memory alive + for name in df.column_names(): + if not isinstance(name, str): + raise ValueError(f"Column {name} is not a string") + if name in columns: + raise ValueError(f"Column {name} is not unique") + col = df.get_column_by_name(name) + dtype = col.dtype[0] + if dtype in ( + DtypeKind.INT, + DtypeKind.UINT, + DtypeKind.FLOAT, + DtypeKind.BOOL, + ): + columns[name], buf = primitive_column_to_ndarray(col) + elif dtype == DtypeKind.CATEGORICAL: + columns[name], buf = categorical_column_to_series(col) + elif dtype == DtypeKind.STRING: + columns[name], buf = string_column_to_ndarray(col) + elif dtype == DtypeKind.DATETIME: + columns[name], buf = datetime_column_to_ndarray(col) + else: + raise NotImplementedError(f"Data type {dtype} not handled yet") + + buffers.append(buf) + + pandas_df = pd.DataFrame(columns) + pandas_df.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"] = buffers + return pandas_df + + +def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: + """ + Convert a column holding one of the primitive dtypes to a NumPy array. + + A primitive type is one of: int, uint, float, bool. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of np.ndarray holding the data and the memory owner object + that keeps the memory alive. + """ + buffers = col.get_buffers() + + data_buff, data_dtype = buffers["data"] + data = buffer_to_ndarray( + data_buff, data_dtype, offset=col.offset, length=col.size() + ) + + data = set_nulls(data, col, buffers["validity"]) + return data, buffers + + +def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]: + """ + Convert a column holding categorical data to a pandas Series. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of pd.Series holding the data and the memory owner object + that keeps the memory alive. 
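Tying the conversion helpers above together: `pd.api.interchange.from_dataframe` accepts anything exposing `__dataframe__`, including pandas' own interchange wrapper, so a round trip can be sketched as:

import pandas as pd

df = pd.DataFrame({"ints": [1, 2, 3], "cats": pd.Categorical(["x", "y", "x"])})
xchg = df.__dataframe__()                         # PandasDataFrameXchg wrapper
result = pd.api.interchange.from_dataframe(xchg)  # rebuilt via the helpers above

print(result.equals(df))  # True: dtypes (int64, category) and values survive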
+ """ + categorical = col.describe_categorical + + if not categorical["is_dictionary"]: + raise NotImplementedError("Non-dictionary categoricals not supported yet") + + cat_column = categorical["categories"] + if hasattr(cat_column, "_col"): + # Item "Column" of "Optional[Column]" has no attribute "_col" + # Item "None" of "Optional[Column]" has no attribute "_col" + categories = np.array(cat_column._col) # type: ignore[union-attr] + else: + raise NotImplementedError( + "Interchanging categorical columns isn't supported yet, and our " + "fallback of using the `col._col` attribute (a ndarray) failed." + ) + buffers = col.get_buffers() + + codes_buff, codes_dtype = buffers["data"] + codes = buffer_to_ndarray( + codes_buff, codes_dtype, offset=col.offset, length=col.size() + ) + + # Doing module in order to not get ``IndexError`` for + # out-of-bounds sentinel values in `codes` + if len(categories) > 0: + values = categories[codes % len(categories)] + else: + values = codes + + cat = pd.Categorical( + values, categories=categories, ordered=categorical["is_ordered"] + ) + data = pd.Series(cat) + + data = set_nulls(data, col, buffers["validity"]) + return data, buffers + + +def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: + """ + Convert a column holding string data to a NumPy array. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of np.ndarray holding the data and the memory owner object + that keeps the memory alive. + """ + null_kind, sentinel_val = col.describe_null + + if null_kind not in ( + ColumnNullType.NON_NULLABLE, + ColumnNullType.USE_BITMASK, + ColumnNullType.USE_BYTEMASK, + ): + raise NotImplementedError( + f"{null_kind} null kind is not yet supported for string columns." + ) + + buffers = col.get_buffers() + + assert buffers["offsets"], "String buffers must contain offsets" + # Retrieve the data buffer containing the UTF-8 code units + data_buff, _ = buffers["data"] + # We're going to reinterpret the buffer as uint8, so make sure we can do it safely + assert col.dtype[2] in ( + ArrowCTypes.STRING, + ArrowCTypes.LARGE_STRING, + ) # format_str == utf-8 + # Convert the buffers to NumPy arrays. 
In order to go from STRING to + # an equivalent ndarray, we claim that the buffer is uint8 (i.e., a byte array) + data_dtype = ( + DtypeKind.UINT, + 8, + ArrowCTypes.UINT8, + Endianness.NATIVE, + ) + # Specify zero offset as we don't want to chunk the string data + data = buffer_to_ndarray(data_buff, data_dtype, offset=0, length=data_buff.bufsize) + + # Retrieve the offsets buffer containing the index offsets demarcating + # the beginning and the ending of each string + offset_buff, offset_dtype = buffers["offsets"] + # Offsets buffer contains start-stop positions of strings in the data buffer, + # meaning that it has more elements than in the data buffer, do `col.size() + 1` + # here to pass a proper offsets buffer size + offsets = buffer_to_ndarray( + offset_buff, offset_dtype, offset=col.offset, length=col.size() + 1 + ) + + null_pos = None + if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): + validity = buffers["validity"] + if validity is not None: + valid_buff, valid_dtype = validity + null_pos = buffer_to_ndarray( + valid_buff, valid_dtype, offset=col.offset, length=col.size() + ) + if sentinel_val == 0: + null_pos = ~null_pos + + # Assemble the strings from the code units + str_list: list[None | float | str] = [None] * col.size() + for i in range(col.size()): + # Check for missing values + if null_pos is not None and null_pos[i]: + str_list[i] = np.nan + continue + + # Extract a range of code units + units = data[offsets[i] : offsets[i + 1]] + + # Convert the list of code units to bytes + str_bytes = bytes(units) + + # Create the string + string = str_bytes.decode(encoding="utf-8") + + # Add to our list of strings + str_list[i] = string + + # Convert the string list to a NumPy array + return np.asarray(str_list, dtype="object"), buffers + + +def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray: + """Parse datetime `format_str` to interpret the `data`.""" + # timestamp 'ts{unit}:tz' + timestamp_meta = re.match(r"ts([smun]):(.*)", format_str) + if timestamp_meta: + unit, tz = timestamp_meta.group(1), timestamp_meta.group(2) + if unit != "s": + # the format string describes only a first letter of the unit, so + # add one extra letter to convert the unit to numpy-style: + # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns' + unit += "s" + data = data.astype(f"datetime64[{unit}]") + if tz != "": + data = pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(tz) + return data + + # date 'td{Days/Ms}' + date_meta = re.match(r"td([Dm])", format_str) + if date_meta: + unit = date_meta.group(1) + if unit == "D": + # NumPy doesn't support DAY unit, so converting days to seconds + # (converting to uint64 to avoid overflow) + data = (data.astype(np.uint64) * (24 * 60 * 60)).astype("datetime64[s]") + elif unit == "m": + data = data.astype("datetime64[ms]") + else: + raise NotImplementedError(f"Date unit is not supported: {unit}") + return data + + raise NotImplementedError(f"DateTime kind is not supported: {format_str}") + + +def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any]: + """ + Convert a column holding DateTime data to a NumPy array. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of np.ndarray holding the data and the memory owner object + that keeps the memory alive. 
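For intuition, the offsets-based decoding above can be reproduced on plain NumPy buffers (illustrative bytes, not a real interchange buffer):

import numpy as np

data = np.frombuffer(b"helloworld!", dtype=np.uint8)    # UTF-8 code units
offsets = np.array([0, 5, 10, 11], dtype=np.int64)      # start/stop of each string
strings = [
    bytes(data[offsets[i]: offsets[i + 1]]).decode("utf-8")
    for i in range(len(offsets) - 1)
]
print(strings)                                          # ['hello', 'world', '!']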
+ """ + buffers = col.get_buffers() + + _, col_bit_width, format_str, _ = col.dtype + dbuf, _ = buffers["data"] + # Consider dtype being `uint` to get number of units passed since the 01.01.1970 + + data = buffer_to_ndarray( + dbuf, + ( + DtypeKind.INT, + col_bit_width, + getattr(ArrowCTypes, f"INT{col_bit_width}"), + Endianness.NATIVE, + ), + offset=col.offset, + length=col.size(), + ) + + data = parse_datetime_format_str(format_str, data) # type: ignore[assignment] + data = set_nulls(data, col, buffers["validity"]) + return data, buffers + + +def buffer_to_ndarray( + buffer: Buffer, + dtype: tuple[DtypeKind, int, str, str], + *, + length: int, + offset: int = 0, +) -> np.ndarray: + """ + Build a NumPy array from the passed buffer. + + Parameters + ---------- + buffer : Buffer + Buffer to build a NumPy array from. + dtype : tuple + Data type of the buffer conforming protocol dtypes format. + offset : int, default: 0 + Number of elements to offset from the start of the buffer. + length : int, optional + If the buffer is a bit-mask, specifies a number of bits to read + from the buffer. Has no effect otherwise. + + Returns + ------- + np.ndarray + + Notes + ----- + The returned array doesn't own the memory. The caller of this function is + responsible for keeping the memory owner object alive as long as + the returned NumPy array is being used. + """ + kind, bit_width, _, _ = dtype + + column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None) + if column_dtype is None: + raise NotImplementedError(f"Conversion for {dtype} is not yet supported.") + + # TODO: No DLPack yet, so need to construct a new ndarray from the data pointer + # and size in the buffer plus the dtype on the column. Use DLPack as NumPy supports + # it since https://github.com/numpy/numpy/pull/19083 + ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype) + + if bit_width == 1: + assert length is not None, "`length` must be specified for a bit-mask buffer." + pa = import_optional_dependency("pyarrow") + arr = pa.BooleanArray.from_buffers( + pa.bool_(), + length, + [None, pa.foreign_buffer(buffer.ptr, length)], + offset=offset, + ) + return np.asarray(arr) + else: + data_pointer = ctypes.cast( + buffer.ptr + (offset * bit_width // 8), ctypes.POINTER(ctypes_type) + ) + if length > 0: + return np.ctypeslib.as_array(data_pointer, shape=(length,)) + return np.array([], dtype=ctypes_type) + + +def set_nulls( + data: np.ndarray | pd.Series, + col: Column, + validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, + allow_modify_inplace: bool = True, +): + """ + Set null values for the data according to the column null kind. + + Parameters + ---------- + data : np.ndarray or pd.Series + Data to set nulls in. + col : Column + Column object that describes the `data`. + validity : tuple(Buffer, dtype) or None + The return value of ``col.buffers()``. We do not access the ``col.buffers()`` + here to not take the ownership of the memory of buffer objects. + allow_modify_inplace : bool, default: True + Whether to modify the `data` inplace when zero-copy is possible (True) or always + modify a copy of the `data` (False). + + Returns + ------- + np.ndarray or pd.Series + Data with the nulls being set. 
+ """ + if validity is None: + return data + null_kind, sentinel_val = col.describe_null + null_pos = None + + if null_kind == ColumnNullType.USE_SENTINEL: + null_pos = pd.Series(data) == sentinel_val + elif null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): + assert validity, "Expected to have a validity buffer for the mask" + valid_buff, valid_dtype = validity + null_pos = buffer_to_ndarray( + valid_buff, valid_dtype, offset=col.offset, length=col.size() + ) + if sentinel_val == 0: + null_pos = ~null_pos + elif null_kind in (ColumnNullType.NON_NULLABLE, ColumnNullType.USE_NAN): + pass + else: + raise NotImplementedError(f"Null kind {null_kind} is not yet supported.") + + if null_pos is not None and np.any(null_pos): + if not allow_modify_inplace: + data = data.copy() + try: + data[null_pos] = None + except TypeError: + # TypeError happens if the `data` dtype appears to be non-nullable + # in numpy notation (bool, int, uint). If this happens, + # cast the `data` to nullable float dtype. + data = data.astype(float) + data[null_pos] = None + except SettingWithCopyError: + # `SettingWithCopyError` may happen for datetime-like with missing values. + data = data.copy() + data[null_pos] = None + + return data diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/utils.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fd1c7c96392428a44543bbecc7b37931cf69fe59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/utils.py @@ -0,0 +1,178 @@ +""" +Utility functions and objects for implementing the interchange API. +""" + +from __future__ import annotations + +import typing + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, + DatetimeTZDtype, +) + +import pandas as pd + +if typing.TYPE_CHECKING: + from pandas._typing import DtypeObj + + +# Maps str(pyarrow.DataType) = C type format string +# Currently, no pyarrow API for this +PYARROW_CTYPES = { + "null": "n", + "bool": "b", + "uint8": "C", + "uint16": "S", + "uint32": "I", + "uint64": "L", + "int8": "c", + "int16": "S", + "int32": "i", + "int64": "l", + "halffloat": "e", # float16 + "float": "f", # float32 + "double": "g", # float64 + "string": "u", + "large_string": "U", + "binary": "z", + "time32[s]": "tts", + "time32[ms]": "ttm", + "time64[us]": "ttu", + "time64[ns]": "ttn", + "date32[day]": "tdD", + "date64[ms]": "tdm", + "timestamp[s]": "tss:", + "timestamp[ms]": "tsm:", + "timestamp[us]": "tsu:", + "timestamp[ns]": "tsn:", + "duration[s]": "tDs", + "duration[ms]": "tDm", + "duration[us]": "tDu", + "duration[ns]": "tDn", +} + + +class ArrowCTypes: + """ + Enum for Apache Arrow C type format strings. 
+ + The Arrow C data interface: + https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings + """ + + NULL = "n" + BOOL = "b" + INT8 = "c" + UINT8 = "C" + INT16 = "s" + UINT16 = "S" + INT32 = "i" + UINT32 = "I" + INT64 = "l" + UINT64 = "L" + FLOAT16 = "e" + FLOAT32 = "f" + FLOAT64 = "g" + STRING = "u" # utf-8 + LARGE_STRING = "U" # utf-8 + DATE32 = "tdD" + DATE64 = "tdm" + # Resoulution: + # - seconds -> 's' + # - milliseconds -> 'm' + # - microseconds -> 'u' + # - nanoseconds -> 'n' + TIMESTAMP = "ts{resolution}:{tz}" + TIME = "tt{resolution}" + + +class Endianness: + """Enum indicating the byte-order of a data-type.""" + + LITTLE = "<" + BIG = ">" + NATIVE = "=" + NA = "|" + + +def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str: + """ + Represent pandas `dtype` as a format string in Apache Arrow C notation. + + Parameters + ---------- + dtype : np.dtype + Datatype of pandas DataFrame to represent. + + Returns + ------- + str + Format string in Apache Arrow C notation of the given `dtype`. + """ + if isinstance(dtype, CategoricalDtype): + return ArrowCTypes.INT64 + elif dtype == np.dtype("O"): + return ArrowCTypes.STRING + elif isinstance(dtype, ArrowDtype): + import pyarrow as pa + + pa_type = dtype.pyarrow_dtype + if pa.types.is_decimal(pa_type): + return f"d:{pa_type.precision},{pa_type.scale}" + elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None: + return f"ts{pa_type.unit[0]}:{pa_type.tz}" + format_str = PYARROW_CTYPES.get(str(pa_type), None) + if format_str is not None: + return format_str + + format_str = getattr(ArrowCTypes, dtype.name.upper(), None) + if format_str is not None: + return format_str + + if lib.is_np_dtype(dtype, "M"): + # Selecting the first char of resolution string: + # dtype.str -> ' 'n' + resolution = np.datetime_data(dtype)[0][0] + return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="") + + elif isinstance(dtype, DatetimeTZDtype): + return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz) + + elif isinstance(dtype, pd.BooleanDtype): + return ArrowCTypes.BOOL + + raise NotImplementedError( + f"Conversion of {dtype} to Arrow C format string is not implemented." + ) + + +def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None: + """ + Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary. + + - Returns `None` if the input series is not backed by a multi-chunk pyarrow array + (and so doesn't need rechunking) + - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk + pyarrow array and `allow_copy` is `True`. + - Raises a `RuntimeError` if `allow_copy` is `False` and input is a + based by a multi-chunk pyarrow array. + """ + if not isinstance(series.dtype, pd.ArrowDtype): + return None + chunked_array = series.array._pa_array # type: ignore[attr-defined] + if len(chunked_array.chunks) == 1: + return None + if not allow_copy: + raise RuntimeError( + "Found multi-chunk pyarrow array, but `allow_copy` is False. " + "Please rechunk the array before calling this function, or set " + "`allow_copy=True`." 
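A rough sketch of the timestamp rule used by dtype_to_arrow_c_fmt above (the rule is re-stated here with public NumPy calls, not by invoking the private helper):

import numpy as np

dtype = np.dtype("datetime64[ns]")
resolution = np.datetime_data(dtype)[0][0]          # 'ns' -> 'n'
print("ts{resolution}:{tz}".format(resolution=resolution, tz=""))   # tsn: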
+ ) + arr = chunked_array.combine_chunks() + return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae889a7fdbc24935c0884e8bbcdf56bde8946460 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__init__.py @@ -0,0 +1,93 @@ +""" +Arithmetic operations for PandasObjects + +This is not a public API. +""" +from __future__ import annotations + +from pandas.core.ops.array_ops import ( + arithmetic_op, + comp_method_OBJECT_ARRAY, + comparison_op, + fill_binop, + get_array_op, + logical_op, + maybe_prepare_scalar_for_op, +) +from pandas.core.ops.common import ( + get_op_result_name, + unpack_zerodim_and_defer, +) +from pandas.core.ops.docstrings import make_flex_doc +from pandas.core.ops.invalid import invalid_comparison +from pandas.core.ops.mask_ops import ( + kleene_and, + kleene_or, + kleene_xor, +) +from pandas.core.roperator import ( + radd, + rand_, + rdiv, + rdivmod, + rfloordiv, + rmod, + rmul, + ror_, + rpow, + rsub, + rtruediv, + rxor, +) + +# ----------------------------------------------------------------------------- +# constants +ARITHMETIC_BINOPS: set[str] = { + "add", + "sub", + "mul", + "pow", + "mod", + "floordiv", + "truediv", + "divmod", + "radd", + "rsub", + "rmul", + "rpow", + "rmod", + "rfloordiv", + "rtruediv", + "rdivmod", +} + + +__all__ = [ + "ARITHMETIC_BINOPS", + "arithmetic_op", + "comparison_op", + "comp_method_OBJECT_ARRAY", + "invalid_comparison", + "fill_binop", + "kleene_and", + "kleene_or", + "kleene_xor", + "logical_op", + "make_flex_doc", + "radd", + "rand_", + "rdiv", + "rdivmod", + "rfloordiv", + "rmod", + "rmul", + "ror_", + "rpow", + "rsub", + "rtruediv", + "rxor", + "unpack_zerodim_and_defer", + "get_op_result_name", + "maybe_prepare_scalar_for_op", + "get_array_op", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d36cf6ed9eda461463b771cc50068de140cfb2a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39961ed33d52b56df09d1c6989a3d41c68517c8b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8e0eba64100a698cc23b82fa1db9a107e4a98ff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..35fa7c3c99edb8e0ca9a4f08a8ebdd145be395db Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6310157277e6adf55d55e00a990e78d09ab92182 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b71b3e46fad36cec425fe0c3ee8aed5a81126ee4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/array_ops.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4b762a359d321ea61660e78ec63d392f8436939d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/array_ops.py @@ -0,0 +1,604 @@ +""" +Functions for arithmetic and comparison operations on NumPy arrays and +ExtensionArrays. +""" +from __future__ import annotations + +import datetime +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Any, +) +import warnings + +import numpy as np + +from pandas._libs import ( + NaT, + Timedelta, + Timestamp, + lib, + ops as libops, +) +from pandas._libs.tslibs import ( + BaseOffset, + get_supported_dtype, + is_supported_dtype, + is_unitless, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + construct_1d_object_array_from_listlike, + find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_list_like, + is_numeric_v_string_like, + is_object_dtype, + is_scalar, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import roperator +from pandas.core.computation import expressions +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.ops import missing +from pandas.core.ops.dispatch import should_extension_dispatch +from pandas.core.ops.invalid import invalid_comparison + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Shape, + ) + +# ----------------------------------------------------------------------------- +# Masking NA values and fallbacks for operations numpy does not support + + +def fill_binop(left, right, fill_value): + """ + If a non-None fill_value is given, replace null entries in left and right + with this value, but only in positions where _one_ of left/right is null, + not both. + + Parameters + ---------- + left : array-like + right : array-like + fill_value : object + + Returns + ------- + left : array-like + right : array-like + + Notes + ----- + Makes copies if fill_value is not None and NAs are present. 
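The behaviour described in this docstring is what backs the public flex methods' fill_value argument; a small observable sketch:

import numpy as np
import pandas as pd

a = pd.Series([1.0, np.nan, 3.0])
b = pd.Series([np.nan, 2.0, 4.0])
# Positions where exactly one side is missing are filled with 0 before adding;
# a position missing on both sides would stay missing.
print(a.add(b, fill_value=0))       # 1.0, 2.0, 7.0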
+ """ + if fill_value is not None: + left_mask = isna(left) + right_mask = isna(right) + + # one but not both + mask = left_mask ^ right_mask + + if left_mask.any(): + # Avoid making a copy if we can + left = left.copy() + left[left_mask & mask] = fill_value + + if right_mask.any(): + # Avoid making a copy if we can + right = right.copy() + right[right_mask & mask] = fill_value + + return left, right + + +def comp_method_OBJECT_ARRAY(op, x, y): + if isinstance(y, list): + # e.g. test_tuple_categories + y = construct_1d_object_array_from_listlike(y) + + if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): + if not is_object_dtype(y.dtype): + y = y.astype(np.object_) + + if isinstance(y, (ABCSeries, ABCIndex)): + y = y._values + + if x.shape != y.shape: + raise ValueError("Shapes must match", x.shape, y.shape) + result = libops.vec_compare(x.ravel(), y.ravel(), op) + else: + result = libops.scalar_compare(x.ravel(), y, op) + return result.reshape(x.shape) + + +def _masked_arith_op(x: np.ndarray, y, op): + """ + If the given arithmetic operation fails, attempt it again on + only the non-null elements of the input array(s). + + Parameters + ---------- + x : np.ndarray + y : np.ndarray, Series, Index + op : binary operator + """ + # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes + # the logic valid for both Series and DataFrame ops. + xrav = x.ravel() + + if isinstance(y, np.ndarray): + dtype = find_common_type([x.dtype, y.dtype]) + result = np.empty(x.size, dtype=dtype) + + if len(x) != len(y): + raise ValueError(x.shape, y.shape) + ymask = notna(y) + + # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex + # we would get int64 dtype, see GH#19956 + yrav = y.ravel() + mask = notna(xrav) & ymask.ravel() + + # See GH#5284, GH#5035, GH#19448 for historical reference + if mask.any(): + result[mask] = op(xrav[mask], yrav[mask]) + + else: + if not is_scalar(y): + raise TypeError( + f"Cannot broadcast np.ndarray with operand of type { type(y) }" + ) + + # mask is only meaningful for x + result = np.empty(x.size, dtype=x.dtype) + mask = notna(xrav) + + # 1 ** np.nan is 1. So we have to unmask those. + if op is pow: + mask = np.where(x == 1, False, mask) + elif op is roperator.rpow: + mask = np.where(y == 1, False, mask) + + if mask.any(): + result[mask] = op(xrav[mask], y) + + np.putmask(result, ~mask, np.nan) + result = result.reshape(x.shape) # 2D compat + return result + + +def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False): + """ + Return the result of evaluating op on the passed in values. + + If native types are not compatible, try coercion to object dtype. + + Parameters + ---------- + left : np.ndarray + right : np.ndarray or scalar + Excludes DataFrame, Series, Index, ExtensionArray. + is_cmp : bool, default False + If this a comparison operation. 
+ + Returns + ------- + array-like + + Raises + ------ + TypeError : invalid operation + """ + if isinstance(right, str): + # can never use numexpr + func = op + else: + func = partial(expressions.evaluate, op) + + try: + result = func(left, right) + except TypeError: + if not is_cmp and ( + left.dtype == object or getattr(right, "dtype", None) == object + ): + # For object dtype, fallback to a masked operation (only operating + # on the non-missing values) + # Don't do this for comparisons, as that will handle complex numbers + # incorrectly, see GH#32047 + result = _masked_arith_op(left, right, op) + else: + raise + + if is_cmp and (is_scalar(result) or result is NotImplemented): + # numpy returned a scalar instead of operating element-wise + # e.g. numeric array vs str + # TODO: can remove this after dropping some future numpy version? + return invalid_comparison(left, right, op) + + return missing.dispatch_fill_zeros(op, left, right, result) + + +def arithmetic_op(left: ArrayLike, right: Any, op): + """ + Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame or Index. Series is *not* excluded. + op : {operator.add, operator.sub, ...} + Or one of the reversed variants from roperator. + + Returns + ------- + ndarray or ExtensionArray + Or a 2-tuple of these in the case of divmod or rdivmod. + """ + # NB: We assume that extract_array and ensure_wrapped_if_datetimelike + # have already been called on `left` and `right`, + # and `maybe_prepare_scalar_for_op` has already been called on `right` + # We need to special-case datetime64/timedelta64 dtypes (e.g. because numpy + # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390) + + if ( + should_extension_dispatch(left, right) + or isinstance(right, (Timedelta, BaseOffset, Timestamp)) + or right is NaT + ): + # Timedelta/Timestamp and other custom scalars are included in the check + # because numexpr will fail on it, see GH#31457 + res_values = op(left, right) + else: + # TODO we should handle EAs consistently and move this check before the if/else + # (https://github.com/pandas-dev/pandas/issues/41165) + # error: Argument 2 to "_bool_arith_check" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + _bool_arith_check(op, left, right) # type: ignore[arg-type] + + # error: Argument 1 to "_na_arithmetic_op" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type] + + return res_values + + +def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`. + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. 
+ op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le} + + Returns + ------- + ndarray or ExtensionArray + """ + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = ensure_wrapped_if_datetimelike(right) + + rvalues = lib.item_from_zerodim(rvalues) + if isinstance(rvalues, list): + # We don't catch tuple here bc we may be comparing e.g. MultiIndex + # to a tuple that represents a single entry, see test_compare_tuple_strs + rvalues = np.asarray(rvalues) + + if isinstance(rvalues, (np.ndarray, ABCExtensionArray)): + # TODO: make this treatment consistent across ops and classes. + # We are not catching all listlikes here (e.g. frozenset, tuple) + # The ambiguous case is object-dtype. See GH#27803 + if len(lvalues) != len(rvalues): + raise ValueError( + "Lengths must match to compare", lvalues.shape, rvalues.shape + ) + + if should_extension_dispatch(lvalues, rvalues) or ( + (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) + and lvalues.dtype != object + ): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + elif is_scalar(rvalues) and isna(rvalues): # TODO: but not pd.NA? + # numpy does not like comparisons vs None + if op is operator.ne: + res_values = np.ones(lvalues.shape, dtype=bool) + else: + res_values = np.zeros(lvalues.shape, dtype=bool) + + elif is_numeric_v_string_like(lvalues, rvalues): + # GH#36377 going through the numexpr path would incorrectly raise + return invalid_comparison(lvalues, rvalues, op) + + elif lvalues.dtype == object or isinstance(rvalues, str): + res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) + + else: + res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) + + return res_values + + +def na_logical_op(x: np.ndarray, y, op): + try: + # For exposition, write: + # yarr = isinstance(y, np.ndarray) + # yint = is_integer(y) or (yarr and y.dtype.kind == "i") + # ybool = is_bool(y) or (yarr and y.dtype.kind == "b") + # xint = x.dtype.kind == "i" + # xbool = x.dtype.kind == "b" + # Then Cases where this goes through without raising include: + # (xint or xbool) and (yint or bool) + result = op(x, y) + except TypeError: + if isinstance(y, np.ndarray): + # bool-bool dtype operations should be OK, should not get here + assert not (x.dtype.kind == "b" and y.dtype.kind == "b") + x = ensure_object(x) + y = ensure_object(y) + result = libops.vec_binop(x.ravel(), y.ravel(), op) + else: + # let null fall thru + assert lib.is_scalar(y) + if not isna(y): + y = bool(y) + try: + result = libops.scalar_binop(x, y, op) + except ( + TypeError, + ValueError, + AttributeError, + OverflowError, + NotImplementedError, + ) as err: + typ = type(y).__name__ + raise TypeError( + f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array " + f"and scalar of type [{typ}]" + ) from err + + return result.reshape(x.shape) + + +def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a logical operation `|`, `&`, or `^`. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. + op : {operator.and_, operator.or_, operator.xor} + Or one of the reversed variants from roperator. 
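The numeric-vs-string branch above is observable from the public API: equality-style comparisons return all-False/all-True instead of raising, while ordered comparisons raise:

import pandas as pd

s = pd.Series([1, 2, 3])
print(s == "a")          # all False
print(s != "a")          # all True
try:
    s < "a"              # ordered comparison between int64 and str is invalid
except TypeError as err:
    print("raised:", err)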
+ + Returns + ------- + ndarray or ExtensionArray + """ + + def fill_bool(x, left=None): + # if `left` is specifically not-boolean, we do not cast to bool + if x.dtype.kind in "cfO": + # dtypes that can hold NA + mask = isna(x) + if mask.any(): + x = x.astype(object) + x[mask] = False + + if left is None or left.dtype.kind == "b": + x = x.astype(bool) + return x + + right = lib.item_from_zerodim(right) + if is_list_like(right) and not hasattr(right, "dtype"): + # e.g. list, tuple + warnings.warn( + "Logical ops (and, or, xor) between Pandas objects and dtype-less " + "sequences (e.g. list, tuple) are deprecated and will raise in a " + "future version. Wrap the object in a Series, Index, or np.array " + "before operating instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + right = construct_1d_object_array_from_listlike(right) + + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = right + + if should_extension_dispatch(lvalues, rvalues): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + else: + if isinstance(rvalues, np.ndarray): + is_other_int_dtype = rvalues.dtype.kind in "iu" + if not is_other_int_dtype: + rvalues = fill_bool(rvalues, lvalues) + + else: + # i.e. scalar + is_other_int_dtype = lib.is_integer(rvalues) + + res_values = na_logical_op(lvalues, rvalues, op) + + # For int vs int `^`, `|`, `&` are bitwise operators and return + # integer dtypes. Otherwise these are boolean ops + if not (left.dtype.kind in "iu" and is_other_int_dtype): + res_values = fill_bool(res_values) + + return res_values + + +def get_array_op(op): + """ + Return a binary array operation corresponding to the given operator op. + + Parameters + ---------- + op : function + Binary operator from operator or roperator module. + + Returns + ------- + functools.partial + """ + if isinstance(op, partial): + # We get here via dispatch_to_series in DataFrame case + # e.g. test_rolling_consistency_var_debiasing_factors + return op + + op_name = op.__name__.strip("_").lstrip("r") + if op_name == "arith_op": + # Reached via DataFrame._combine_frame i.e. flex methods + # e.g. test_df_add_flex_filled_mixed_dtypes + return op + + if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}: + return partial(comparison_op, op=op) + elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}: + return partial(logical_op, op=op) + elif op_name in { + "add", + "sub", + "mul", + "truediv", + "floordiv", + "mod", + "divmod", + "pow", + }: + return partial(arithmetic_op, op=op) + else: + raise NotImplementedError(op_name) + + +def maybe_prepare_scalar_for_op(obj, shape: Shape): + """ + Cast non-pandas objects to pandas types to unify behavior of arithmetic + and comparison operations. + + Parameters + ---------- + obj: object + shape : tuple[int] + + Returns + ------- + out : object + + Notes + ----- + Be careful to call this *after* determining the `name` attribute to be + attached to the result of the arithmetic operation. 
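This upcasting is what lets plain stdlib scalars participate in datetime-like arithmetic; a quick sketch:

import datetime
import pandas as pd

s = pd.Series(pd.to_datetime(["2024-01-01", "2024-01-02"]))
print(s + datetime.timedelta(days=1))          # timedelta is cast up to pd.Timedelta
print(s > datetime.datetime(2024, 1, 1, 12))   # datetime is cast up to pd.Timestamp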
+ """ + if type(obj) is datetime.timedelta: + # GH#22390 cast up to Timedelta to rely on Timedelta + # implementation; otherwise operation against numeric-dtype + # raises TypeError + return Timedelta(obj) + elif type(obj) is datetime.datetime: + # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above + return Timestamp(obj) + elif isinstance(obj, np.datetime64): + # GH#28080 numpy casts integer-dtype to datetime64 when doing + # array[int] + datetime64, which we do not allow + if isna(obj): + from pandas.core.arrays import DatetimeArray + + # Avoid possible ambiguities with pd.NaT + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("datetime64[ns]") + elif not is_supported_dtype(obj.dtype): + new_dtype = get_supported_dtype(obj.dtype) + obj = obj.astype(new_dtype) + right = np.broadcast_to(obj, shape) + return DatetimeArray._simple_new(right, dtype=right.dtype) + + return Timestamp(obj) + + elif isinstance(obj, np.timedelta64): + if isna(obj): + from pandas.core.arrays import TimedeltaArray + + # wrapping timedelta64("NaT") in Timedelta returns NaT, + # which would incorrectly be treated as a datetime-NaT, so + # we broadcast and wrap in a TimedeltaArray + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("timedelta64[ns]") + elif not is_supported_dtype(obj.dtype): + new_dtype = get_supported_dtype(obj.dtype) + obj = obj.astype(new_dtype) + right = np.broadcast_to(obj, shape) + return TimedeltaArray._simple_new(right, dtype=right.dtype) + + # In particular non-nanosecond timedelta64 needs to be cast to + # nanoseconds, or else we get undesired behavior like + # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') + return Timedelta(obj) + + # We want NumPy numeric scalars to behave like Python scalars + # post NEP 50 + elif isinstance(obj, np.integer): + return int(obj) + + elif isinstance(obj, np.floating): + return float(obj) + + return obj + + +_BOOL_OP_NOT_ALLOWED = { + operator.truediv, + roperator.rtruediv, + operator.floordiv, + roperator.rfloordiv, + operator.pow, + roperator.rpow, +} + + +def _bool_arith_check(op, a: np.ndarray, b): + """ + In contrast to numpy, pandas raises an error for certain operations + with booleans. + """ + if op in _BOOL_OP_NOT_ALLOWED: + if a.dtype.kind == "b" and (is_bool_dtype(b) or lib.is_bool(b)): + op_name = op.__name__.strip("_").lstrip("r") + raise NotImplementedError( + f"operator '{op_name}' not implemented for bool dtypes" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/common.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/common.py new file mode 100644 index 0000000000000000000000000000000000000000..559977bacf881552d546e7704d4cf4b12b4a32fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/common.py @@ -0,0 +1,146 @@ +""" +Boilerplate functions used in defining binary operations. +""" +from __future__ import annotations + +from functools import wraps +from typing import ( + TYPE_CHECKING, + Callable, +) + +from pandas._libs.lib import item_from_zerodim +from pandas._libs.missing import is_matching_na + +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import F + + +def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]: + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. 
+ + Parameters + ---------- + name : str + + Returns + ------- + decorator + """ + + def wrapper(method: F) -> F: + return _unpack_zerodim_and_defer(method, name) + + return wrapper + + +def _unpack_zerodim_and_defer(method, name: str): + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. + + Ensure method returns NotImplemented when operating against "senior" + classes. Ensure zero-dimensional ndarrays are always unpacked. + + Parameters + ---------- + method : binary method + name : str + + Returns + ------- + method + """ + stripped_name = name.removeprefix("__").removesuffix("__") + is_cmp = stripped_name in {"eq", "ne", "lt", "le", "gt", "ge"} + + @wraps(method) + def new_method(self, other): + if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries): + # For comparison ops, Index does *not* defer to Series + pass + else: + prio = getattr(other, "__pandas_priority__", None) + if prio is not None: + if prio > self.__pandas_priority__: + # e.g. other is DataFrame while self is Index/Series/EA + return NotImplemented + + other = item_from_zerodim(other) + + return method(self, other) + + return new_method + + +def get_op_result_name(left, right): + """ + Find the appropriate name to pin to an operation result. This result + should always be either an Index or a Series. + + Parameters + ---------- + left : {Series, Index} + right : object + + Returns + ------- + name : object + Usually a string + """ + if isinstance(right, (ABCSeries, ABCIndex)): + name = _maybe_match_name(left, right) + else: + name = left.name + return name + + +def _maybe_match_name(a, b): + """ + Try to find a name to attach to the result of an operation between + a and b. If only one of these has a `name` attribute, return that + name. Otherwise return a consensus name if they match or None if + they have different names. + + Parameters + ---------- + a : object + b : object + + Returns + ------- + name : str or None + + See Also + -------- + pandas.core.common.consensus_name_attr + """ + a_has = hasattr(a, "name") + b_has = hasattr(b, "name") + if a_has and b_has: + try: + if a.name == b.name: + return a.name + elif is_matching_na(a.name, b.name): + # e.g. both are np.nan + return a.name + else: + return None + except TypeError: + # pd.NA + if is_matching_na(a.name, b.name): + return a.name + return None + except ValueError: + # e.g. np.int64(1) vs (np.int64(1), np.int64(2)) + return None + elif a_has: + return a.name + elif b_has: + return b.name + return None diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/dispatch.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..a939fdd3d041e9f99dde7ea40fd7aa0572d0d9b7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/dispatch.py @@ -0,0 +1,30 @@ +""" +Functions for defining unary operations. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.core.dtypes.generic import ABCExtensionArray + +if TYPE_CHECKING: + from pandas._typing import ArrayLike + + +def should_extension_dispatch(left: ArrayLike, right: Any) -> bool: + """ + Identify cases where Series operation should dispatch to ExtensionArray method. 
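The name-matching rules above are visible on Series results; a quick check:

import pandas as pd

a = pd.Series([1, 2], name="x")
b = pd.Series([3, 4], name="x")
c = pd.Series([5, 6], name="y")
print((a + b).name)      # 'x'  -> names match, so the name is kept
print((a + c).name)      # None -> names differ, no consensus name
print((a + 1).name)      # 'x'  -> scalar operand, the left name is used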
+ + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + + Returns + ------- + bool + """ + return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/docstrings.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/docstrings.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2e532536d8491af44631e52982217a04ef5b17 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/docstrings.py @@ -0,0 +1,772 @@ +""" +Templating for ops docstrings +""" +from __future__ import annotations + + +def make_flex_doc(op_name: str, typ: str) -> str: + """ + Make the appropriate substitutions for the given operation and class-typ + into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring + to attach to a generated method. + + Parameters + ---------- + op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} + typ : str {series, 'dataframe']} + + Returns + ------- + doc : str + """ + op_name = op_name.replace("__", "") + op_desc = _op_descriptions[op_name] + + op_desc_op = op_desc["op"] + assert op_desc_op is not None # for mypy + if op_name.startswith("r"): + equiv = f"other {op_desc_op} {typ}" + elif op_name == "divmod": + equiv = f"{op_name}({typ}, other)" + else: + equiv = f"{typ} {op_desc_op} other" + + if typ == "series": + base_doc = _flex_doc_SERIES + if op_desc["reverse"]: + base_doc += _see_also_reverse_SERIES.format( + reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"] + ) + doc_no_examples = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + series_returns=op_desc["series_returns"], + ) + ser_example = op_desc["series_examples"] + if ser_example: + doc = doc_no_examples + ser_example + else: + doc = doc_no_examples + elif typ == "dataframe": + if op_name in ["eq", "ne", "le", "lt", "ge", "gt"]: + base_doc = _flex_comp_doc_FRAME + doc = _flex_comp_doc_FRAME.format( + op_name=op_name, + desc=op_desc["desc"], + ) + else: + base_doc = _flex_doc_FRAME + doc = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + reverse=op_desc["reverse"], + ) + else: + raise AssertionError("Invalid typ argument.") + return doc + + +_common_examples_algebra_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +dtype: float64 +>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) +>>> b +a 1.0 +b NaN +d 1.0 +e NaN +dtype: float64""" + +_common_examples_comparison_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +e 1.0 +dtype: float64 +>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f']) +>>> b +a 0.0 +b 1.0 +c 2.0 +d NaN +f 1.0 +dtype: float64""" + +_add_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.add(b, fill_value=0) +a 2.0 +b 1.0 +c 1.0 +d 1.0 +e NaN +dtype: float64 +""" +) + +_sub_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.subtract(b, fill_value=0) +a 0.0 +b 1.0 +c 1.0 +d -1.0 +e NaN +dtype: float64 +""" +) + +_mul_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.multiply(b, fill_value=0) +a 1.0 +b 0.0 +c 0.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_div_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divide(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: 
float64 +""" +) + +_floordiv_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.floordiv(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 +""" +) + +_divmod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divmod(b, fill_value=0) +(a 1.0 + b inf + c inf + d 0.0 + e NaN + dtype: float64, + a 0.0 + b NaN + c NaN + d 0.0 + e NaN + dtype: float64) +""" +) + +_mod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.mod(b, fill_value=0) +a 0.0 +b NaN +c NaN +d 0.0 +e NaN +dtype: float64 +""" +) +_pow_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.pow(b, fill_value=0) +a 1.0 +b 1.0 +c 1.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_ne_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.ne(b, fill_value=0) +a False +b True +c True +d True +e True +dtype: bool +""" +) + +_eq_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.eq(b, fill_value=0) +a True +b False +c False +d False +e False +dtype: bool +""" +) + +_lt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.lt(b, fill_value=0) +a False +b False +c True +d False +e False +f True +dtype: bool +""" +) + +_le_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.le(b, fill_value=0) +a False +b True +c True +d False +e False +f True +dtype: bool +""" +) + +_gt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.gt(b, fill_value=0) +a True +b False +c False +d False +e True +f False +dtype: bool +""" +) + +_ge_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.ge(b, fill_value=0) +a True +b True +c False +d False +e True +f False +dtype: bool +""" +) + +_returns_series = """Series\n The result of the operation.""" + +_returns_tuple = """2-Tuple of Series\n The result of the operation.""" + +_op_descriptions: dict[str, dict[str, str | None]] = { + # Arithmetic Operators + "add": { + "op": "+", + "desc": "Addition", + "reverse": "radd", + "series_examples": _add_example_SERIES, + "series_returns": _returns_series, + }, + "sub": { + "op": "-", + "desc": "Subtraction", + "reverse": "rsub", + "series_examples": _sub_example_SERIES, + "series_returns": _returns_series, + }, + "mul": { + "op": "*", + "desc": "Multiplication", + "reverse": "rmul", + "series_examples": _mul_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "mod": { + "op": "%", + "desc": "Modulo", + "reverse": "rmod", + "series_examples": _mod_example_SERIES, + "series_returns": _returns_series, + }, + "pow": { + "op": "**", + "desc": "Exponential power", + "reverse": "rpow", + "series_examples": _pow_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "truediv": { + "op": "/", + "desc": "Floating division", + "reverse": "rtruediv", + "series_examples": _div_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "floordiv": { + "op": "//", + "desc": "Integer division", + "reverse": "rfloordiv", + "series_examples": _floordiv_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "divmod": { + "op": "divmod", + "desc": "Integer division and modulo", + "reverse": "rdivmod", + "series_examples": _divmod_example_SERIES, + "series_returns": _returns_tuple, + "df_examples": None, + }, + # Comparison Operators + "eq": { + "op": "==", + "desc": "Equal to", + "reverse": None, + "series_examples": _eq_example_SERIES, + "series_returns": _returns_series, + }, + "ne": { + "op": "!=", + 
"desc": "Not equal to", + "reverse": None, + "series_examples": _ne_example_SERIES, + "series_returns": _returns_series, + }, + "lt": { + "op": "<", + "desc": "Less than", + "reverse": None, + "series_examples": _lt_example_SERIES, + "series_returns": _returns_series, + }, + "le": { + "op": "<=", + "desc": "Less than or equal to", + "reverse": None, + "series_examples": _le_example_SERIES, + "series_returns": _returns_series, + }, + "gt": { + "op": ">", + "desc": "Greater than", + "reverse": None, + "series_examples": _gt_example_SERIES, + "series_returns": _returns_series, + }, + "ge": { + "op": ">=", + "desc": "Greater than or equal to", + "reverse": None, + "series_examples": _ge_example_SERIES, + "series_returns": _returns_series, + }, +} + +_py_num_ref = """see + `Python documentation + `_ + for more details""" +_op_names = list(_op_descriptions.keys()) +for key in _op_names: + reverse_op = _op_descriptions[key]["reverse"] + if reverse_op is not None: + _op_descriptions[reverse_op] = _op_descriptions[key].copy() + _op_descriptions[reverse_op]["reverse"] = key + _op_descriptions[key][ + "see_also_desc" + ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}" + _op_descriptions[reverse_op][ + "see_also_desc" + ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}" + +_flex_doc_SERIES = """ +Return {desc} of series and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value for +missing data in either one of the inputs. + +Parameters +---------- +other : Series or scalar value +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : None or float value, default None (NaN) + Fill existing missing (NaN) values, and any new element needed for + successful Series alignment, with this value before computation. + If data in both corresponding Series locations is missing + the result of filling (at that location) will be missing. +axis : {{0 or 'index'}} + Unused. Parameter needed for compatibility with DataFrame. + +Returns +------- +{series_returns} +""" + +_see_also_reverse_SERIES = """ +See Also +-------- +Series.{reverse} : {see_also_desc}. +""" + +_flex_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value +for missing data in one of the inputs. With reverse version, `{reverse}`. + +Among flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to +arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. + +Parameters +---------- +other : scalar, sequence, Series, dict or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}} + Whether to compare by the index (0 or 'index') or columns. + (1 or 'columns'). For Series input, axis to match Series index on. +level : int or label + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : float or None, default None + Fill existing missing (NaN) values, and any new element needed for + successful DataFrame alignment, with this value before computation. + If data in both corresponding DataFrame locations is missing + the result will be missing. + +Returns +------- +DataFrame + Result of the arithmetic operation. + +See Also +-------- +DataFrame.add : Add DataFrames. +DataFrame.sub : Subtract DataFrames. +DataFrame.mul : Multiply DataFrames. 
+DataFrame.div : Divide DataFrames (float division). +DataFrame.truediv : Divide DataFrames (float division). +DataFrame.floordiv : Divide DataFrames (integer division). +DataFrame.mod : Calculate modulo (remainder after division). +DataFrame.pow : Calculate exponential power. + +Notes +----- +Mismatched indices will be unioned together. + +Examples +-------- +>>> df = pd.DataFrame({{'angles': [0, 3, 4], +... 'degrees': [360, 180, 360]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> df + angles degrees +circle 0 360 +triangle 3 180 +rectangle 4 360 + +Add a scalar with operator version which return the same +results. + +>>> df + 1 + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +>>> df.add(1) + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +Divide by constant with reverse version. + +>>> df.div(10) + angles degrees +circle 0.0 36.0 +triangle 0.3 18.0 +rectangle 0.4 36.0 + +>>> df.rdiv(10) + angles degrees +circle inf 0.027778 +triangle 3.333333 0.055556 +rectangle 2.500000 0.027778 + +Subtract a list and Series by axis with operator version. + +>>> df - [1, 2] + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub([1, 2], axis='columns') + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']), +... axis='index') + angles degrees +circle -1 359 +triangle 2 179 +rectangle 3 359 + +Multiply a dictionary by axis. + +>>> df.mul({{'angles': 0, 'degrees': 2}}) + angles degrees +circle 0 720 +triangle 0 360 +rectangle 0 720 + +>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index') + angles degrees +circle 0 0 +triangle 6 360 +rectangle 12 1080 + +Multiply a DataFrame of different shape with operator version. + +>>> other = pd.DataFrame({{'angles': [0, 3, 4]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> other + angles +circle 0 +triangle 3 +rectangle 4 + +>>> df * other + angles degrees +circle 0 NaN +triangle 9 NaN +rectangle 16 NaN + +>>> df.mul(other, fill_value=0) + angles degrees +circle 0 0.0 +triangle 9 0.0 +rectangle 16 0.0 + +Divide by a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6], +... 'degrees': [360, 180, 360, 360, 540, 720]}}, +... index=[['A', 'A', 'A', 'B', 'B', 'B'], +... ['circle', 'triangle', 'rectangle', +... 'square', 'pentagon', 'hexagon']]) +>>> df_multindex + angles degrees +A circle 0 360 + triangle 3 180 + rectangle 4 360 +B square 4 360 + pentagon 5 540 + hexagon 6 720 + +>>> df.div(df_multindex, level=1, fill_value=0) + angles degrees +A circle NaN 1.0 + triangle 1.0 1.0 + rectangle 1.0 1.0 +B square 0.0 0.0 + pentagon 0.0 0.0 + hexagon 0.0 0.0 +""" + +_flex_comp_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison +operators. + +Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis +(rows or columns) and level for comparison. + +Parameters +---------- +other : scalar, sequence, Series, or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}}, default 'columns' + Whether to compare by the index (0 or 'index') or columns + (1 or 'columns'). +level : int or label + Broadcast across a level, matching Index values on the passed + MultiIndex level. + +Returns +------- +DataFrame of bool + Result of the comparison. 
+ +See Also +-------- +DataFrame.eq : Compare DataFrames for equality elementwise. +DataFrame.ne : Compare DataFrames for inequality elementwise. +DataFrame.le : Compare DataFrames for less than inequality + or equality elementwise. +DataFrame.lt : Compare DataFrames for strictly less than + inequality elementwise. +DataFrame.ge : Compare DataFrames for greater than inequality + or equality elementwise. +DataFrame.gt : Compare DataFrames for strictly greater than + inequality elementwise. + +Notes +----- +Mismatched indices will be unioned together. +`NaN` values are considered different (i.e. `NaN` != `NaN`). + +Examples +-------- +>>> df = pd.DataFrame({{'cost': [250, 150, 100], +... 'revenue': [100, 250, 300]}}, +... index=['A', 'B', 'C']) +>>> df + cost revenue +A 250 100 +B 150 250 +C 100 300 + +Comparison with a scalar, using either the operator or method: + +>>> df == 100 + cost revenue +A False True +B False False +C True False + +>>> df.eq(100) + cost revenue +A False True +B False False +C True False + +When `other` is a :class:`Series`, the columns of a DataFrame are aligned +with the index of `other` and broadcast: + +>>> df != pd.Series([100, 250], index=["cost", "revenue"]) + cost revenue +A True True +B True False +C False True + +Use the method to control the broadcast axis: + +>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index') + cost revenue +A True False +B True True +C True True +D True True + +When comparing to an arbitrary sequence, the number of columns must +match the number elements in `other`: + +>>> df == [250, 100] + cost revenue +A True True +B False False +C False False + +Use the method to control the axis: + +>>> df.eq([250, 250, 100], axis='index') + cost revenue +A True False +B False True +C True False + +Compare to a DataFrame of different shape. + +>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}}, +... index=['A', 'B', 'C', 'D']) +>>> other + revenue +A 300 +B 250 +C 100 +D 150 + +>>> df.gt(other) + cost revenue +A False False +B False False +C False True +D False False + +Compare to a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220], +... 'revenue': [100, 250, 300, 200, 175, 225]}}, +... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'], +... ['A', 'B', 'C', 'A', 'B', 'C']]) +>>> df_multindex + cost revenue +Q1 A 250 100 + B 150 250 + C 100 300 +Q2 A 150 200 + B 300 175 + C 220 225 + +>>> df.le(df_multindex, level=1) + cost revenue +Q1 A True True + B True True + C True True +Q2 A False True + B True False + C True False +""" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/invalid.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/invalid.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ae6d359ac2205b01706211382d116b29176c7a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/invalid.py @@ -0,0 +1,62 @@ +""" +Templates for invalid operations. 
+""" +from __future__ import annotations + +import operator +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +def invalid_comparison(left, right, op) -> npt.NDArray[np.bool_]: + """ + If a comparison has mismatched types and is not necessarily meaningful, + follow python3 conventions by: + + - returning all-False for equality + - returning all-True for inequality + - raising TypeError otherwise + + Parameters + ---------- + left : array-like + right : scalar, array-like + op : operator.{eq, ne, lt, le, gt} + + Raises + ------ + TypeError : on inequality comparisons + """ + if op is operator.eq: + res_values = np.zeros(left.shape, dtype=bool) + elif op is operator.ne: + res_values = np.ones(left.shape, dtype=bool) + else: + typ = type(right).__name__ + raise TypeError(f"Invalid comparison between dtype={left.dtype} and {typ}") + return res_values + + +def make_invalid_op(name: str): + """ + Return a binary method that always raises a TypeError. + + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + + def invalid_op(self, other=None): + typ = type(self).__name__ + raise TypeError(f"cannot perform {name} with this index type: {typ}") + + invalid_op.__name__ = name + return invalid_op diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..adc1f63c568bf579f31b13446f4614435d443df1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py @@ -0,0 +1,189 @@ +""" +Ops for masked arrays. +""" +from __future__ import annotations + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) + + +def kleene_or( + left: bool | np.ndarray | libmissing.NAType, + right: bool | np.ndarray | libmissing.NAType, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``or`` using Kleene logic. + + Values are NA where we have ``NA | NA`` or ``NA | False``. + ``NA | True`` is considered True. + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical or, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. 
This is safe, since + # A | B == B | A + if left_mask is None: + return kleene_or(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + + raise_for_nan(right, method="or") + + if right is libmissing.NA: + result = left.copy() + else: + result = left | right + + if right_mask is not None: + # output is unknown where (False & NA), (NA & False), (NA & NA) + left_false = ~(left | left_mask) + right_false = ~(right | right_mask) + mask = ( + (left_false & right_mask) + | (right_false & left_mask) + | (left_mask & right_mask) + ) + else: + if right is True: + mask = np.zeros_like(left_mask) + elif right is libmissing.NA: + mask = (~left & ~left_mask) | left_mask + else: + # False + mask = left_mask.copy() + + return result, mask + + +def kleene_xor( + left: bool | np.ndarray | libmissing.NAType, + right: bool | np.ndarray | libmissing.NAType, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``xor`` using Kleene logic. + + This is the same as ``or``, with the following adjustments + + * True, True -> False + * True, NA -> NA + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical xor, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. This is safe, since + # A ^ B == B ^ A + if left_mask is None: + return kleene_xor(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + + raise_for_nan(right, method="xor") + if right is libmissing.NA: + result = np.zeros_like(left) + else: + result = left ^ right + + if right_mask is None: + if right is libmissing.NA: + mask = np.ones_like(left_mask) + else: + mask = left_mask.copy() + else: + mask = left_mask | right_mask + + return result, mask + + +def kleene_and( + left: bool | libmissing.NAType | np.ndarray, + right: bool | libmissing.NAType | np.ndarray, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``and`` using Kleene logic. + + Values are ``NA`` for ``NA & NA`` or ``True & NA``. + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical xor, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. 
This is safe, since + # A & B == B & A + if left_mask is None: + return kleene_and(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + raise_for_nan(right, method="and") + + if right is libmissing.NA: + result = np.zeros_like(left) + else: + result = left & right + + if right_mask is None: + # Scalar `right` + if right is libmissing.NA: + mask = (left & ~left_mask) | left_mask + + else: + mask = left_mask.copy() + if right is False: + # unmask everything + mask[:] = False + else: + # unmask where either left or right is False + left_false = ~(left | left_mask) + right_false = ~(right | right_mask) + mask = (left_mask & ~right_false) | (right_mask & ~left_false) + + return result, mask + + +def raise_for_nan(value, method: str) -> None: + if lib.is_float(value) and np.isnan(value): + raise ValueError(f"Cannot perform logical '{method}' with floating NaN") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/missing.py b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/missing.py new file mode 100644 index 0000000000000000000000000000000000000000..fc685935a35fceab74012912d3c3cae65b9c1818 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/core/ops/missing.py @@ -0,0 +1,176 @@ +""" +Missing data handling for arithmetic operations. + +In particular, pandas conventions regarding division by zero differ +from numpy in the following ways: + 1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2) + gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for + the remaining pairs + (the remaining being dtype1==dtype2==intN and dtype==dtype2==uintN). + + pandas convention is to return [-inf, nan, inf] for all dtype + combinations. + + Note: the numpy behavior described here is py3-specific. + + 2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2) + gives precisely the same results as the // operation. + + pandas convention is to return [nan, nan, nan] for all dtype + combinations. + + 3) divmod behavior consistent with 1) and 2). +""" +from __future__ import annotations + +import operator + +import numpy as np + +from pandas.core import roperator + + +def _fill_zeros(result: np.ndarray, x, y): + """ + If this is a reversed op, then flip x,y + + If we have an integer value (or array in y) + and we have 0's, fill them with np.nan, + return the result. + + Mask the nan's from x. + """ + if result.dtype.kind == "f": + return result + + is_variable_type = hasattr(y, "dtype") + is_scalar_type = not isinstance(y, np.ndarray) + + if not is_variable_type and not is_scalar_type: + # e.g. test_series_ops_name_retention with mod we get here with list/tuple + return result + + if is_scalar_type: + y = np.array(y) + + if y.dtype.kind in "iu": + ymask = y == 0 + if ymask.any(): + # GH#7325, mask and nans must be broadcastable + mask = ymask & ~np.isnan(result) + + # GH#9308 doing ravel on result and mask can improve putmask perf, + # but can also make unwanted copies. + result = result.astype("float64", copy=False) + + np.putmask(result, mask, np.nan) + + return result + + +def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray: + """ + Set results of 0 // 0 to np.nan, regardless of the dtypes + of the numerator or the denominator. + + Parameters + ---------- + x : ndarray + y : ndarray + result : ndarray + + Returns + ------- + ndarray + The filled result. 
+ + Examples + -------- + >>> x = np.array([1, 0, -1], dtype=np.int64) + >>> x + array([ 1, 0, -1]) + >>> y = 0 # int 0; numpy behavior is different with float + >>> result = x // y + >>> result # raw numpy result does not fill division by zero + array([0, 0, 0]) + >>> mask_zero_div_zero(x, y, result) + array([ inf, nan, -inf]) + """ + + if not hasattr(y, "dtype"): + # e.g. scalar, tuple + y = np.array(y) + if not hasattr(x, "dtype"): + # e.g scalar, tuple + x = np.array(x) + + zmask = y == 0 + + if zmask.any(): + # Flip sign if necessary for -0.0 + zneg_mask = zmask & np.signbit(y) + zpos_mask = zmask & ~zneg_mask + + x_lt0 = x < 0 + x_gt0 = x > 0 + nan_mask = zmask & (x == 0) + neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0) + posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0) + + if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): + # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN + result = result.astype("float64", copy=False) + + result[nan_mask] = np.nan + result[posinf_mask] = np.inf + result[neginf_mask] = -np.inf + + return result + + +def dispatch_fill_zeros(op, left, right, result): + """ + Call _fill_zeros with the appropriate fill value depending on the operation, + with special logic for divmod and rdivmod. + + Parameters + ---------- + op : function (operator.add, operator.div, ...) + left : object (np.ndarray for non-reversed ops) + We have excluded ExtensionArrays here + right : object (np.ndarray for reversed ops) + We have excluded ExtensionArrays here + result : ndarray + + Returns + ------- + result : np.ndarray + + Notes + ----- + For divmod and rdivmod, the `result` parameter and returned `result` + is a 2-tuple of ndarray objects. + """ + if op is divmod: + result = ( + mask_zero_div_zero(left, right, result[0]), + _fill_zeros(result[1], left, right), + ) + elif op is roperator.rdivmod: + result = ( + mask_zero_div_zero(right, left, result[0]), + _fill_zeros(result[1], right, left), + ) + elif op is operator.floordiv: + # Note: no need to do this for truediv; in py3 numpy behaves the way + # we want. + result = mask_zero_div_zero(left, right, result) + elif op is roperator.rfloordiv: + # Note: no need to do this for rtruediv; in py3 numpy behaves the way + # we want. + result = mask_zero_div_zero(right, left, result) + elif op is operator.mod: + result = _fill_zeros(result, left, right) + elif op is roperator.rmod: + result = _fill_zeros(result, right, left) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/dim2.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/dim2.py new file mode 100644 index 0000000000000000000000000000000000000000..132cda5a94ed00e74b7f36869bfe0c789dd5ccd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/dim2.py @@ -0,0 +1,345 @@ +""" +Tests for 2D compatibility. +""" +import numpy as np +import pytest + +from pandas._libs.missing import is_matching_na + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer_dtype, +) + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + +class Dim2CompatTests: + # Note: these are ONLY for ExtensionArray subclasses that support 2D arrays. + # i.e. not for pyarrow-backed EAs. 
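# Editor's illustrative sketch, not part of the pandas test suite or the file
# above: the repeat/reshape pattern these 2D-compatibility tests rely on,
# shown with a DatetimeArray, one of the NDArrayBacked dtypes that reports
# 2D support. The choice of dtype is an assumption made only for this example.
#
#     import pandas as pd
#
#     data = pd.array(pd.date_range("2020-01-01", periods=4))
#     arr2d = data.repeat(2).reshape(-1, 2)  # each element twice -> 4 rows x 2 cols
#     assert arr2d.shape == (4, 2)
#     assert arr2d.T.shape == (2, 4)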
+ + @pytest.fixture(autouse=True) + def skip_if_doesnt_support_2d(self, dtype, request): + if not dtype._supports_2d: + node = request.node + # In cases where we are mixed in to ExtensionTests, we only want to + # skip tests that are defined in Dim2CompatTests + test_func = node._obj + if test_func.__qualname__.startswith("Dim2CompatTests"): + # TODO: is there a less hacky way of checking this? + pytest.skip(f"{dtype} does not support 2D.") + + def test_transpose(self, data): + arr2d = data.repeat(2).reshape(-1, 2) + shape = arr2d.shape + assert shape[0] != shape[-1] # otherwise the rest of the test is useless + + assert arr2d.T.shape == shape[::-1] + + def test_frame_from_2d_array(self, data): + arr2d = data.repeat(2).reshape(-1, 2) + + df = pd.DataFrame(arr2d) + expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]}) + tm.assert_frame_equal(df, expected) + + def test_swapaxes(self, data): + arr2d = data.repeat(2).reshape(-1, 2) + + result = arr2d.swapaxes(0, 1) + expected = arr2d.T + tm.assert_extension_array_equal(result, expected) + + def test_delete_2d(self, data): + arr2d = data.repeat(3).reshape(-1, 3) + + # axis = 0 + result = arr2d.delete(1, axis=0) + expected = data.delete(1).repeat(3).reshape(-1, 3) + tm.assert_extension_array_equal(result, expected) + + # axis = 1 + result = arr2d.delete(1, axis=1) + expected = data.repeat(2).reshape(-1, 2) + tm.assert_extension_array_equal(result, expected) + + def test_take_2d(self, data): + arr2d = data.reshape(-1, 1) + + result = arr2d.take([0, 0, -1], axis=0) + + expected = data.take([0, 0, -1]).reshape(-1, 1) + tm.assert_extension_array_equal(result, expected) + + def test_repr_2d(self, data): + # this could fail in a corner case where an element contained the name + res = repr(data.reshape(1, -1)) + assert res.count(f"<{type(data).__name__}") == 1 + + res = repr(data.reshape(-1, 1)) + assert res.count(f"<{type(data).__name__}") == 1 + + def test_reshape(self, data): + arr2d = data.reshape(-1, 1) + assert arr2d.shape == (data.size, 1) + assert len(arr2d) == len(data) + + arr2d = data.reshape((-1, 1)) + assert arr2d.shape == (data.size, 1) + assert len(arr2d) == len(data) + + with pytest.raises(ValueError): + data.reshape((data.size, 2)) + with pytest.raises(ValueError): + data.reshape(data.size, 2) + + def test_getitem_2d(self, data): + arr2d = data.reshape(1, -1) + + result = arr2d[0] + tm.assert_extension_array_equal(result, data) + + with pytest.raises(IndexError): + arr2d[1] + + with pytest.raises(IndexError): + arr2d[-2] + + result = arr2d[:] + tm.assert_extension_array_equal(result, arr2d) + + result = arr2d[:, :] + tm.assert_extension_array_equal(result, arr2d) + + result = arr2d[:, 0] + expected = data[[0]] + tm.assert_extension_array_equal(result, expected) + + # dimension-expanding getitem on 1D + result = data[:, np.newaxis] + tm.assert_extension_array_equal(result, arr2d.T) + + def test_iter_2d(self, data): + arr2d = data.reshape(1, -1) + + objs = list(iter(arr2d)) + assert len(objs) == arr2d.shape[0] + + for obj in objs: + assert isinstance(obj, type(data)) + assert obj.dtype == data.dtype + assert obj.ndim == 1 + assert len(obj) == arr2d.shape[1] + + def test_tolist_2d(self, data): + arr2d = data.reshape(1, -1) + + result = arr2d.tolist() + expected = [data.tolist()] + + assert isinstance(result, list) + assert all(isinstance(x, list) for x in result) + + assert result == expected + + def test_concat_2d(self, data): + left = type(data)._concat_same_type([data, data]).reshape(-1, 2) + right = left.copy() + + # axis=0 + 
result = left._concat_same_type([left, right], axis=0) + expected = data._concat_same_type([data] * 4).reshape(-1, 2) + tm.assert_extension_array_equal(result, expected) + + # axis=1 + result = left._concat_same_type([left, right], axis=1) + assert result.shape == (len(data), 4) + tm.assert_extension_array_equal(result[:, :2], left) + tm.assert_extension_array_equal(result[:, 2:], right) + + # axis > 1 -> invalid + msg = "axis 2 is out of bounds for array of dimension 2" + with pytest.raises(ValueError, match=msg): + left._concat_same_type([left, right], axis=2) + + @pytest.mark.parametrize("method", ["backfill", "pad"]) + def test_fillna_2d_method(self, data_missing, method): + # pad_or_backfill is always along axis=0 + arr = data_missing.repeat(2).reshape(2, 2) + assert arr[0].isna().all() + assert not arr[1].isna().any() + + result = arr._pad_or_backfill(method=method, limit=None) + + expected = data_missing._pad_or_backfill(method=method).repeat(2).reshape(2, 2) + tm.assert_extension_array_equal(result, expected) + + # Reverse so that backfill is not a no-op. + arr2 = arr[::-1] + assert not arr2[0].isna().any() + assert arr2[1].isna().all() + + result2 = arr2._pad_or_backfill(method=method, limit=None) + + expected2 = ( + data_missing[::-1]._pad_or_backfill(method=method).repeat(2).reshape(2, 2) + ) + tm.assert_extension_array_equal(result2, expected2) + + @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"]) + def test_reductions_2d_axis_none(self, data, method): + arr2d = data.reshape(1, -1) + + err_expected = None + err_result = None + try: + expected = getattr(data, method)() + except Exception as err: + # if the 1D reduction is invalid, the 2D reduction should be as well + err_expected = err + try: + result = getattr(arr2d, method)(axis=None) + except Exception as err2: + err_result = err2 + + else: + result = getattr(arr2d, method)(axis=None) + + if err_result is not None or err_expected is not None: + assert type(err_result) == type(err_expected) + return + + assert is_matching_na(result, expected) or result == expected + + @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"]) + @pytest.mark.parametrize("min_count", [0, 1]) + def test_reductions_2d_axis0(self, data, method, min_count): + if min_count == 1 and method not in ["sum", "prod"]: + pytest.skip(f"min_count not relevant for {method}") + + arr2d = data.reshape(1, -1) + + kwargs = {} + if method in ["std", "var"]: + # pass ddof=0 so we get all-zero std instead of all-NA std + kwargs["ddof"] = 0 + elif method in ["prod", "sum"]: + kwargs["min_count"] = min_count + + try: + result = getattr(arr2d, method)(axis=0, **kwargs) + except Exception as err: + try: + getattr(data, method)() + except Exception as err2: + assert type(err) == type(err2) + return + else: + raise AssertionError("Both reductions should raise or neither") + + def get_reduction_result_dtype(dtype): + # windows and 32bit builds will in some cases have int32/uint32 + # where other builds will have int64/uint64. + if dtype.itemsize == 8: + return dtype + elif dtype.kind in "ib": + return NUMPY_INT_TO_DTYPE[np.dtype(int)] + else: + # i.e. 
dtype.kind == "u" + return NUMPY_INT_TO_DTYPE[np.dtype("uint")] + + if method in ["sum", "prod"]: + # std and var are not dtype-preserving + expected = data + if data.dtype.kind in "iub": + dtype = get_reduction_result_dtype(data.dtype) + expected = data.astype(dtype) + assert dtype == expected.dtype + + if min_count == 0: + fill_value = 1 if method == "prod" else 0 + expected = expected.fillna(fill_value) + + tm.assert_extension_array_equal(result, expected) + elif method == "median": + # std and var are not dtype-preserving + expected = data + tm.assert_extension_array_equal(result, expected) + elif method in ["mean", "std", "var"]: + if is_integer_dtype(data) or is_bool_dtype(data): + data = data.astype("Float64") + if method == "mean": + tm.assert_extension_array_equal(result, data) + else: + tm.assert_extension_array_equal(result, data - data) + + @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"]) + def test_reductions_2d_axis1(self, data, method): + arr2d = data.reshape(1, -1) + + try: + result = getattr(arr2d, method)(axis=1) + except Exception as err: + try: + getattr(data, method)() + except Exception as err2: + assert type(err) == type(err2) + return + else: + raise AssertionError("Both reductions should raise or neither") + + # not necessarily type/dtype-preserving, so weaker assertions + assert result.shape == (1,) + expected_scalar = getattr(data, method)() + res = result[0] + assert is_matching_na(res, expected_scalar) or res == expected_scalar + + +class NDArrayBacked2DTests(Dim2CompatTests): + # More specific tests for NDArrayBackedExtensionArray subclasses + + def test_copy_order(self, data): + # We should be matching numpy semantics for the "order" keyword in 'copy' + arr2d = data.repeat(2).reshape(-1, 2) + assert arr2d._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d.copy() + assert res._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d[::2, ::2].copy() + assert res._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d.copy("F") + assert not res._ndarray.flags["C_CONTIGUOUS"] + assert res._ndarray.flags["F_CONTIGUOUS"] + + res = arr2d.copy("K") + assert res._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d.T.copy("K") + assert not res._ndarray.flags["C_CONTIGUOUS"] + assert res._ndarray.flags["F_CONTIGUOUS"] + + # order not accepted by numpy + msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)" + with pytest.raises(ValueError, match=msg): + arr2d.copy("Q") + + # neither contiguity + arr_nc = arr2d[::2] + assert not arr_nc._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc._ndarray.flags["F_CONTIGUOUS"] + + assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"] + + assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"] + + assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"] + assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"] + + assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/getitem.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/getitem.py new file mode 100644 index 0000000000000000000000000000000000000000..5f0c1b960a4758e7cd5423188d5f190922b4eee4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/getitem.py @@ -0,0 +1,469 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing 
as tm + + +class BaseGetitemTests: + """Tests for ExtensionArray.__getitem__.""" + + def test_iloc_series(self, data): + ser = pd.Series(data) + result = ser.iloc[:4] + expected = pd.Series(data[:4]) + tm.assert_series_equal(result, expected) + + result = ser.iloc[[0, 1, 2, 3]] + tm.assert_series_equal(result, expected) + + def test_iloc_frame(self, data): + df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")}) + expected = pd.DataFrame({"A": data[:4]}) + + # slice -> frame + result = df.iloc[:4, [0]] + tm.assert_frame_equal(result, expected) + + # sequence -> frame + result = df.iloc[[0, 1, 2, 3], [0]] + tm.assert_frame_equal(result, expected) + + expected = pd.Series(data[:4], name="A") + + # slice -> series + result = df.iloc[:4, 0] + tm.assert_series_equal(result, expected) + + # sequence -> series + result = df.iloc[:4, 0] + tm.assert_series_equal(result, expected) + + # GH#32959 slice columns with step + result = df.iloc[:, ::2] + tm.assert_frame_equal(result, df[["A"]]) + result = df[["B", "A"]].iloc[:, ::2] + tm.assert_frame_equal(result, df[["B"]]) + + def test_iloc_frame_single_block(self, data): + # GH#32959 null slice along index, slice along columns with single-block + df = pd.DataFrame({"A": data}) + + result = df.iloc[:, :] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, :1] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, :2] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, ::2] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, 1:2] + tm.assert_frame_equal(result, df.iloc[:, :0]) + + result = df.iloc[:, -1:] + tm.assert_frame_equal(result, df) + + def test_loc_series(self, data): + ser = pd.Series(data) + result = ser.loc[:3] + expected = pd.Series(data[:4]) + tm.assert_series_equal(result, expected) + + result = ser.loc[[0, 1, 2, 3]] + tm.assert_series_equal(result, expected) + + def test_loc_frame(self, data): + df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")}) + expected = pd.DataFrame({"A": data[:4]}) + + # slice -> frame + result = df.loc[:3, ["A"]] + tm.assert_frame_equal(result, expected) + + # sequence -> frame + result = df.loc[[0, 1, 2, 3], ["A"]] + tm.assert_frame_equal(result, expected) + + expected = pd.Series(data[:4], name="A") + + # slice -> series + result = df.loc[:3, "A"] + tm.assert_series_equal(result, expected) + + # sequence -> series + result = df.loc[:3, "A"] + tm.assert_series_equal(result, expected) + + def test_loc_iloc_frame_single_dtype(self, data): + # GH#27110 bug in ExtensionBlock.iget caused df.iloc[n] to incorrectly + # return a scalar + df = pd.DataFrame({"A": data}) + expected = pd.Series([data[2]], index=["A"], name=2, dtype=data.dtype) + + result = df.loc[2] + tm.assert_series_equal(result, expected) + + expected = pd.Series( + [data[-1]], index=["A"], name=len(data) - 1, dtype=data.dtype + ) + result = df.iloc[-1] + tm.assert_series_equal(result, expected) + + def test_getitem_scalar(self, data): + result = data[0] + assert isinstance(result, data.dtype.type) + + result = pd.Series(data)[0] + assert isinstance(result, data.dtype.type) + + def test_getitem_invalid(self, data): + # TODO: box over scalar, [scalar], (scalar,)? 
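# Editor's illustrative sketch, not part of the test below: the kind of
# numpy-style IndexError these assertions expect when a non-integer key is
# used; the concrete masked-integer dtype is an assumption for illustration.
#
#     import pandas as pd
#
#     arr = pd.array([1, 2, 3], dtype="Int64")
#     try:
#         arr["foo"]
#     except IndexError:
#         print("non-integer key rejected with a numpy-style IndexError")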
+ + msg = ( + r"only integers, slices \(`:`\), ellipsis \(`...`\), numpy.newaxis " + r"\(`None`\) and integer or boolean arrays are valid indices" + ) + with pytest.raises(IndexError, match=msg): + data["foo"] + with pytest.raises(IndexError, match=msg): + data[2.5] + + ub = len(data) + msg = "|".join( + [ + "list index out of range", # json + "index out of bounds", # pyarrow + "Out of bounds access", # Sparse + f"loc must be an integer between -{ub} and {ub}", # Sparse + f"index {ub+1} is out of bounds for axis 0 with size {ub}", + f"index -{ub+1} is out of bounds for axis 0 with size {ub}", + ] + ) + with pytest.raises(IndexError, match=msg): + data[ub + 1] + with pytest.raises(IndexError, match=msg): + data[-ub - 1] + + def test_getitem_scalar_na(self, data_missing, na_cmp, na_value): + result = data_missing[0] + assert na_cmp(result, na_value) + + def test_getitem_empty(self, data): + # Indexing with empty list + result = data[[]] + assert len(result) == 0 + assert isinstance(result, type(data)) + + expected = data[np.array([], dtype="int64")] + tm.assert_extension_array_equal(result, expected) + + def test_getitem_mask(self, data): + # Empty mask, raw array + mask = np.zeros(len(data), dtype=bool) + result = data[mask] + assert len(result) == 0 + assert isinstance(result, type(data)) + + # Empty mask, in series + mask = np.zeros(len(data), dtype=bool) + result = pd.Series(data)[mask] + assert len(result) == 0 + assert result.dtype == data.dtype + + # non-empty mask, raw array + mask[0] = True + result = data[mask] + assert len(result) == 1 + assert isinstance(result, type(data)) + + # non-empty mask, in series + result = pd.Series(data)[mask] + assert len(result) == 1 + assert result.dtype == data.dtype + + def test_getitem_mask_raises(self, data): + mask = np.array([True, False]) + msg = f"Boolean index has wrong length: 2 instead of {len(data)}" + with pytest.raises(IndexError, match=msg): + data[mask] + + mask = pd.array(mask, dtype="boolean") + with pytest.raises(IndexError, match=msg): + data[mask] + + def test_getitem_boolean_array_mask(self, data): + mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean") + result = data[mask] + assert len(result) == 0 + assert isinstance(result, type(data)) + + result = pd.Series(data)[mask] + assert len(result) == 0 + assert result.dtype == data.dtype + + mask[:5] = True + expected = data.take([0, 1, 2, 3, 4]) + result = data[mask] + tm.assert_extension_array_equal(result, expected) + + expected = pd.Series(expected) + result = pd.Series(data)[mask] + tm.assert_series_equal(result, expected) + + def test_getitem_boolean_na_treated_as_false(self, data): + # https://github.com/pandas-dev/pandas/issues/31503 + mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean") + mask[:2] = pd.NA + mask[2:4] = True + + result = data[mask] + expected = data[mask.fillna(False)] + + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(data) + + result = s[mask] + expected = s[mask.fillna(False)] + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "idx", + [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])], + ids=["list", "integer-array", "numpy-array"], + ) + def test_getitem_integer_array(self, data, idx): + result = data[idx] + assert len(result) == 3 + assert isinstance(result, type(data)) + expected = data.take([0, 1, 2]) + tm.assert_extension_array_equal(result, expected) + + expected = pd.Series(expected) + result = pd.Series(data)[idx] + tm.assert_series_equal(result, 
expected) + + @pytest.mark.parametrize( + "idx", + [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")], + ids=["list", "integer-array"], + ) + def test_getitem_integer_with_missing_raises(self, data, idx): + msg = "Cannot index with an integer indexer containing NA values" + with pytest.raises(ValueError, match=msg): + data[idx] + + @pytest.mark.xfail( + reason="Tries label-based and raises KeyError; " + "in some cases raises when calling np.asarray" + ) + @pytest.mark.parametrize( + "idx", + [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")], + ids=["list", "integer-array"], + ) + def test_getitem_series_integer_with_missing_raises(self, data, idx): + msg = "Cannot index with an integer indexer containing NA values" + # TODO: this raises KeyError about labels not found (it tries label-based) + + ser = pd.Series(data, index=[chr(100 + i) for i in range(len(data))]) + with pytest.raises(ValueError, match=msg): + ser[idx] + + def test_getitem_slice(self, data): + # getitem[slice] should return an array + result = data[slice(0)] # empty + assert isinstance(result, type(data)) + + result = data[slice(1)] # scalar + assert isinstance(result, type(data)) + + def test_getitem_ellipsis_and_slice(self, data): + # GH#40353 this is called from slice_block_rows + result = data[..., :] + tm.assert_extension_array_equal(result, data) + + result = data[:, ...] + tm.assert_extension_array_equal(result, data) + + result = data[..., :3] + tm.assert_extension_array_equal(result, data[:3]) + + result = data[:3, ...] + tm.assert_extension_array_equal(result, data[:3]) + + result = data[..., ::2] + tm.assert_extension_array_equal(result, data[::2]) + + result = data[::2, ...] + tm.assert_extension_array_equal(result, data[::2]) + + def test_get(self, data): + # GH 20882 + s = pd.Series(data, index=[2 * i for i in range(len(data))]) + assert s.get(4) == s.iloc[2] + + result = s.get([4, 6]) + expected = s.iloc[[2, 3]] + tm.assert_series_equal(result, expected) + + result = s.get(slice(2)) + expected = s.iloc[[0, 1]] + tm.assert_series_equal(result, expected) + + assert s.get(-1) is None + assert s.get(s.index.max() + 1) is None + + s = pd.Series(data[:6], index=list("abcdef")) + assert s.get("c") == s.iloc[2] + + result = s.get(slice("b", "d")) + expected = s.iloc[[1, 2, 3]] + tm.assert_series_equal(result, expected) + + result = s.get("Z") + assert result is None + + msg = "Series.__getitem__ treating keys as positions is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert s.get(4) == s.iloc[4] + assert s.get(-1) == s.iloc[-1] + assert s.get(len(s)) is None + + # GH 21257 + s = pd.Series(data) + with tm.assert_produces_warning(None): + # GH#45324 make sure we aren't giving a spurious FutureWarning + s2 = s[::2] + assert s2.get(1) is None + + def test_take_sequence(self, data): + result = pd.Series(data)[[0, 1, 3]] + assert result.iloc[0] == data[0] + assert result.iloc[1] == data[1] + assert result.iloc[2] == data[3] + + def test_take(self, data, na_value, na_cmp): + result = data.take([0, -1]) + assert result.dtype == data.dtype + assert result[0] == data[0] + assert result[1] == data[-1] + + result = data.take([0, -1], allow_fill=True, fill_value=na_value) + assert result[0] == data[0] + assert na_cmp(result[1], na_value) + + with pytest.raises(IndexError, match="out of bounds"): + data.take([len(data) + 1]) + + def test_take_empty(self, data, na_value, na_cmp): + empty = data[:0] + + result = empty.take([-1], allow_fill=True) + assert na_cmp(result[0], 
na_value) + + msg = "cannot do a non-empty take from an empty axes|out of bounds" + + with pytest.raises(IndexError, match=msg): + empty.take([-1]) + + with pytest.raises(IndexError, match="cannot do a non-empty take"): + empty.take([0, 1]) + + def test_take_negative(self, data): + # https://github.com/pandas-dev/pandas/issues/20640 + n = len(data) + result = data.take([0, -n, n - 1, -1]) + expected = data.take([0, 0, n - 1, n - 1]) + tm.assert_extension_array_equal(result, expected) + + def test_take_non_na_fill_value(self, data_missing): + fill_value = data_missing[1] # valid + na = data_missing[0] + + arr = data_missing._from_sequence( + [na, fill_value, na], dtype=data_missing.dtype + ) + result = arr.take([-1, 1], fill_value=fill_value, allow_fill=True) + expected = arr.take([1, 1]) + tm.assert_extension_array_equal(result, expected) + + def test_take_pandas_style_negative_raises(self, data, na_value): + with pytest.raises(ValueError, match=""): + data.take([0, -2], fill_value=na_value, allow_fill=True) + + @pytest.mark.parametrize("allow_fill", [True, False]) + def test_take_out_of_bounds_raises(self, data, allow_fill): + arr = data[:3] + + with pytest.raises(IndexError, match="out of bounds|out-of-bounds"): + arr.take(np.asarray([0, 3]), allow_fill=allow_fill) + + def test_take_series(self, data): + s = pd.Series(data) + result = s.take([0, -1]) + expected = pd.Series( + data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype), + index=[0, len(data) - 1], + ) + tm.assert_series_equal(result, expected) + + def test_reindex(self, data, na_value): + s = pd.Series(data) + result = s.reindex([0, 1, 3]) + expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3]) + tm.assert_series_equal(result, expected) + + n = len(data) + result = s.reindex([-1, 0, n]) + expected = pd.Series( + data._from_sequence([na_value, data[0], na_value], dtype=s.dtype), + index=[-1, 0, n], + ) + tm.assert_series_equal(result, expected) + + result = s.reindex([n, n + 1]) + expected = pd.Series( + data._from_sequence([na_value, na_value], dtype=s.dtype), index=[n, n + 1] + ) + tm.assert_series_equal(result, expected) + + def test_reindex_non_na_fill_value(self, data_missing): + valid = data_missing[1] + na = data_missing[0] + + arr = data_missing._from_sequence([na, valid], dtype=data_missing.dtype) + ser = pd.Series(arr) + result = ser.reindex([0, 1, 2], fill_value=valid) + expected = pd.Series( + data_missing._from_sequence([na, valid, valid], dtype=data_missing.dtype) + ) + + tm.assert_series_equal(result, expected) + + def test_loc_len1(self, data): + # see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim + df = pd.DataFrame({"A": data}) + res = df.loc[[0], "A"] + assert res.ndim == 1 + assert res._mgr.arrays[0].ndim == 1 + if hasattr(res._mgr, "blocks"): + assert res._mgr._block.ndim == 1 + + def test_item(self, data): + # https://github.com/pandas-dev/pandas/pull/30175 + s = pd.Series(data) + result = s[:1].item() + assert result == data[0] + + msg = "can only convert an array of size 1 to a Python scalar" + with pytest.raises(ValueError, match=msg): + s[:0].item() + + with pytest.raises(ValueError, match=msg): + s.item() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/groupby.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..414683b02dcba24d9f490017c1e5c915ee0551f7 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/groupby.py @@ -0,0 +1,174 @@ +import re + +import pytest + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_numeric_dtype, + is_object_dtype, + is_string_dtype, +) + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.filterwarnings( + "ignore:The default of observed=False is deprecated:FutureWarning" +) +class BaseGroupbyTests: + """Groupby-specific tests.""" + + def test_grouping_grouper(self, data_for_grouping): + df = pd.DataFrame( + { + "A": pd.Series( + ["B", "B", None, None, "A", "A", "B", "C"], dtype=object + ), + "B": data_for_grouping, + } + ) + gr1 = df.groupby("A")._grouper.groupings[0] + gr2 = df.groupby("B")._grouper.groupings[0] + + tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values) + tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping) + + @pytest.mark.parametrize("as_index", [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values, and the final entry has c==b + # (see data_for_grouping docstring) + df = df.iloc[:-1] + + result = df.groupby("B", as_index=as_index).A.mean() + _, uniques = pd.factorize(data_for_grouping, sort=True) + + exp_vals = [3.0, 1.0, 4.0] + if is_bool: + exp_vals = exp_vals[:-1] + if as_index: + index = pd.Index(uniques, name="B") + expected = pd.Series(exp_vals, index=index, name="A") + tm.assert_series_equal(result, expected) + else: + expected = pd.DataFrame({"B": uniques, "A": exp_vals}) + tm.assert_frame_equal(result, expected) + + def test_groupby_agg_extension(self, data_for_grouping): + # GH#38980 groupby agg on extension type fails for non-numeric types + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + + expected = df.iloc[[0, 2, 4, 7]] + expected = expected.set_index("A") + + result = df.groupby("A").agg({"B": "first"}) + tm.assert_frame_equal(result, expected) + + result = df.groupby("A").agg("first") + tm.assert_frame_equal(result, expected) + + result = df.groupby("A").first() + tm.assert_frame_equal(result, expected) + + def test_groupby_extension_no_sort(self, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values, and the final entry has c==b + # (see data_for_grouping docstring) + df = df.iloc[:-1] + + result = df.groupby("B", sort=False).A.mean() + _, index = pd.factorize(data_for_grouping, sort=False) + + index = pd.Index(index, name="B") + exp_vals = [1.0, 3.0, 4.0] + if is_bool: + exp_vals = exp_vals[:-1] + expected = pd.Series(exp_vals, index=index, name="A") + tm.assert_series_equal(result, expected) + + def test_groupby_extension_transform(self, data_for_grouping): + is_bool = data_for_grouping.dtype._is_boolean + + valid = data_for_grouping[~data_for_grouping.isna()] + df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid}) + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values, and the final entry has c==b + # (see data_for_grouping docstring) + df = df.iloc[:-1] + + result = df.groupby("B").A.transform(len) + expected = pd.Series([3, 3, 2, 2, 3, 1], name="A") + if is_bool: + expected = expected[:-1] + + tm.assert_series_equal(result, expected) + + def test_groupby_extension_apply(self, data_for_grouping, 
groupby_apply_op): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + df.groupby("B", group_keys=False, observed=False).apply(groupby_apply_op) + df.groupby("B", group_keys=False, observed=False).A.apply(groupby_apply_op) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + df.groupby("A", group_keys=False, observed=False).apply(groupby_apply_op) + df.groupby("A", group_keys=False, observed=False).B.apply(groupby_apply_op) + + def test_groupby_apply_identity(self, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + result = df.groupby("A").B.apply(lambda x: x.array) + expected = pd.Series( + [ + df.B.iloc[[0, 1, 6]].array, + df.B.iloc[[2, 3]].array, + df.B.iloc[[4, 5]].array, + df.B.iloc[[7]].array, + ], + index=pd.Index([1, 2, 3, 4], name="A"), + name="B", + ) + tm.assert_series_equal(result, expected) + + def test_in_numeric_groupby(self, data_for_grouping): + df = pd.DataFrame( + { + "A": [1, 1, 2, 2, 3, 3, 1, 4], + "B": data_for_grouping, + "C": [1, 1, 1, 1, 1, 1, 1, 1], + } + ) + + dtype = data_for_grouping.dtype + if ( + is_numeric_dtype(dtype) + or is_bool_dtype(dtype) + or dtype.name == "decimal" + or is_string_dtype(dtype) + or is_object_dtype(dtype) + or dtype.kind == "m" # in particular duration[*][pyarrow] + ): + expected = pd.Index(["B", "C"]) + result = df.groupby("A").sum().columns + else: + expected = pd.Index(["C"]) + + msg = "|".join( + [ + # period/datetime + "does not support sum operations", + # all others + re.escape(f"agg function failed [how->sum,dtype->{dtype}"), + ] + ) + with pytest.raises(TypeError, match=msg): + df.groupby("A").sum() + result = df.groupby("A").sum(numeric_only=True).columns + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/index.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/index.py new file mode 100644 index 0000000000000000000000000000000000000000..72c4ebfb5d84ae82f74a4c814e72372a651ae6b8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/index.py @@ -0,0 +1,19 @@ +""" +Tests for Indexes backed by arbitrary ExtensionArrays. 
+""" +import pandas as pd + + +class BaseIndexTests: + """Tests for Index object backed by an ExtensionArray""" + + def test_index_from_array(self, data): + idx = pd.Index(data) + assert data.dtype == idx.dtype + + def test_index_from_listlike_with_dtype(self, data): + idx = pd.Index(data, dtype=data.dtype) + assert idx.dtype == data.dtype + + idx = pd.Index(list(data), dtype=data.dtype) + assert idx.dtype == data.dtype diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/io.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/io.py new file mode 100644 index 0000000000000000000000000000000000000000..3a6f2eb5ba8b1854ac48a22efa02e89672b2f2ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/io.py @@ -0,0 +1,39 @@ +from io import StringIO + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ExtensionArray + + +class BaseParsingTests: + @pytest.mark.parametrize("engine", ["c", "python"]) + def test_EA_types(self, engine, data, request): + if isinstance(data.dtype, pd.CategoricalDtype): + # in parsers.pyx _convert_with_dtype there is special-casing for + # Categorical that pre-empts _from_sequence_of_strings + pass + elif isinstance(data.dtype, pd.core.dtypes.dtypes.NumpyEADtype): + # These get unwrapped internally so are treated as numpy dtypes + # in the parsers.pyx code + pass + elif ( + type(data)._from_sequence_of_strings.__func__ + is ExtensionArray._from_sequence_of_strings.__func__ + ): + # i.e. the EA hasn't overridden _from_sequence_of_strings + mark = pytest.mark.xfail( + reason="_from_sequence_of_strings not implemented", + raises=NotImplementedError, + ) + request.node.add_marker(mark) + + df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))}) + csv_output = df.to_csv(index=False, na_rep=np.nan) + result = pd.read_csv( + StringIO(csv_output), dtype={"with_dtype": str(data.dtype)}, engine=engine + ) + expected = df + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py new file mode 100644 index 0000000000000000000000000000000000000000..c803a8113b4a46be4dd99b06cb1026317786a241 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py @@ -0,0 +1,720 @@ +import inspect +import operator + +import numpy as np +import pytest + +from pandas._typing import Dtype + +from pandas.core.dtypes.common import is_bool_dtype +from pandas.core.dtypes.dtypes import NumpyEADtype +from pandas.core.dtypes.missing import na_value_for_dtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.sorting import nargsort + + +class BaseMethodsTests: + """Various Series and DataFrame methods.""" + + def test_hash_pandas_object(self, data): + # _hash_pandas_object should return a uint64 ndarray of the same length + # as the data + from pandas.core.util.hashing import _default_hash_key + + res = data._hash_pandas_object( + encoding="utf-8", hash_key=_default_hash_key, categorize=False + ) + assert res.dtype == np.uint64 + assert res.shape == data.shape + + def test_value_counts_default_dropna(self, data): + # make sure we have consistent default dropna kwarg + if not hasattr(data, "value_counts"): + pytest.skip(f"value_counts is not implemented for {type(data)}") + sig = inspect.signature(data.value_counts) + kwarg = 
sig.parameters["dropna"] + assert kwarg.default is True + + @pytest.mark.parametrize("dropna", [True, False]) + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts(dropna=dropna).sort_index() + + tm.assert_series_equal(result, expected) + + def test_value_counts_with_normalize(self, data): + # GH 33172 + data = data[:10].unique() + values = np.array(data[~data.isna()]) + ser = pd.Series(data, dtype=data.dtype) + + result = ser.value_counts(normalize=True).sort_index() + + if not isinstance(data, pd.Categorical): + expected = pd.Series( + [1 / len(values)] * len(values), index=result.index, name="proportion" + ) + else: + expected = pd.Series(0.0, index=result.index, name="proportion") + expected[result > 0] = 1 / len(values) + + if getattr(data.dtype, "storage", "") == "pyarrow" or isinstance( + data.dtype, pd.ArrowDtype + ): + # TODO: avoid special-casing + expected = expected.astype("double[pyarrow]") + elif getattr(data.dtype, "storage", "") == "pyarrow_numpy": + # TODO: avoid special-casing + expected = expected.astype("float64") + elif na_value_for_dtype(data.dtype) is pd.NA: + # TODO(GH#44692): avoid special-casing + expected = expected.astype("Float64") + + tm.assert_series_equal(result, expected) + + def test_count(self, data_missing): + df = pd.DataFrame({"A": data_missing}) + result = df.count(axis="columns") + expected = pd.Series([0, 1]) + tm.assert_series_equal(result, expected) + + def test_series_count(self, data_missing): + # GH#26835 + ser = pd.Series(data_missing) + result = ser.count() + expected = 1 + assert result == expected + + def test_apply_simple_series(self, data): + result = pd.Series(data).apply(id) + assert isinstance(result, pd.Series) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data_missing, na_action): + result = data_missing.map(lambda x: x, na_action=na_action) + expected = data_missing.to_numpy() + tm.assert_numpy_array_equal(result, expected) + + def test_argsort(self, data_for_sorting): + result = pd.Series(data_for_sorting).argsort() + # argsort result gets passed to take, so should be np.intp + expected = pd.Series(np.array([2, 0, 1], dtype=np.intp)) + tm.assert_series_equal(result, expected) + + def test_argsort_missing_array(self, data_missing_for_sorting): + result = data_missing_for_sorting.argsort() + # argsort result gets passed to take, so should be np.intp + expected = np.array([2, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_argsort_missing(self, data_missing_for_sorting): + msg = "The behavior of Series.argsort in the presence of NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.Series(data_missing_for_sorting).argsort() + expected = pd.Series(np.array([1, -1, 0], dtype=np.intp)) + tm.assert_series_equal(result, expected) + + def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value): + # GH 24382 + is_bool = data_for_sorting.dtype._is_boolean + + exp_argmax = 1 + exp_argmax_repeated = 3 + if is_bool: + # See data_for_sorting docstring + exp_argmax = 0 + exp_argmax_repeated = 1 + + # data_for_sorting -> [B, C, A] with A < B < C + assert data_for_sorting.argmax() == exp_argmax + assert data_for_sorting.argmin() == 2 + + # with repeated values -> first occurrence + data = data_for_sorting.take([2, 
0, 0, 1, 1, 2]) + assert data.argmax() == exp_argmax_repeated + assert data.argmin() == 0 + + # with missing values + # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing. + assert data_missing_for_sorting.argmax() == 0 + assert data_missing_for_sorting.argmin() == 2 + + @pytest.mark.parametrize("method", ["argmax", "argmin"]) + def test_argmin_argmax_empty_array(self, method, data): + # GH 24382 + err_msg = "attempt to get" + with pytest.raises(ValueError, match=err_msg): + getattr(data[:0], method)() + + @pytest.mark.parametrize("method", ["argmax", "argmin"]) + def test_argmin_argmax_all_na(self, method, data, na_value): + # all missing with skipna=True is the same as empty + err_msg = "attempt to get" + data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype) + with pytest.raises(ValueError, match=err_msg): + getattr(data_na, method)() + + @pytest.mark.parametrize( + "op_name, skipna, expected", + [ + ("idxmax", True, 0), + ("idxmin", True, 2), + ("argmax", True, 0), + ("argmin", True, 2), + ("idxmax", False, np.nan), + ("idxmin", False, np.nan), + ("argmax", False, -1), + ("argmin", False, -1), + ], + ) + def test_argreduce_series( + self, data_missing_for_sorting, op_name, skipna, expected + ): + # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing. + warn = None + msg = "The behavior of Series.argmax/argmin" + if op_name.startswith("arg") and expected == -1: + warn = FutureWarning + if op_name.startswith("idx") and np.isnan(expected): + warn = FutureWarning + msg = f"The behavior of Series.{op_name}" + ser = pd.Series(data_missing_for_sorting) + with tm.assert_produces_warning(warn, match=msg): + result = getattr(ser, op_name)(skipna=skipna) + tm.assert_almost_equal(result, expected) + + def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting): + # GH#38733 + data = data_missing_for_sorting + + with pytest.raises(NotImplementedError, match=""): + data.argmin(skipna=False) + + with pytest.raises(NotImplementedError, match=""): + data.argmax(skipna=False) + + @pytest.mark.parametrize( + "na_position, expected", + [ + ("last", np.array([2, 0, 1], dtype=np.dtype("intp"))), + ("first", np.array([1, 2, 0], dtype=np.dtype("intp"))), + ], + ) + def test_nargsort(self, data_missing_for_sorting, na_position, expected): + # GH 25439 + result = nargsort(data_missing_for_sorting, na_position=na_position) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values(self, data_for_sorting, ascending, sort_by_key): + ser = pd.Series(data_for_sorting) + result = ser.sort_values(ascending=ascending, key=sort_by_key) + expected = ser.iloc[[2, 0, 1]] + if not ascending: + # GH 35922. 
Expect stable sort + if ser.nunique() == 2: + expected = ser.iloc[[0, 1, 2]] + else: + expected = ser.iloc[[1, 0, 2]] + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values_missing( + self, data_missing_for_sorting, ascending, sort_by_key + ): + ser = pd.Series(data_missing_for_sorting) + result = ser.sort_values(ascending=ascending, key=sort_by_key) + if ascending: + expected = ser.iloc[[2, 0, 1]] + else: + expected = ser.iloc[[0, 2, 1]] + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values_frame(self, data_for_sorting, ascending): + df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting}) + result = df.sort_values(["A", "B"]) + expected = pd.DataFrame( + {"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1] + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("keep", ["first", "last", False]) + def test_duplicated(self, data, keep): + arr = data.take([0, 1, 0, 1]) + result = arr.duplicated(keep=keep) + if keep == "first": + expected = np.array([False, False, True, True]) + elif keep == "last": + expected = np.array([True, True, False, False]) + else: + expected = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("box", [pd.Series, lambda x: x]) + @pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique]) + def test_unique(self, data, box, method): + duplicated = box(data._from_sequence([data[0], data[0]], dtype=data.dtype)) + + result = method(duplicated) + + assert len(result) == 1 + assert isinstance(result, type(data)) + assert result[0] == duplicated[0] + + def test_factorize(self, data_for_grouping): + codes, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True) + + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values + expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 0], dtype=np.intp) + expected_uniques = data_for_grouping.take([0, 4]) + else: + expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 2], dtype=np.intp) + expected_uniques = data_for_grouping.take([0, 4, 7]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_extension_array_equal(uniques, expected_uniques) + + def test_factorize_equivalence(self, data_for_grouping): + codes_1, uniques_1 = pd.factorize(data_for_grouping, use_na_sentinel=True) + codes_2, uniques_2 = data_for_grouping.factorize(use_na_sentinel=True) + + tm.assert_numpy_array_equal(codes_1, codes_2) + tm.assert_extension_array_equal(uniques_1, uniques_2) + assert len(uniques_1) == len(pd.unique(uniques_1)) + assert uniques_1.dtype == data_for_grouping.dtype + + def test_factorize_empty(self, data): + codes, uniques = pd.factorize(data[:0]) + expected_codes = np.array([], dtype=np.intp) + expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_extension_array_equal(uniques, expected_uniques) + + def test_fillna_copy_frame(self, data_missing): + arr = data_missing.take([1, 1]) + df = pd.DataFrame({"A": arr}) + df_orig = df.copy() + + filled_val = df.iloc[0, 0] + result = df.fillna(filled_val) + + result.iloc[0, 0] = filled_val + + tm.assert_frame_equal(df, df_orig) + + def test_fillna_copy_series(self, data_missing): + arr = data_missing.take([1, 1]) + ser = pd.Series(arr, copy=False) + ser_orig = ser.copy() + + filled_val = ser[0] + result = ser.fillna(filled_val) + 
result.iloc[0] = filled_val + + tm.assert_series_equal(ser, ser_orig) + + def test_fillna_length_mismatch(self, data_missing): + msg = "Length of 'value' does not match." + with pytest.raises(ValueError, match=msg): + data_missing.fillna(data_missing.take([1])) + + # Subclasses can override if we expect e.g Sparse[bool], boolean, pyarrow[bool] + _combine_le_expected_dtype: Dtype = NumpyEADtype("bool") + + def test_combine_le(self, data_repeated): + # GH 20825 + # Test that combine works when doing a <= (le) comparison + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + result = s1.combine(s2, lambda x1, x2: x1 <= x2) + expected = pd.Series( + pd.array( + [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))], + dtype=self._combine_le_expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 <= x2) + expected = pd.Series( + pd.array( + [a <= val for a in list(orig_data1)], + dtype=self._combine_le_expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + + def test_combine_add(self, data_repeated): + # GH 20825 + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + + # Check if the operation is supported pointwise for our scalars. If not, + # we will expect Series.combine to raise as well. + try: + with np.errstate(over="ignore"): + expected = pd.Series( + orig_data1._from_sequence( + [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))] + ) + ) + except TypeError: + # If the operation is not supported pointwise for our scalars, + # then Series.combine should also raise + with pytest.raises(TypeError): + s1.combine(s2, lambda x1, x2: x1 + x2) + return + + result = s1.combine(s2, lambda x1, x2: x1 + x2) + tm.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 + x2) + expected = pd.Series( + orig_data1._from_sequence([a + val for a in list(orig_data1)]) + ) + tm.assert_series_equal(result, expected) + + def test_combine_first(self, data): + # https://github.com/pandas-dev/pandas/issues/24147 + a = pd.Series(data[:3]) + b = pd.Series(data[2:5], index=[2, 3, 4]) + result = a.combine_first(b) + expected = pd.Series(data[:5]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("frame", [True, False]) + @pytest.mark.parametrize( + "periods, indices", + [(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])], + ) + def test_container_shift(self, data, frame, periods, indices): + # https://github.com/pandas-dev/pandas/issues/22386 + subset = data[:5] + data = pd.Series(subset, name="A") + expected = pd.Series(subset.take(indices, allow_fill=True), name="A") + + if frame: + result = data.to_frame(name="A").assign(B=1).shift(periods) + expected = pd.concat( + [expected, pd.Series([1] * 5, name="B").shift(periods)], axis=1 + ) + compare = tm.assert_frame_equal + else: + result = data.shift(periods) + compare = tm.assert_series_equal + + compare(result, expected) + + def test_shift_0_periods(self, data): + # GH#33856 shifting with periods=0 should return a copy, not same obj + result = data.shift(0) + assert data[0] != data[1] # otherwise below is invalid + data[0] = data[1] + assert result[0] != result[1] # i.e. 
not the same object/view + + @pytest.mark.parametrize("periods", [1, -2]) + def test_diff(self, data, periods): + data = data[:5] + if is_bool_dtype(data.dtype): + op = operator.xor + else: + op = operator.sub + try: + # does this array implement ops? + op(data, data) + except Exception: + pytest.skip(f"{type(data)} does not support diff") + s = pd.Series(data) + result = s.diff(periods) + expected = pd.Series(op(data, data.shift(periods))) + tm.assert_series_equal(result, expected) + + df = pd.DataFrame({"A": data, "B": [1.0] * 5}) + result = df.diff(periods) + if periods == 1: + b = [np.nan, 0, 0, 0, 0] + else: + b = [0, 0, 0, np.nan, np.nan] + expected = pd.DataFrame({"A": expected, "B": b}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "periods, indices", + [[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]], + ) + def test_shift_non_empty_array(self, data, periods, indices): + # https://github.com/pandas-dev/pandas/issues/23911 + subset = data[:2] + result = subset.shift(periods) + expected = subset.take(indices, allow_fill=True) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4]) + def test_shift_empty_array(self, data, periods): + # https://github.com/pandas-dev/pandas/issues/23911 + empty = data[:0] + result = empty.shift(periods) + expected = empty + tm.assert_extension_array_equal(result, expected) + + def test_shift_zero_copies(self, data): + # GH#31502 + result = data.shift(0) + assert result is not data + + result = data[:0].shift(2) + assert result is not data + + def test_shift_fill_value(self, data): + arr = data[:4] + fill_value = data[0] + result = arr.shift(1, fill_value=fill_value) + expected = data.take([0, 0, 1, 2]) + tm.assert_extension_array_equal(result, expected) + + result = arr.shift(-2, fill_value=fill_value) + expected = data.take([2, 3, 0, 0]) + tm.assert_extension_array_equal(result, expected) + + def test_not_hashable(self, data): + # We are in general mutable, so not hashable + with pytest.raises(TypeError, match="unhashable type"): + hash(data) + + def test_hash_pandas_object_works(self, data, as_frame): + # https://github.com/pandas-dev/pandas/issues/23066 + data = pd.Series(data) + if as_frame: + data = data.to_frame() + a = pd.util.hash_pandas_object(data) + b = pd.util.hash_pandas_object(data) + tm.assert_equal(a, b) + + def test_searchsorted(self, data_for_sorting, as_series): + if data_for_sorting.dtype._is_boolean: + return self._test_searchsorted_bool_dtypes(data_for_sorting, as_series) + + b, c, a = data_for_sorting + arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c] + + if as_series: + arr = pd.Series(arr) + assert arr.searchsorted(a) == 0 + assert arr.searchsorted(a, side="right") == 1 + + assert arr.searchsorted(b) == 1 + assert arr.searchsorted(b, side="right") == 2 + + assert arr.searchsorted(c) == 2 + assert arr.searchsorted(c, side="right") == 3 + + result = arr.searchsorted(arr.take([0, 2])) + expected = np.array([0, 2], dtype=np.intp) + + tm.assert_numpy_array_equal(result, expected) + + # sorter + sorter = np.array([1, 2, 0]) + assert data_for_sorting.searchsorted(a, sorter=sorter) == 0 + + def _test_searchsorted_bool_dtypes(self, data_for_sorting, as_series): + # We call this from test_searchsorted in cases where we have a + # boolean-like dtype. The non-bool test assumes we have more than 2 + # unique values. 
+ dtype = data_for_sorting.dtype + data_for_sorting = pd.array([True, False], dtype=dtype) + b, a = data_for_sorting + arr = type(data_for_sorting)._from_sequence([a, b]) + + if as_series: + arr = pd.Series(arr) + assert arr.searchsorted(a) == 0 + assert arr.searchsorted(a, side="right") == 1 + + assert arr.searchsorted(b) == 1 + assert arr.searchsorted(b, side="right") == 2 + + result = arr.searchsorted(arr.take([0, 1])) + expected = np.array([0, 1], dtype=np.intp) + + tm.assert_numpy_array_equal(result, expected) + + # sorter + sorter = np.array([1, 0]) + assert data_for_sorting.searchsorted(a, sorter=sorter) == 0 + + def test_where_series(self, data, na_value, as_frame): + assert data[0] != data[1] + cls = type(data) + a, b = data[:2] + + orig = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype)) + ser = orig.copy() + cond = np.array([True, True, False, False]) + + if as_frame: + ser = ser.to_frame(name="a") + cond = cond.reshape(-1, 1) + + result = ser.where(cond) + expected = pd.Series( + cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype) + ) + + if as_frame: + expected = expected.to_frame(name="a") + tm.assert_equal(result, expected) + + ser.mask(~cond, inplace=True) + tm.assert_equal(ser, expected) + + # array other + ser = orig.copy() + if as_frame: + ser = ser.to_frame(name="a") + cond = np.array([True, False, True, True]) + other = cls._from_sequence([a, b, a, b], dtype=data.dtype) + if as_frame: + other = pd.DataFrame({"a": other}) + cond = pd.DataFrame({"a": cond}) + result = ser.where(cond, other) + expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype)) + if as_frame: + expected = expected.to_frame(name="a") + tm.assert_equal(result, expected) + + ser.mask(~cond, other, inplace=True) + tm.assert_equal(ser, expected) + + @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]]) + def test_repeat(self, data, repeats, as_series, use_numpy): + arr = type(data)._from_sequence(data[:3], dtype=data.dtype) + if as_series: + arr = pd.Series(arr) + + result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats) + + repeats = [repeats] * 3 if isinstance(repeats, int) else repeats + expected = [x for x, n in zip(arr, repeats) for _ in range(n)] + expected = type(data)._from_sequence(expected, dtype=data.dtype) + if as_series: + expected = pd.Series(expected, index=arr.index.repeat(repeats)) + + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "repeats, kwargs, error, msg", + [ + (2, {"axis": 1}, ValueError, "axis"), + (-1, {}, ValueError, "negative"), + ([1, 2], {}, ValueError, "shape"), + (2, {"foo": "bar"}, TypeError, "'foo'"), + ], + ) + def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy): + with pytest.raises(error, match=msg): + if use_numpy: + np.repeat(data, repeats, **kwargs) + else: + data.repeat(repeats, **kwargs) + + def test_delete(self, data): + result = data.delete(0) + expected = data[1:] + tm.assert_extension_array_equal(result, expected) + + result = data.delete([1, 3]) + expected = data._concat_same_type([data[[0]], data[[2]], data[4:]]) + tm.assert_extension_array_equal(result, expected) + + def test_insert(self, data): + # insert at the beginning + result = data[1:].insert(0, data[0]) + tm.assert_extension_array_equal(result, data) + + result = data[1:].insert(-len(data[1:]), data[0]) + tm.assert_extension_array_equal(result, data) + + # insert at the middle + result = data[:-1].insert(4, data[-1]) + + taker = np.arange(len(data)) + taker[5:] = taker[4:-1] + taker[4] = len(data) 
- 1 + expected = data.take(taker) + tm.assert_extension_array_equal(result, expected) + + def test_insert_invalid(self, data, invalid_scalar): + item = invalid_scalar + + with pytest.raises((TypeError, ValueError)): + data.insert(0, item) + + with pytest.raises((TypeError, ValueError)): + data.insert(4, item) + + with pytest.raises((TypeError, ValueError)): + data.insert(len(data) - 1, item) + + def test_insert_invalid_loc(self, data): + ub = len(data) + + with pytest.raises(IndexError): + data.insert(ub + 1, data[0]) + + with pytest.raises(IndexError): + data.insert(-ub - 1, data[0]) + + with pytest.raises(TypeError): + # we expect TypeError here instead of IndexError to match np.insert + data.insert(1.5, data[0]) + + @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame]) + def test_equals(self, data, na_value, as_series, box): + data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype) + data_na = type(data)._from_sequence([na_value] * len(data), dtype=data.dtype) + + data = tm.box_expected(data, box, transpose=False) + data2 = tm.box_expected(data2, box, transpose=False) + data_na = tm.box_expected(data_na, box, transpose=False) + + # we are asserting with `is True/False` explicitly, to test that the + # result is an actual Python bool, and not something "truthy" + + assert data.equals(data) is True + assert data.equals(data.copy()) is True + + # unequal other data + assert data.equals(data2) is False + assert data.equals(data_na) is False + + # different length + assert data[:2].equals(data[:3]) is False + + # empty are equal + assert data[:0].equals(data[:0]) is True + + # other types + assert data.equals(None) is False + assert data[[0]].equals(data[0]) is False + + def test_equals_same_data_different_object(self, data): + # https://github.com/pandas-dev/pandas/issues/34660 + assert pd.Series(data).equals(pd.Series(data)) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/missing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/missing.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd6682c12123bd35573fea2b31143b03ddd3cde --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/missing.py @@ -0,0 +1,188 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class BaseMissingTests: + def test_isna(self, data_missing): + expected = np.array([True, False]) + + result = pd.isna(data_missing) + tm.assert_numpy_array_equal(result, expected) + + result = pd.Series(data_missing).isna() + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + # GH 21189 + result = pd.Series(data_missing).drop([0, 1]).isna() + expected = pd.Series([], dtype=bool) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("na_func", ["isna", "notna"]) + def test_isna_returns_copy(self, data_missing, na_func): + result = pd.Series(data_missing) + expected = result.copy() + mask = getattr(result, na_func)() + if isinstance(mask.dtype, pd.SparseDtype): + mask = np.array(mask) + + mask[:] = True + tm.assert_series_equal(result, expected) + + def test_dropna_array(self, data_missing): + result = data_missing.dropna() + expected = data_missing[[1]] + tm.assert_extension_array_equal(result, expected) + + def test_dropna_series(self, data_missing): + ser = pd.Series(data_missing) + result = ser.dropna() + expected = ser.iloc[[1]] + tm.assert_series_equal(result, expected) + + def 
test_dropna_frame(self, data_missing): + df = pd.DataFrame({"A": data_missing}, columns=pd.Index(["A"], dtype=object)) + + # defaults + result = df.dropna() + expected = df.iloc[[1]] + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.dropna(axis="columns") + expected = pd.DataFrame(index=pd.RangeIndex(2), columns=pd.Index([])) + tm.assert_frame_equal(result, expected) + + # multiple + df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]}) + result = df.dropna() + expected = df.iloc[:0] + tm.assert_frame_equal(result, expected) + + def test_fillna_scalar(self, data_missing): + valid = data_missing[1] + result = data_missing.fillna(valid) + expected = data_missing.fillna(valid) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:Series.fillna with 'method' is deprecated:FutureWarning" + ) + def test_fillna_limit_pad(self, data_missing): + arr = data_missing.take([1, 0, 0, 0, 1]) + result = pd.Series(arr).ffill(limit=2) + expected = pd.Series(data_missing.take([1, 1, 1, 0, 1])) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "limit_area, input_ilocs, expected_ilocs", + [ + ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]), + ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]), + ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]), + ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]), + ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]), + ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]), + ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]), + ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]), + ], + ) + def test_ffill_limit_area( + self, data_missing, limit_area, input_ilocs, expected_ilocs + ): + # GH#56616 + arr = data_missing.take(input_ilocs) + result = pd.Series(arr).ffill(limit_area=limit_area) + expected = pd.Series(data_missing.take(expected_ilocs)) + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:Series.fillna with 'method' is deprecated:FutureWarning" + ) + def test_fillna_limit_backfill(self, data_missing): + arr = data_missing.take([1, 0, 0, 0, 1]) + result = pd.Series(arr).fillna(method="backfill", limit=2) + expected = pd.Series(data_missing.take([1, 0, 1, 1, 1])) + tm.assert_series_equal(result, expected) + + def test_fillna_no_op_returns_copy(self, data): + data = data[~data.isna()] + + valid = data[0] + result = data.fillna(valid) + assert result is not data + tm.assert_extension_array_equal(result, data) + + result = data._pad_or_backfill(method="backfill") + assert result is not data + tm.assert_extension_array_equal(result, data) + + def test_fillna_series(self, data_missing): + fill_value = data_missing[1] + ser = pd.Series(data_missing) + + result = ser.fillna(fill_value) + expected = pd.Series( + data_missing._from_sequence( + [fill_value, fill_value], dtype=data_missing.dtype + ) + ) + tm.assert_series_equal(result, expected) + + # Fill with a series + result = ser.fillna(expected) + tm.assert_series_equal(result, expected) + + # Fill with a series not affecting the missing values + result = ser.fillna(ser) + tm.assert_series_equal(result, ser) + + def test_fillna_series_method(self, data_missing, fillna_method): + fill_value = data_missing[1] + + if fillna_method == "ffill": + data_missing = data_missing[::-1] + + result = getattr(pd.Series(data_missing), fillna_method)() + expected = pd.Series( + data_missing._from_sequence( + [fill_value, fill_value], dtype=data_missing.dtype + ) + ) + + tm.assert_series_equal(result, expected) + + def test_fillna_frame(self, data_missing): + 
fill_value = data_missing[1] + + result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value) + + expected = pd.DataFrame( + { + "A": data_missing._from_sequence( + [fill_value, fill_value], dtype=data_missing.dtype + ), + "B": [1, 2], + } + ) + + tm.assert_frame_equal(result, expected) + + def test_fillna_fill_other(self, data): + result = pd.DataFrame({"A": data, "B": [np.nan] * len(data)}).fillna({"B": 0.0}) + + expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)}) + + tm.assert_frame_equal(result, expected) + + def test_use_inf_as_na_no_effect(self, data_missing): + ser = pd.Series(data_missing) + expected = ser.isna() + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = ser.isna() + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py new file mode 100644 index 0000000000000000000000000000000000000000..4550e3b055cfeaea60f9d0b44c97e099e8e4d47c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py @@ -0,0 +1,379 @@ +import itertools + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.extensions import ExtensionArray +from pandas.core.internals.blocks import EABackedBlock + + +class BaseReshapingTests: + """Tests for reshaping and concatenation.""" + + @pytest.mark.parametrize("in_frame", [True, False]) + def test_concat(self, data, in_frame): + wrapped = pd.Series(data) + if in_frame: + wrapped = pd.DataFrame(wrapped) + result = pd.concat([wrapped, wrapped], ignore_index=True) + + assert len(result) == len(data) * 2 + + if in_frame: + dtype = result.dtypes[0] + else: + dtype = result.dtype + + assert dtype == data.dtype + if hasattr(result._mgr, "blocks"): + assert isinstance(result._mgr.blocks[0], EABackedBlock) + assert isinstance(result._mgr.arrays[0], ExtensionArray) + + @pytest.mark.parametrize("in_frame", [True, False]) + def test_concat_all_na_block(self, data_missing, in_frame): + valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1]) + na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3]) + if in_frame: + valid_block = pd.DataFrame({"a": valid_block}) + na_block = pd.DataFrame({"a": na_block}) + result = pd.concat([valid_block, na_block]) + if in_frame: + expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])}) + tm.assert_frame_equal(result, expected) + else: + expected = pd.Series(data_missing.take([1, 1, 0, 0])) + tm.assert_series_equal(result, expected) + + def test_concat_mixed_dtypes(self, data): + # https://github.com/pandas-dev/pandas/issues/20762 + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"A": [1, 2, 3]}) + df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category") + dfs = [df1, df2, df3] + + # dataframes + result = pd.concat(dfs) + expected = pd.concat([x.astype(object) for x in dfs]) + tm.assert_frame_equal(result, expected) + + # series + result = pd.concat([x["A"] for x in dfs]) + expected = pd.concat([x["A"].astype(object) for x in dfs]) + tm.assert_series_equal(result, expected) + + # simple test for just EA and one other + result = pd.concat([df1, df2.astype(object)]) + expected = pd.concat([df1.astype("object"), df2.astype("object")]) + tm.assert_frame_equal(result, expected) + + result = pd.concat([df1["A"], 
df2["A"].astype(object)]) + expected = pd.concat([df1["A"].astype("object"), df2["A"].astype("object")]) + tm.assert_series_equal(result, expected) + + def test_concat_columns(self, data, na_value): + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"B": [1, 2, 3]}) + + expected = pd.DataFrame({"A": data[:3], "B": [1, 2, 3]}) + result = pd.concat([df1, df2], axis=1) + tm.assert_frame_equal(result, expected) + result = pd.concat([df1["A"], df2["B"]], axis=1) + tm.assert_frame_equal(result, expected) + + # non-aligned + df2 = pd.DataFrame({"B": [1, 2, 3]}, index=[1, 2, 3]) + expected = pd.DataFrame( + { + "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype), + "B": [np.nan, 1, 2, 3], + } + ) + + result = pd.concat([df1, df2], axis=1) + tm.assert_frame_equal(result, expected) + result = pd.concat([df1["A"], df2["B"]], axis=1) + tm.assert_frame_equal(result, expected) + + def test_concat_extension_arrays_copy_false(self, data, na_value): + # GH 20756 + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"B": data[3:7]}) + expected = pd.DataFrame( + { + "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype), + "B": data[3:7], + } + ) + result = pd.concat([df1, df2], axis=1, copy=False) + tm.assert_frame_equal(result, expected) + + def test_concat_with_reindex(self, data): + # GH-33027 + a = pd.DataFrame({"a": data[:5]}) + b = pd.DataFrame({"b": data[:5]}) + result = pd.concat([a, b], ignore_index=True) + expected = pd.DataFrame( + { + "a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True), + "b": data.take(([-1] * 5) + list(range(5)), allow_fill=True), + } + ) + tm.assert_frame_equal(result, expected) + + def test_align(self, data, na_value): + a = data[:3] + b = data[2:5] + r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) + + # Assumes that the ctor can take a list of scalars of the type + e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype)) + e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype)) + tm.assert_series_equal(r1, e1) + tm.assert_series_equal(r2, e2) + + def test_align_frame(self, data, na_value): + a = data[:3] + b = data[2:5] + r1, r2 = pd.DataFrame({"A": a}).align(pd.DataFrame({"A": b}, index=[1, 2, 3])) + + # Assumes that the ctor can take a list of scalars of the type + e1 = pd.DataFrame( + {"A": data._from_sequence(list(a) + [na_value], dtype=data.dtype)} + ) + e2 = pd.DataFrame( + {"A": data._from_sequence([na_value] + list(b), dtype=data.dtype)} + ) + tm.assert_frame_equal(r1, e1) + tm.assert_frame_equal(r2, e2) + + def test_align_series_frame(self, data, na_value): + # https://github.com/pandas-dev/pandas/issues/20576 + ser = pd.Series(data, name="a") + df = pd.DataFrame({"col": np.arange(len(ser) + 1)}) + r1, r2 = ser.align(df) + + e1 = pd.Series( + data._from_sequence(list(data) + [na_value], dtype=data.dtype), + name=ser.name, + ) + + tm.assert_series_equal(r1, e1) + tm.assert_frame_equal(r2, df) + + def test_set_frame_expand_regular_with_extension(self, data): + df = pd.DataFrame({"A": [1] * len(data)}) + df["B"] = data + expected = pd.DataFrame({"A": [1] * len(data), "B": data}) + tm.assert_frame_equal(df, expected) + + def test_set_frame_expand_extension_with_regular(self, data): + df = pd.DataFrame({"A": data}) + df["B"] = [1] * len(data) + expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) + tm.assert_frame_equal(df, expected) + + def test_set_frame_overwrite_object(self, data): + # https://github.com/pandas-dev/pandas/issues/20555 + df = 
pd.DataFrame({"A": [1] * len(data)}, dtype=object) + df["A"] = data + assert df.dtypes["A"] == data.dtype + + def test_merge(self, data, na_value): + # GH-20743 + df1 = pd.DataFrame({"ext": data[:3], "int1": [1, 2, 3], "key": [0, 1, 2]}) + df2 = pd.DataFrame({"int2": [1, 2, 3, 4], "key": [0, 0, 1, 3]}) + + res = pd.merge(df1, df2) + exp = pd.DataFrame( + { + "int1": [1, 1, 2], + "int2": [1, 2, 3], + "key": [0, 0, 1], + "ext": data._from_sequence( + [data[0], data[0], data[1]], dtype=data.dtype + ), + } + ) + tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]]) + + res = pd.merge(df1, df2, how="outer") + exp = pd.DataFrame( + { + "int1": [1, 1, 2, 3, np.nan], + "int2": [1, 2, 3, np.nan, 4], + "key": [0, 0, 1, 2, 3], + "ext": data._from_sequence( + [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype + ), + } + ) + tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]]) + + def test_merge_on_extension_array(self, data): + # GH 23020 + a, b = data[:2] + key = type(data)._from_sequence([a, b], dtype=data.dtype) + + df = pd.DataFrame({"key": key, "val": [1, 2]}) + result = pd.merge(df, df, on="key") + expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]}) + tm.assert_frame_equal(result, expected) + + # order + result = pd.merge(df.iloc[[1, 0]], df, on="key") + expected = expected.iloc[[1, 0]].reset_index(drop=True) + tm.assert_frame_equal(result, expected) + + def test_merge_on_extension_array_duplicates(self, data): + # GH 23020 + a, b = data[:2] + key = type(data)._from_sequence([a, b, a], dtype=data.dtype) + df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]}) + df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]}) + + result = pd.merge(df1, df2, on="key") + expected = pd.DataFrame( + { + "key": key.take([0, 0, 1, 2, 2]), + "val_x": [1, 1, 2, 3, 3], + "val_y": [1, 3, 2, 1, 3], + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + @pytest.mark.parametrize( + "columns", + [ + ["A", "B"], + pd.MultiIndex.from_tuples( + [("A", "a"), ("A", "b")], names=["outer", "inner"] + ), + ], + ) + @pytest.mark.parametrize("future_stack", [True, False]) + def test_stack(self, data, columns, future_stack): + df = pd.DataFrame({"A": data[:5], "B": data[:5]}) + df.columns = columns + result = df.stack(future_stack=future_stack) + expected = df.astype(object).stack(future_stack=future_stack) + # we need a second astype(object), in case the constructor inferred + # object -> specialized, as is done for period. + expected = expected.astype(object) + + if isinstance(expected, pd.Series): + assert result.dtype == df.iloc[:, 0].dtype + else: + assert all(result.dtypes == df.iloc[:, 0].dtype) + + result = result.astype(object) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "index", + [ + # Two levels, uniform. 
+ pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]), + # non-uniform + pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]), + # three levels, non-uniform + pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]), + pd.MultiIndex.from_tuples( + [ + ("A", "a", 1), + ("A", "b", 0), + ("A", "a", 0), + ("B", "a", 0), + ("B", "c", 1), + ] + ), + ], + ) + @pytest.mark.parametrize("obj", ["series", "frame"]) + def test_unstack(self, data, index, obj): + data = data[: len(index)] + if obj == "series": + ser = pd.Series(data, index=index) + else: + ser = pd.DataFrame({"A": data, "B": data}, index=index) + + n = index.nlevels + levels = list(range(n)) + # [0, 1, 2] + # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + combinations = itertools.chain.from_iterable( + itertools.permutations(levels, i) for i in range(1, n) + ) + + for level in combinations: + result = ser.unstack(level=level) + assert all( + isinstance(result[col].array, type(data)) for col in result.columns + ) + + if obj == "series": + # We should get the same result with to_frame+unstack+droplevel + df = ser.to_frame() + + alt = df.unstack(level=level).droplevel(0, axis=1) + tm.assert_frame_equal(result, alt) + + obj_ser = ser.astype(object) + + expected = obj_ser.unstack(level=level, fill_value=data.dtype.na_value) + if obj == "series": + assert (expected.dtypes == object).all() + + result = result.astype(object) + tm.assert_frame_equal(result, expected) + + def test_ravel(self, data): + # as long as EA is 1D-only, ravel is a no-op + result = data.ravel() + assert type(result) == type(data) + + if data.dtype._is_immutable: + pytest.skip(f"test_ravel assumes mutability and {data.dtype} is immutable") + + # Check that we have a view, not a copy + result[0] = result[1] + assert data[0] == data[1] + + def test_transpose(self, data): + result = data.transpose() + assert type(result) == type(data) + + # check we get a new object + assert result is not data + + # If we ever _did_ support 2D, shape should be reversed + assert result.shape == data.shape[::-1] + + if data.dtype._is_immutable: + pytest.skip( + f"test_transpose assumes mutability and {data.dtype} is immutable" + ) + + # Check that we have a view, not a copy + result[0] = result[1] + assert data[0] == data[1] + + def test_transpose_frame(self, data): + df = pd.DataFrame({"A": data[:4], "B": data[:4]}, index=["a", "b", "c", "d"]) + result = df.T + expected = pd.DataFrame( + { + "a": type(data)._from_sequence([data[0]] * 2, dtype=data.dtype), + "b": type(data)._from_sequence([data[1]] * 2, dtype=data.dtype), + "c": type(data)._from_sequence([data[2]] * 2, dtype=data.dtype), + "d": type(data)._from_sequence([data[3]] * 2, dtype=data.dtype), + }, + index=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(np.transpose(np.transpose(df)), df) + tm.assert_frame_equal(np.transpose(np.transpose(df[["A"]])), df[["A"]]) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4c637129f0a1477517b8a4cf1d63b2005ac0fb6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32b15309a5c0077dc846f6459a5c35f485e8ec96 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_add_prefix_suffix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_add_prefix_suffix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8463cc82ab80d4e1b48d78b7391b498d0462948f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_add_prefix_suffix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_align.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_align.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd9a7e115b42622f6a65e9d4a6628a171dd95a4e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_align.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_astype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..778ceea43906de460f93a4009c2f5168a172dcdb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_astype.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_between.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_between.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bb064a1aa576be3e679825ea81ec62ad8aa65d9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_between.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_copy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_copy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15ff94797828e50f06f7d83ada5264061b928174 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_copy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_drop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_drop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58d31f10414fb0df79ecf692e874c1b80f5c21f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_drop.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_dropna.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_dropna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12165da95ec4a433717ce56d4d507e662b22c820 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_dropna.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_fillna.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_fillna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..533690393d0bf1cfb9a5c340fab6ee3220391b26 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_fillna.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_info.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f54a77d05f7b2ff0c5bd0fff0c9d4204b79a1bb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_info.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_size.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_size.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61f8d4e768a66af39dd681c9bf32477c8e564435 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_size.cpython-310.pyc differ
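Editorial sketch, not part of the vendored diff above: the files added here (pandas/tests/extension/base/methods.py, missing.py, reshaping.py) define reusable test mixins that extension-array authors subclass, supplying pytest fixtures such as `data` and `data_missing`. The snippet below is a hedged illustration of that usage pattern only; the Int64 fixtures and the class name TestMyArrayMissing are assumptions chosen for the example, and a few inherited tests may additionally need fixtures (for example `fillna_method`) that pandas defines in its own tests/extension/conftest.py.

import pytest

import pandas as pd
from pandas.tests.extension import base  # exposes BaseMissingTests etc.


@pytest.fixture
def data():
    # 100 valid scalars of the dtype under test (length and dtype are
    # illustrative assumptions, not taken from the diff).
    return pd.array(list(range(1, 101)), dtype="Int64")


@pytest.fixture
def data_missing():
    # Convention used throughout the base tests: a length-2 array laid out
    # as [missing, valid] (see the expected np.array([True, False]) in
    # BaseMissingTests.test_isna above).
    return pd.array([None, 1], dtype="Int64")


class TestMyArrayMissing(base.BaseMissingTests):
    # Inheriting the mixin pulls in every test_* method defined in
    # pandas/tests/extension/base/missing.py and runs it against the
    # fixtures defined in this module.
    pass

Run with `pytest path/to/this_module.py`; pytest collects the inherited test methods and evaluates them against the fixtures, which is how the dtype-specific suites under pandas/tests/extension consume these base classes.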