diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.pyi b/llmeval-env/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..91b5a4dbaaebc177191d3189f12e4e20d56ca0fa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.pyi
@@ -0,0 +1,5 @@
+import numpy as np
+
+def maybe_dispatch_ufunc_to_dunder_op(
+    self, ufunc: np.ufunc, method: str, *inputs, **kwargs
+): ...
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb890c8b8c0ab5cad3a72f7eeb33dd3824ed20af
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/__init__.py
@@ -0,0 +1,197 @@
+"""
+compat
+======
+
+Cross-compatible functions for different versions of Python.
+
+Other items:
+* platform checker
+"""
+from __future__ import annotations
+
+import os
+import platform
+import sys
+from typing import TYPE_CHECKING
+
+from pandas.compat._constants import (
+    IS64,
+    ISMUSL,
+    PY310,
+    PY311,
+    PY312,
+    PYPY,
+)
+import pandas.compat.compressors
+from pandas.compat.numpy import is_numpy_dev
+from pandas.compat.pyarrow import (
+    pa_version_under10p1,
+    pa_version_under11p0,
+    pa_version_under13p0,
+    pa_version_under14p0,
+    pa_version_under14p1,
+    pa_version_under16p0,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import F
+
+
+def set_function_name(f: F, name: str, cls: type) -> F:
+    """
+    Bind the name/qualname attributes of the function.
+    """
+    f.__name__ = name
+    f.__qualname__ = f"{cls.__name__}.{name}"
+    f.__module__ = cls.__module__
+    return f
+
+
+def is_platform_little_endian() -> bool:
+    """
+    Checking if the running platform is little endian.
+
+    Returns
+    -------
+    bool
+        True if the running platform is little endian.
+    """
+    return sys.byteorder == "little"
+
+
+def is_platform_windows() -> bool:
+    """
+    Checking if the running platform is windows.
+
+    Returns
+    -------
+    bool
+        True if the running platform is windows.
+    """
+    return sys.platform in ["win32", "cygwin"]
+
+
+def is_platform_linux() -> bool:
+    """
+    Checking if the running platform is linux.
+
+    Returns
+    -------
+    bool
+        True if the running platform is linux.
+    """
+    return sys.platform == "linux"
+
+
+def is_platform_mac() -> bool:
+    """
+    Checking if the running platform is mac.
+
+    Returns
+    -------
+    bool
+        True if the running platform is mac.
+    """
+    return sys.platform == "darwin"
+
+
+def is_platform_arm() -> bool:
+    """
+    Checking if the running platform uses ARM architecture.
+
+    Returns
+    -------
+    bool
+        True if the running platform uses ARM architecture.
+    """
+    return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith(
+        "armv"
+    )
+
+
+def is_platform_power() -> bool:
+    """
+    Checking if the running platform uses Power architecture.
+
+    Returns
+    -------
+    bool
+        True if the running platform uses Power architecture.
+    """
+    return platform.machine() in ("ppc64", "ppc64le")
+
+
+def is_ci_environment() -> bool:
+    """
+    Checking if running in a continuous integration environment by checking
+    the PANDAS_CI environment variable.
+
+    Returns
+    -------
+    bool
+        True if running in a continuous integration environment.
+    """
+    return os.environ.get("PANDAS_CI", "0") == "1"
+
+
+def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]:
+    """
+    Importing the `LZMAFile` class from the `lzma` module.
+
+    Returns
+    -------
+    class
+        The `LZMAFile` class from the `lzma` module.
+
+    Raises
+    ------
+    RuntimeError
+        If the `lzma` module was not imported correctly or doesn't exist.
+    """
+    if not pandas.compat.compressors.has_lzma:
+        raise RuntimeError(
+            "lzma module not available. "
+            "A Python re-install with the proper dependencies "
+            "might be required to solve this issue."
+        )
+    return pandas.compat.compressors.LZMAFile
+
+
+def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]:
+    """
+    Importing the `BZ2File` class from the `bz2` module.
+
+    Returns
+    -------
+    class
+        The `BZ2File` class from the `bz2` module.
+
+    Raises
+    ------
+    RuntimeError
+        If the `bz2` module was not imported correctly or doesn't exist.
+    """
+    if not pandas.compat.compressors.has_bz2:
+        raise RuntimeError(
+            "bz2 module not available. "
+            "A Python re-install with the proper dependencies "
+            "might be required to solve this issue."
+        )
+    return pandas.compat.compressors.BZ2File
+
+
+__all__ = [
+    "is_numpy_dev",
+    "pa_version_under10p1",
+    "pa_version_under11p0",
+    "pa_version_under13p0",
+    "pa_version_under14p0",
+    "pa_version_under14p1",
+    "pa_version_under16p0",
+    "IS64",
+    "ISMUSL",
+    "PY310",
+    "PY311",
+    "PY312",
+    "PYPY",
+]
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/_constants.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bc3fbaaefebf69d8ebd622406dc9357237add1a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/_constants.py
@@ -0,0 +1,30 @@
+"""
+_constants
+==========
+
+Constants relevant for the Python implementation.
+"""
+
+from __future__ import annotations
+
+import platform
+import sys
+import sysconfig
+
+IS64 = sys.maxsize > 2**32
+
+PY310 = sys.version_info >= (3, 10)
+PY311 = sys.version_info >= (3, 11)
+PY312 = sys.version_info >= (3, 12)
+PYPY = platform.python_implementation() == "PyPy"
+ISMUSL = "musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or "")
+REF_COUNT = 2 if PY311 else 3
+
+__all__ = [
+    "IS64",
+    "ISMUSL",
+    "PY310",
+    "PY311",
+    "PY312",
+    "PYPY",
+]
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/_optional.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/_optional.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bc6cd46f09a7e4b103658f9c2ec9a69d93d00b9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/_optional.py
@@ -0,0 +1,168 @@
+from __future__ import annotations
+
+import importlib
+import sys
+from typing import TYPE_CHECKING
+import warnings
+
+from pandas.util._exceptions import find_stack_level
+
+from pandas.util.version import Version
+
+if TYPE_CHECKING:
+    import types
+
+# Update install.rst & setup.cfg when updating versions!
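+# For illustration (a hypothetical call, not executed here):
+#     import_optional_dependency("pyarrow")
+# returns the imported pyarrow module only when its version is at least
+# VERSIONS["pyarrow"] below; otherwise it raises, warns, or returns None,
+# depending on the ``errors`` argument.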
+
+VERSIONS = {
+    "adbc-driver-postgresql": "0.8.0",
+    "adbc-driver-sqlite": "0.8.0",
+    "bs4": "4.11.2",
+    "blosc": "1.21.3",
+    "bottleneck": "1.3.6",
+    "dataframe-api-compat": "0.1.7",
+    "fastparquet": "2022.12.0",
+    "fsspec": "2022.11.0",
+    "html5lib": "1.1",
+    "hypothesis": "6.46.1",
+    "gcsfs": "2022.11.0",
+    "jinja2": "3.1.2",
+    "lxml.etree": "4.9.2",
+    "matplotlib": "3.6.3",
+    "numba": "0.56.4",
+    "numexpr": "2.8.4",
+    "odfpy": "1.4.1",
+    "openpyxl": "3.1.0",
+    "pandas_gbq": "0.19.0",
+    "psycopg2": "2.9.6",  # (dt dec pq3 ext lo64)
+    "pymysql": "1.0.2",
+    "pyarrow": "10.0.1",
+    "pyreadstat": "1.2.0",
+    "pytest": "7.3.2",
+    "python-calamine": "0.1.7",
+    "pyxlsb": "1.0.10",
+    "s3fs": "2022.11.0",
+    "scipy": "1.10.0",
+    "sqlalchemy": "2.0.0",
+    "tables": "3.8.0",
+    "tabulate": "0.9.0",
+    "xarray": "2022.12.0",
+    "xlrd": "2.0.1",
+    "xlsxwriter": "3.0.5",
+    "zstandard": "0.19.0",
+    "tzdata": "2022.7",
+    "qtpy": "2.3.0",
+    "pyqt5": "5.15.9",
+}
+
+# A mapping from import name to package name (on PyPI) for packages where
+# these two names are different.
+
+INSTALL_MAPPING = {
+    "bs4": "beautifulsoup4",
+    "bottleneck": "Bottleneck",
+    "jinja2": "Jinja2",
+    "lxml.etree": "lxml",
+    "odf": "odfpy",
+    "pandas_gbq": "pandas-gbq",
+    "python_calamine": "python-calamine",
+    "sqlalchemy": "SQLAlchemy",
+    "tables": "pytables",
+}
+
+
+def get_version(module: types.ModuleType) -> str:
+    version = getattr(module, "__version__", None)
+
+    if version is None:
+        raise ImportError(f"Can't determine version for {module.__name__}")
+    if module.__name__ == "psycopg2":
+        # psycopg2 appends " (dt dec pq3 ext lo64)" to its version
+        version = version.split()[0]
+    return version
+
+
+def import_optional_dependency(
+    name: str,
+    extra: str = "",
+    errors: str = "raise",
+    min_version: str | None = None,
+):
+    """
+    Import an optional dependency.
+
+    By default, if a dependency is missing an ImportError with a nice
+    message will be raised. If a dependency is present, but too old,
+    we raise.
+
+    Parameters
+    ----------
+    name : str
+        The module name.
+    extra : str
+        Additional text to include in the ImportError message.
+    errors : str {'raise', 'warn', 'ignore'}
+        What to do when a dependency is not found or its version is too old.
+
+        * raise : Raise an ImportError
+        * warn : Only applicable when a module's version is too old.
+          Warns that the version is too old and returns None
+        * ignore: If the module is not installed, return None; otherwise
+          return the module, even if the version is too old.
+          It's expected that users validate the version locally when
+          using ``errors="ignore"`` (see ``io/html.py``)
+    min_version : str, default None
+        Specify a minimum version that is different from the global pandas
+        minimum version required.
+
+    Returns
+    -------
+    maybe_module : Optional[ModuleType]
+        The imported module, when found and the version is correct.
+        None is returned when the package is not found and `errors`
+        is ``'ignore'``, or when the package's version is too old and
+        `errors` is ``'warn'`` or ``'ignore'``.
+    """
+    assert errors in {"warn", "raise", "ignore"}
+
+    package_name = INSTALL_MAPPING.get(name)
+    install_name = package_name if package_name is not None else name
+
+    msg = (
+        f"Missing optional dependency '{install_name}'. {extra} "
+        f"Use pip or conda to install {install_name}."
+ ) + try: + module = importlib.import_module(name) + except ImportError: + if errors == "raise": + raise ImportError(msg) + return None + + # Handle submodules: if we have submodule, grab parent module from sys.modules + parent = name.split(".")[0] + if parent != name: + install_name = parent + module_to_get = sys.modules[install_name] + else: + module_to_get = module + minimum_version = min_version if min_version is not None else VERSIONS.get(parent) + if minimum_version: + version = get_version(module_to_get) + if version and Version(version) < Version(minimum_version): + msg = ( + f"Pandas requires version '{minimum_version}' or newer of '{parent}' " + f"(version '{version}' currently installed)." + ) + if errors == "warn": + warnings.warn( + msg, + UserWarning, + stacklevel=find_stack_level(), + ) + return None + elif errors == "raise": + raise ImportError(msg) + else: + return None + + return module diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/compressors.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/compressors.py new file mode 100644 index 0000000000000000000000000000000000000000..1f31e34c092c9672559ca2f5194cb1da7083d03b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/compressors.py @@ -0,0 +1,77 @@ +""" +Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5. +""" + +from __future__ import annotations + +from pickle import PickleBuffer + +from pandas.compat._constants import PY310 + +try: + import bz2 + + has_bz2 = True +except ImportError: + has_bz2 = False + +try: + import lzma + + has_lzma = True +except ImportError: + has_lzma = False + + +def flatten_buffer( + b: bytes | bytearray | memoryview | PickleBuffer, +) -> bytes | bytearray | memoryview: + """ + Return some 1-D `uint8` typed buffer. + + Coerces anything that does not match that description to one that does + without copying if possible (otherwise will copy). + """ + + if isinstance(b, (bytes, bytearray)): + return b + + if not isinstance(b, PickleBuffer): + b = PickleBuffer(b) + + try: + # coerce to 1-D `uint8` C-contiguous `memoryview` zero-copy + return b.raw() + except BufferError: + # perform in-memory copy if buffer is not contiguous + return memoryview(b).tobytes("A") + + +if has_bz2: + + class BZ2File(bz2.BZ2File): + if not PY310: + + def write(self, b) -> int: + # Workaround issue where `bz2.BZ2File` expects `len` + # to return the number of bytes in `b` by converting + # `b` into something that meets that constraint with + # minimal copying. + # + # Note: This is fixed in Python 3.10. + return super().write(flatten_buffer(b)) + + +if has_lzma: + + class LZMAFile(lzma.LZMAFile): + if not PY310: + + def write(self, b) -> int: + # Workaround issue where `lzma.LZMAFile` expects `len` + # to return the number of bytes in `b` by converting + # `b` into something that meets that constraint with + # minimal copying. + # + # Note: This is fixed in Python 3.10. 
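+                # Illustrative only: with pickle protocol 5, ``b`` may be a
+                # PickleBuffer over a multi-dimensional or non-contiguous
+                # buffer; flatten_buffer() coerces it to a 1-D uint8 view
+                # (copying only when it must) so that len() reports the
+                # byte count this method relies on.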
+ return super().write(flatten_buffer(b)) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3014bd652d8c46b49bcf34e8d050679f70e3f7da --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py @@ -0,0 +1,53 @@ +""" support numpy compatibility across versions """ +import warnings + +import numpy as np + +from pandas.util.version import Version + +# numpy versioning +_np_version = np.__version__ +_nlv = Version(_np_version) +np_version_lt1p23 = _nlv < Version("1.23") +np_version_gte1p24 = _nlv >= Version("1.24") +np_version_gte1p24p3 = _nlv >= Version("1.24.3") +np_version_gte1p25 = _nlv >= Version("1.25") +np_version_gt2 = _nlv >= Version("2.0.0.dev0") +is_numpy_dev = _nlv.dev is not None +_min_numpy_ver = "1.22.4" + + +if _nlv < Version(_min_numpy_ver): + raise ImportError( + f"this version of pandas is incompatible with numpy < {_min_numpy_ver}\n" + f"your numpy version is {_np_version}.\n" + f"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version" + ) + + +np_long: type +np_ulong: type + +if np_version_gt2: + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + r".*In the future `np\.long` will be defined as.*", + FutureWarning, + ) + np_long = np.long # type: ignore[attr-defined] + np_ulong = np.ulong # type: ignore[attr-defined] + except AttributeError: + np_long = np.int_ + np_ulong = np.uint +else: + np_long = np.int_ + np_ulong = np.uint + + +__all__ = [ + "np", + "_np_version", + "is_numpy_dev", +] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f82a53659c9758b1fce438b371a08debba593842 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/function.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/function.py new file mode 100644 index 0000000000000000000000000000000000000000..4df30f7f4a8a79984ca6de521ac058bd30fd8faf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/numpy/function.py @@ -0,0 +1,418 @@ +""" +For compatibility with numpy libraries, pandas functions or methods have to +accept '*args' and '**kwargs' parameters to accommodate numpy arguments that +are not actually used or respected in the pandas implementation. + +To ensure that users do not abuse these parameters, validation is performed in +'validators.py' to make sure that any extra parameters passed correspond ONLY +to those in the numpy signature. Part of that validation includes whether or +not the user attempted to pass in non-default values for these extraneous +parameters. As we want to discourage users from relying on these parameters +when calling the pandas implementation, we want them only to pass in the +default values for these parameters. + +This module provides a set of commonly used default arguments for functions and +methods that are spread throughout the codebase. This module will make it +easier to adjust to future upstream changes in the analogous numpy signatures. 
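+
+For example, ``np.sum(some_series)`` reduces through ``Series.sum`` with
+numpy's ``dtype`` and ``out`` keyword arguments; ``validate_sum`` below
+accepts the call only while both keep their numpy defaults.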
+""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + TypeVar, + cast, + overload, +) + +import numpy as np +from numpy import ndarray + +from pandas._libs.lib import ( + is_bool, + is_integer, +) +from pandas.errors import UnsupportedFunctionCall +from pandas.util._validators import ( + validate_args, + validate_args_and_kwargs, + validate_kwargs, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Axis, + AxisInt, + ) + + AxisNoneT = TypeVar("AxisNoneT", Axis, None) + + +class CompatValidator: + def __init__( + self, + defaults, + fname=None, + method: str | None = None, + max_fname_arg_count=None, + ) -> None: + self.fname = fname + self.method = method + self.defaults = defaults + self.max_fname_arg_count = max_fname_arg_count + + def __call__( + self, + args, + kwargs, + fname=None, + max_fname_arg_count=None, + method: str | None = None, + ) -> None: + if not args and not kwargs: + return None + + fname = self.fname if fname is None else fname + max_fname_arg_count = ( + self.max_fname_arg_count + if max_fname_arg_count is None + else max_fname_arg_count + ) + method = self.method if method is None else method + + if method == "args": + validate_args(fname, args, max_fname_arg_count, self.defaults) + elif method == "kwargs": + validate_kwargs(fname, kwargs, self.defaults) + elif method == "both": + validate_args_and_kwargs( + fname, args, kwargs, max_fname_arg_count, self.defaults + ) + else: + raise ValueError(f"invalid validation method '{method}'") + + +ARGMINMAX_DEFAULTS = {"out": None} +validate_argmin = CompatValidator( + ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1 +) +validate_argmax = CompatValidator( + ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1 +) + + +def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]: + if isinstance(skipna, ndarray) or skipna is None: + args = (skipna,) + args + skipna = True + + return skipna, args + + +def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: + """ + If 'Series.argmin' is called via the 'numpy' library, the third parameter + in its signature is 'out', which takes either an ndarray or 'None', so + check if the 'skipna' parameter is either an instance of ndarray or is + None, since 'skipna' itself should be a boolean + """ + skipna, args = process_skipna(skipna, args) + validate_argmin(args, kwargs) + return skipna + + +def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: + """ + If 'Series.argmax' is called via the 'numpy' library, the third parameter + in its signature is 'out', which takes either an ndarray or 'None', so + check if the 'skipna' parameter is either an instance of ndarray or is + None, since 'skipna' itself should be a boolean + """ + skipna, args = process_skipna(skipna, args) + validate_argmax(args, kwargs) + return skipna + + +ARGSORT_DEFAULTS: dict[str, int | str | None] = {} +ARGSORT_DEFAULTS["axis"] = -1 +ARGSORT_DEFAULTS["kind"] = "quicksort" +ARGSORT_DEFAULTS["order"] = None +ARGSORT_DEFAULTS["kind"] = None +ARGSORT_DEFAULTS["stable"] = None + + +validate_argsort = CompatValidator( + ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both" +) + +# two different signatures of argsort, this second validation for when the +# `kind` param is supported +ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {} +ARGSORT_DEFAULTS_KIND["axis"] = -1 +ARGSORT_DEFAULTS_KIND["order"] = None +ARGSORT_DEFAULTS_KIND["stable"] = None 
+validate_argsort_kind = CompatValidator( + ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both" +) + + +def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool: + """ + If 'Categorical.argsort' is called via the 'numpy' library, the first + parameter in its signature is 'axis', which takes either an integer or + 'None', so check if the 'ascending' parameter has either integer type or is + None, since 'ascending' itself should be a boolean + """ + if is_integer(ascending) or ascending is None: + args = (ascending,) + args + ascending = True + + validate_argsort_kind(args, kwargs, max_fname_arg_count=3) + ascending = cast(bool, ascending) + return ascending + + +CLIP_DEFAULTS: dict[str, Any] = {"out": None} +validate_clip = CompatValidator( + CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3 +) + + +@overload +def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: + ... + + +@overload +def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: + ... + + +def validate_clip_with_axis( + axis: ndarray | AxisNoneT, args, kwargs +) -> AxisNoneT | None: + """ + If 'NDFrame.clip' is called via the numpy library, the third parameter in + its signature is 'out', which can takes an ndarray, so check if the 'axis' + parameter is an instance of ndarray, since 'axis' itself should either be + an integer or None + """ + if isinstance(axis, ndarray): + args = (axis,) + args + # error: Incompatible types in assignment (expression has type "None", + # variable has type "Union[ndarray[Any, Any], str, int]") + axis = None # type: ignore[assignment] + + validate_clip(args, kwargs) + # error: Incompatible return value type (got "Union[ndarray[Any, Any], + # str, int]", expected "Union[str, int, None]") + return axis # type: ignore[return-value] + + +CUM_FUNC_DEFAULTS: dict[str, Any] = {} +CUM_FUNC_DEFAULTS["dtype"] = None +CUM_FUNC_DEFAULTS["out"] = None +validate_cum_func = CompatValidator( + CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1 +) +validate_cumsum = CompatValidator( + CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1 +) + + +def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool: + """ + If this function is called via the 'numpy' library, the third parameter in + its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so + check if the 'skipna' parameter is a boolean or not + """ + if not is_bool(skipna): + args = (skipna,) + args + skipna = True + elif isinstance(skipna, np.bool_): + skipna = bool(skipna) + + validate_cum_func(args, kwargs, fname=name) + return skipna + + +ALLANY_DEFAULTS: dict[str, bool | None] = {} +ALLANY_DEFAULTS["dtype"] = None +ALLANY_DEFAULTS["out"] = None +ALLANY_DEFAULTS["keepdims"] = False +ALLANY_DEFAULTS["axis"] = None +validate_all = CompatValidator( + ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1 +) +validate_any = CompatValidator( + ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1 +) + +LOGICAL_FUNC_DEFAULTS = {"out": None, "keepdims": False} +validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs") + +MINMAX_DEFAULTS = {"axis": None, "dtype": None, "out": None, "keepdims": False} +validate_min = CompatValidator( + MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1 +) +validate_max = CompatValidator( + MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1 +) + +RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"} 
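+# e.g. np.reshape(obj, shape, order="C") dispatches to obj.reshape(shape,
+# order="C"); validate_reshape below accepts the default "C" and rejects
+# any other order.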
+validate_reshape = CompatValidator( + RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1 +) + +REPEAT_DEFAULTS: dict[str, Any] = {"axis": None} +validate_repeat = CompatValidator( + REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1 +) + +ROUND_DEFAULTS: dict[str, Any] = {"out": None} +validate_round = CompatValidator( + ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1 +) + +SORT_DEFAULTS: dict[str, int | str | None] = {} +SORT_DEFAULTS["axis"] = -1 +SORT_DEFAULTS["kind"] = "quicksort" +SORT_DEFAULTS["order"] = None +validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs") + +STAT_FUNC_DEFAULTS: dict[str, Any | None] = {} +STAT_FUNC_DEFAULTS["dtype"] = None +STAT_FUNC_DEFAULTS["out"] = None + +SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy() +SUM_DEFAULTS["axis"] = None +SUM_DEFAULTS["keepdims"] = False +SUM_DEFAULTS["initial"] = None + +PROD_DEFAULTS = SUM_DEFAULTS.copy() + +MEAN_DEFAULTS = SUM_DEFAULTS.copy() + +MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy() +MEDIAN_DEFAULTS["overwrite_input"] = False +MEDIAN_DEFAULTS["keepdims"] = False + +STAT_FUNC_DEFAULTS["keepdims"] = False + +validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs") +validate_sum = CompatValidator( + SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1 +) +validate_prod = CompatValidator( + PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1 +) +validate_mean = CompatValidator( + MEAN_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1 +) +validate_median = CompatValidator( + MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1 +) + +STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {} +STAT_DDOF_FUNC_DEFAULTS["dtype"] = None +STAT_DDOF_FUNC_DEFAULTS["out"] = None +STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False +validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs") + +TAKE_DEFAULTS: dict[str, str | None] = {} +TAKE_DEFAULTS["out"] = None +TAKE_DEFAULTS["mode"] = "raise" +validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs") + + +def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool: + """ + If this function is called via the 'numpy' library, the third parameter in + its signature is 'axis', which takes either an ndarray or 'None', so check + if the 'convert' parameter is either an instance of ndarray or is None + """ + if isinstance(convert, ndarray) or convert is None: + args = (convert,) + args + convert = True + + validate_take(args, kwargs, max_fname_arg_count=3, method="both") + return convert + + +TRANSPOSE_DEFAULTS = {"axes": None} +validate_transpose = CompatValidator( + TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0 +) + + +def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None: + """ + 'args' and 'kwargs' should be empty, except for allowed kwargs because all + of their necessary parameters are explicitly listed in the function + signature + """ + if allowed is None: + allowed = [] + + kwargs = set(kwargs) - set(allowed) + + if len(args) + len(kwargs) > 0: + raise UnsupportedFunctionCall( + "numpy operations are not valid with groupby. 
" + f"Use .groupby(...).{name}() instead" + ) + + +RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var") + + +def validate_resampler_func(method: str, args, kwargs) -> None: + """ + 'args' and 'kwargs' should be empty because all of their necessary + parameters are explicitly listed in the function signature + """ + if len(args) + len(kwargs) > 0: + if method in RESAMPLER_NUMPY_OPS: + raise UnsupportedFunctionCall( + "numpy operations are not valid with resample. " + f"Use .resample(...).{method}() instead" + ) + raise TypeError("too many arguments passed in") + + +def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None: + """ + Ensure that the axis argument passed to min, max, argmin, or argmax is zero + or None, as otherwise it will be incorrectly ignored. + + Parameters + ---------- + axis : int or None + ndim : int, default 1 + + Raises + ------ + ValueError + """ + if axis is None: + return + if axis >= ndim or (axis < 0 and ndim + axis < 0): + raise ValueError(f"`axis` must be fewer than the number of dimensions ({ndim})") + + +_validation_funcs = { + "median": validate_median, + "mean": validate_mean, + "min": validate_min, + "max": validate_max, + "sum": validate_sum, + "prod": validate_prod, +} + + +def validate_func(fname, args, kwargs) -> None: + if fname not in _validation_funcs: + return validate_stat_func(args, kwargs, fname=fname) + + validation_func = _validation_funcs[fname] + return validation_func(args, kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/pickle_compat.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..cd98087c06c18634304c29d88837017a6952a4fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/pickle_compat.py @@ -0,0 +1,262 @@ +""" +Support pre-0.12 series pickle compatibility. +""" +from __future__ import annotations + +import contextlib +import copy +import io +import pickle as pkl +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import BaseOffset + +from pandas import Index +from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.internals import BlockManager + +if TYPE_CHECKING: + from collections.abc import Generator + + +def load_reduce(self) -> None: + stack = self.stack + args = stack.pop() + func = stack[-1] + + try: + stack[-1] = func(*args) + return + except TypeError as err: + # If we have a deprecated function, + # try to replace and try again. + + msg = "_reconstruct: First argument must be a sub-type of ndarray" + + if msg in str(err): + try: + cls = args[0] + stack[-1] = object.__new__(cls) + return + except TypeError: + pass + elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset): + # TypeError: object.__new__(Day) is not safe, use Day.__new__() + cls = args[0] + stack[-1] = cls.__new__(*args) + return + elif args and issubclass(args[0], PeriodArray): + cls = args[0] + stack[-1] = NDArrayBacked.__new__(*args) + return + + raise + + +# If classes are moved, provide compat here. 
+_class_locations_map = { + ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"), + # 15477 + ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"), + # Re-routing unpickle block logic to go through _unpickle_block instead + # for pandas <= 1.3.5 + ("pandas.core.internals.blocks", "new_block"): ( + "pandas._libs.internals", + "_unpickle_block", + ), + ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"), + ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"), + # 10890 + ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"), + ("pandas.sparse.series", "SparseTimeSeries"): ( + "pandas.core.sparse.series", + "SparseSeries", + ), + # 12588, extensions moving + ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"), + ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"), + # 18543 moving period + ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"), + ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"), + # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype + ("pandas.tslib", "__nat_unpickle"): ( + "pandas._libs.tslibs.nattype", + "__nat_unpickle", + ), + ("pandas._libs.tslib", "__nat_unpickle"): ( + "pandas._libs.tslibs.nattype", + "__nat_unpickle", + ), + # 15998 top-level dirs moving + ("pandas.sparse.array", "SparseArray"): ( + "pandas.core.arrays.sparse", + "SparseArray", + ), + ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"), + ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"), + ("pandas.indexes.numeric", "Int64Index"): ( + "pandas.core.indexes.base", + "Index", # updated in 50775 + ), + ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"), + ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"), + ("pandas.tseries.index", "_new_DatetimeIndex"): ( + "pandas.core.indexes.datetimes", + "_new_DatetimeIndex", + ), + ("pandas.tseries.index", "DatetimeIndex"): ( + "pandas.core.indexes.datetimes", + "DatetimeIndex", + ), + ("pandas.tseries.period", "PeriodIndex"): ( + "pandas.core.indexes.period", + "PeriodIndex", + ), + # 19269, arrays moving + ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"), + # 19939, add timedeltaindex, float64index compat from 15998 move + ("pandas.tseries.tdi", "TimedeltaIndex"): ( + "pandas.core.indexes.timedeltas", + "TimedeltaIndex", + ), + ("pandas.indexes.numeric", "Float64Index"): ( + "pandas.core.indexes.base", + "Index", # updated in 50775 + ), + # 50775, remove Int64Index, UInt64Index & Float64Index from codabase + ("pandas.core.indexes.numeric", "Int64Index"): ( + "pandas.core.indexes.base", + "Index", + ), + ("pandas.core.indexes.numeric", "UInt64Index"): ( + "pandas.core.indexes.base", + "Index", + ), + ("pandas.core.indexes.numeric", "Float64Index"): ( + "pandas.core.indexes.base", + "Index", + ), + ("pandas.core.arrays.sparse.dtype", "SparseDtype"): ( + "pandas.core.dtypes.dtypes", + "SparseDtype", + ), +} + + +# our Unpickler sub-class to override methods and some dispatcher +# functions for compat and uses a non-public class of the pickle module. 
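+# (pkl._Unpickler is that non-public, pure-Python unpickler; unlike the C
+# implementation, its per-opcode ``dispatch`` table can be copied and patched,
+# which is done below for the REDUCE, NEWOBJ and NEWOBJ_EX opcodes.)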
+ + +class Unpickler(pkl._Unpickler): + def find_class(self, module, name): + # override superclass + key = (module, name) + module, name = _class_locations_map.get(key, key) + return super().find_class(module, name) + + +Unpickler.dispatch = copy.copy(Unpickler.dispatch) +Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce + + +def load_newobj(self) -> None: + args = self.stack.pop() + cls = self.stack[-1] + + # compat + if issubclass(cls, Index): + obj = object.__new__(cls) + elif issubclass(cls, DatetimeArray) and not args: + arr = np.array([], dtype="M8[ns]") + obj = cls.__new__(cls, arr, arr.dtype) + elif issubclass(cls, TimedeltaArray) and not args: + arr = np.array([], dtype="m8[ns]") + obj = cls.__new__(cls, arr, arr.dtype) + elif cls is BlockManager and not args: + obj = cls.__new__(cls, (), [], False) + else: + obj = cls.__new__(cls, *args) + + self.stack[-1] = obj + + +Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj + + +def load_newobj_ex(self) -> None: + kwargs = self.stack.pop() + args = self.stack.pop() + cls = self.stack.pop() + + # compat + if issubclass(cls, Index): + obj = object.__new__(cls) + else: + obj = cls.__new__(cls, *args, **kwargs) + self.append(obj) + + +try: + Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex +except (AttributeError, KeyError): + pass + + +def load(fh, encoding: str | None = None, is_verbose: bool = False): + """ + Load a pickle, with a provided encoding, + + Parameters + ---------- + fh : a filelike object + encoding : an optional encoding + is_verbose : show exception output + """ + try: + fh.seek(0) + if encoding is not None: + up = Unpickler(fh, encoding=encoding) + else: + up = Unpickler(fh) + # "Unpickler" has no attribute "is_verbose" [attr-defined] + up.is_verbose = is_verbose # type: ignore[attr-defined] + + return up.load() + except (ValueError, TypeError): + raise + + +def loads( + bytes_object: bytes, + *, + fix_imports: bool = True, + encoding: str = "ASCII", + errors: str = "strict", +): + """ + Analogous to pickle._loads. + """ + fd = io.BytesIO(bytes_object) + return Unpickler( + fd, fix_imports=fix_imports, encoding=encoding, errors=errors + ).load() + + +@contextlib.contextmanager +def patch_pickle() -> Generator[None, None, None]: + """ + Temporarily patch pickle to use our unpickler. 
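+
+    Examples
+    --------
+    A sketch of the intended use; ``legacy_bytes`` stands in for bytes
+    produced by an older pandas version:
+
+    >>> with patch_pickle():  # doctest: +SKIP
+    ...     obj = pkl.loads(legacy_bytes)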
+ """ + orig_loads = pkl.loads + try: + setattr(pkl, "loads", loads) + yield + finally: + setattr(pkl, "loads", orig_loads) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/compat/pyarrow.py b/llmeval-env/lib/python3.10/site-packages/pandas/compat/pyarrow.py new file mode 100644 index 0000000000000000000000000000000000000000..a2dfa69bbf236b3df1e2963d50f3c9aca387d4e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/compat/pyarrow.py @@ -0,0 +1,27 @@ +""" support pyarrow compatibility across versions """ + +from __future__ import annotations + +from pandas.util.version import Version + +try: + import pyarrow as pa + + _palv = Version(Version(pa.__version__).base_version) + pa_version_under10p1 = _palv < Version("10.0.1") + pa_version_under11p0 = _palv < Version("11.0.0") + pa_version_under12p0 = _palv < Version("12.0.0") + pa_version_under13p0 = _palv < Version("13.0.0") + pa_version_under14p0 = _palv < Version("14.0.0") + pa_version_under14p1 = _palv < Version("14.0.1") + pa_version_under15p0 = _palv < Version("15.0.0") + pa_version_under16p0 = _palv < Version("16.0.0") +except ImportError: + pa_version_under10p1 = True + pa_version_under11p0 = True + pa_version_under12p0 = True + pa_version_under13p0 = True + pa_version_under14p0 = True + pa_version_under14p1 = True + pa_version_under15p0 = True + pa_version_under16p0 = True diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b47d42de99ba46f0a64982c1ef7b4d9d3c28b58 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..803f2144cfae2e7e501683a254c749135ef82867 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45fd9e1ded67b5076c8e4cf8acba8ac958175ede Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c562d56f486dc68579220b384a51657816e2122e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..454a4d6c386340ec0f4a9c927681037296d82869 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bec4fb877a46138ca25624981c9c77a00717a3f9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e218ca0b01dfd4c9ed168d561af0c341cd79024e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..2d50b0f36904a94e7e16aac426e1d098fe320c94 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py @@ -0,0 +1,258 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + Index, + Series, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays.categorical import CategoricalAccessor +from pandas.core.indexes.accessors import Properties + + +class TestCatAccessor: + @pytest.mark.parametrize( + "method", + [ + lambda x: x.cat.set_categories([1, 2, 3]), + lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True), + lambda x: x.cat.rename_categories([1, 2, 3]), + lambda x: x.cat.remove_unused_categories(), + lambda x: x.cat.remove_categories([2]), + lambda x: x.cat.add_categories([4]), + lambda x: x.cat.as_ordered(), + lambda x: x.cat.as_unordered(), + ], + ) + def test_getname_categorical_accessor(self, method): + # GH#17509 + ser = Series([1, 2, 3], name="A").astype("category") + expected = "A" + result = method(ser).name + assert result == expected + + def test_cat_accessor(self): + ser = Series(Categorical(["a", "b", np.nan, "a"])) + 
tm.assert_index_equal(ser.cat.categories, Index(["a", "b"])) + assert not ser.cat.ordered, False + + exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"]) + + res = ser.cat.set_categories(["b", "a"]) + tm.assert_categorical_equal(res.values, exp) + + ser[:] = "a" + ser = ser.cat.remove_unused_categories() + tm.assert_index_equal(ser.cat.categories, Index(["a"])) + + def test_cat_accessor_api(self): + # GH#9322 + + assert Series.cat is CategoricalAccessor + ser = Series(list("aabbcde")).astype("category") + assert isinstance(ser.cat, CategoricalAccessor) + + invalid = Series([1]) + with pytest.raises(AttributeError, match="only use .cat accessor"): + invalid.cat + assert not hasattr(invalid, "cat") + + def test_cat_accessor_no_new_attributes(self): + # https://github.com/pandas-dev/pandas/issues/10673 + cat = Series(list("aabbcde")).astype("category") + with pytest.raises(AttributeError, match="You cannot add any new attribute"): + cat.cat.xlabel = "a" + + def test_categorical_delegations(self): + # invalid accessor + msg = r"Can only use \.cat accessor with a 'category' dtype" + with pytest.raises(AttributeError, match=msg): + Series([1, 2, 3]).cat + with pytest.raises(AttributeError, match=msg): + Series([1, 2, 3]).cat() + with pytest.raises(AttributeError, match=msg): + Series(["a", "b", "c"]).cat + with pytest.raises(AttributeError, match=msg): + Series(np.arange(5.0)).cat + with pytest.raises(AttributeError, match=msg): + Series([Timestamp("20130101")]).cat + + # Series should delegate calls to '.categories', '.codes', '.ordered' + # and the methods '.set_categories()' 'drop_unused_categories()' to the + # categorical + ser = Series(Categorical(["a", "b", "c", "a"], ordered=True)) + exp_categories = Index(["a", "b", "c"]) + tm.assert_index_equal(ser.cat.categories, exp_categories) + ser = ser.cat.rename_categories([1, 2, 3]) + exp_categories = Index([1, 2, 3]) + tm.assert_index_equal(ser.cat.categories, exp_categories) + + exp_codes = Series([0, 1, 2, 0], dtype="int8") + tm.assert_series_equal(ser.cat.codes, exp_codes) + + assert ser.cat.ordered + ser = ser.cat.as_unordered() + assert not ser.cat.ordered + + ser = ser.cat.as_ordered() + assert ser.cat.ordered + + # reorder + ser = Series(Categorical(["a", "b", "c", "a"], ordered=True)) + exp_categories = Index(["c", "b", "a"]) + exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_) + ser = ser.cat.set_categories(["c", "b", "a"]) + tm.assert_index_equal(ser.cat.categories, exp_categories) + tm.assert_numpy_array_equal(ser.values.__array__(), exp_values) + tm.assert_numpy_array_equal(ser.__array__(), exp_values) + + # remove unused categories + ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"])) + exp_categories = Index(["a", "b"]) + exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_) + ser = ser.cat.remove_unused_categories() + tm.assert_index_equal(ser.cat.categories, exp_categories) + tm.assert_numpy_array_equal(ser.values.__array__(), exp_values) + tm.assert_numpy_array_equal(ser.__array__(), exp_values) + + # This method is likely to be confused, so test that it raises an error + # on wrong inputs: + msg = "'Series' object has no attribute 'set_categories'" + with pytest.raises(AttributeError, match=msg): + ser.set_categories([4, 3, 2, 1]) + + # right: ser.cat.set_categories([4,3,2,1]) + + # GH#18862 (let Series.cat.rename_categories take callables) + ser = Series(Categorical(["a", "b", "c", "a"], ordered=True)) + result = ser.cat.rename_categories(lambda x: x.upper()) + expected 
= Series( + Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True) + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "idx", + [ + date_range("1/1/2015", periods=5), + date_range("1/1/2015", periods=5, tz="MET"), + period_range("1/1/2015", freq="D", periods=5), + timedelta_range("1 days", "10 days"), + ], + ) + def test_dt_accessor_api_for_categorical(self, idx): + # https://github.com/pandas-dev/pandas/issues/10661 + + ser = Series(idx) + cat = ser.astype("category") + + # only testing field (like .day) + # and bool (is_month_start) + attr_names = type(ser._values)._datetimelike_ops + + assert isinstance(cat.dt, Properties) + + special_func_defs = [ + ("strftime", ("%Y-%m-%d",), {}), + ("round", ("D",), {}), + ("floor", ("D",), {}), + ("ceil", ("D",), {}), + ("asfreq", ("D",), {}), + ("as_unit", ("s"), {}), + ] + if idx.dtype == "M8[ns]": + # exclude dt64tz since that is already localized and would raise + tup = ("tz_localize", ("UTC",), {}) + special_func_defs.append(tup) + elif idx.dtype.kind == "M": + # exclude dt64 since that is not localized so would raise + tup = ("tz_convert", ("EST",), {}) + special_func_defs.append(tup) + + _special_func_names = [f[0] for f in special_func_defs] + + _ignore_names = ["components", "tz_localize", "tz_convert"] + + func_names = [ + fname + for fname in dir(ser.dt) + if not ( + fname.startswith("_") + or fname in attr_names + or fname in _special_func_names + or fname in _ignore_names + ) + ] + + func_defs = [(fname, (), {}) for fname in func_names] + func_defs.extend( + f_def for f_def in special_func_defs if f_def[0] in dir(ser.dt) + ) + + for func, args, kwargs in func_defs: + warn_cls = [] + if func == "to_period" and getattr(idx, "tz", None) is not None: + # dropping TZ + warn_cls.append(UserWarning) + if func == "to_pydatetime": + # deprecated to return Index[object] + warn_cls.append(FutureWarning) + if warn_cls: + warn_cls = tuple(warn_cls) + else: + warn_cls = None + with tm.assert_produces_warning(warn_cls): + res = getattr(cat.dt, func)(*args, **kwargs) + exp = getattr(ser.dt, func)(*args, **kwargs) + + tm.assert_equal(res, exp) + + for attr in attr_names: + res = getattr(cat.dt, attr) + exp = getattr(ser.dt, attr) + + tm.assert_equal(res, exp) + + def test_dt_accessor_api_for_categorical_invalid(self): + invalid = Series([1, 2, 3]).astype("category") + msg = "Can only use .dt accessor with datetimelike" + + with pytest.raises(AttributeError, match=msg): + invalid.dt + assert not hasattr(invalid, "str") + + def test_set_categories_setitem(self): + # GH#43334 + + df = DataFrame({"Survived": [1, 0, 1], "Sex": [0, 1, 1]}, dtype="category") + + df["Survived"] = df["Survived"].cat.rename_categories(["No", "Yes"]) + df["Sex"] = df["Sex"].cat.rename_categories(["female", "male"]) + + # values should not be coerced to NaN + assert list(df["Sex"]) == ["female", "male", "male"] + assert list(df["Survived"]) == ["Yes", "No", "Yes"] + + df["Sex"] = Categorical(df["Sex"], categories=["female", "male"], ordered=False) + df["Survived"] = Categorical( + df["Survived"], categories=["No", "Yes"], ordered=False + ) + + # values should not be coerced to NaN + assert list(df["Sex"]) == ["female", "male", "male"] + assert list(df["Survived"]) == ["Yes", "No", "Yes"] + + def test_categorical_of_booleans_is_boolean(self): + # https://github.com/pandas-dev/pandas/issues/46313 + df = DataFrame( + {"int_cat": [1, 2, 3], "bool_cat": [True, False, False]}, dtype="category" + ) + value = 
df["bool_cat"].cat.categories.dtype + expected = np.dtype(np.bool_) + assert value is expected diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_dt_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_dt_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..34465a7c12c180dbab607d949c7e9e5a940dc86e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_dt_accessor.py @@ -0,0 +1,843 @@ +import calendar +from datetime import ( + date, + datetime, + time, +) +import locale +import unicodedata + +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs.timezones import maybe_get_tz +from pandas.errors import SettingWithCopyError + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_list_like, +) + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Period, + PeriodIndex, + Series, + TimedeltaIndex, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, +) + +ok_for_period = PeriodArray._datetimelike_ops +ok_for_period_methods = ["strftime", "to_timestamp", "asfreq"] +ok_for_dt = DatetimeArray._datetimelike_ops +ok_for_dt_methods = [ + "to_period", + "to_pydatetime", + "tz_localize", + "tz_convert", + "normalize", + "strftime", + "round", + "floor", + "ceil", + "day_name", + "month_name", + "isocalendar", + "as_unit", +] +ok_for_td = TimedeltaArray._datetimelike_ops +ok_for_td_methods = [ + "components", + "to_pytimedelta", + "total_seconds", + "round", + "floor", + "ceil", + "as_unit", +] + + +def get_dir(ser): + # check limited display api + results = [r for r in ser.dt.__dir__() if not r.startswith("_")] + return sorted(set(results)) + + +class TestSeriesDatetimeValues: + def _compare(self, ser, name): + # GH 7207, 11128 + # test .dt namespace accessor + + def get_expected(ser, prop): + result = getattr(Index(ser._values), prop) + if isinstance(result, np.ndarray): + if is_integer_dtype(result): + result = result.astype("int64") + elif not is_list_like(result) or isinstance(result, DataFrame): + return result + return Series(result, index=ser.index, name=ser.name) + + left = getattr(ser.dt, name) + right = get_expected(ser, name) + if not (is_list_like(left) and is_list_like(right)): + assert left == right + elif isinstance(left, DataFrame): + tm.assert_frame_equal(left, right) + else: + tm.assert_series_equal(left, right) + + @pytest.mark.parametrize("freq", ["D", "s", "ms"]) + def test_dt_namespace_accessor_datetime64(self, freq): + # GH#7207, GH#11128 + # test .dt namespace accessor + + # datetimeindex + dti = date_range("20130101", periods=5, freq=freq) + ser = Series(dti, name="xxx") + + for prop in ok_for_dt: + # we test freq below + if prop != "freq": + self._compare(ser, prop) + + for prop in ok_for_dt_methods: + getattr(ser.dt, prop) + + msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.dt.to_pydatetime() + assert isinstance(result, np.ndarray) + assert result.dtype == object + + result = ser.dt.tz_localize("US/Eastern") + exp_values = DatetimeIndex(ser.values).tz_localize("US/Eastern") + expected = Series(exp_values, index=ser.index, name="xxx") + tm.assert_series_equal(result, expected) + + tz_result = result.dt.tz + assert str(tz_result) == "US/Eastern" + freq_result = 
ser.dt.freq + assert freq_result == DatetimeIndex(ser.values, freq="infer").freq + + # let's localize, then convert + result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern") + exp_values = ( + DatetimeIndex(ser.values).tz_localize("UTC").tz_convert("US/Eastern") + ) + expected = Series(exp_values, index=ser.index, name="xxx") + tm.assert_series_equal(result, expected) + + def test_dt_namespace_accessor_datetime64tz(self): + # GH#7207, GH#11128 + # test .dt namespace accessor + + # datetimeindex with tz + dti = date_range("20130101", periods=5, tz="US/Eastern") + ser = Series(dti, name="xxx") + for prop in ok_for_dt: + # we test freq below + if prop != "freq": + self._compare(ser, prop) + + for prop in ok_for_dt_methods: + getattr(ser.dt, prop) + + msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.dt.to_pydatetime() + assert isinstance(result, np.ndarray) + assert result.dtype == object + + result = ser.dt.tz_convert("CET") + expected = Series(ser._values.tz_convert("CET"), index=ser.index, name="xxx") + tm.assert_series_equal(result, expected) + + tz_result = result.dt.tz + assert str(tz_result) == "CET" + freq_result = ser.dt.freq + assert freq_result == DatetimeIndex(ser.values, freq="infer").freq + + def test_dt_namespace_accessor_timedelta(self): + # GH#7207, GH#11128 + # test .dt namespace accessor + + # timedelta index + cases = [ + Series( + timedelta_range("1 day", periods=5), index=list("abcde"), name="xxx" + ), + Series(timedelta_range("1 day 01:23:45", periods=5, freq="s"), name="xxx"), + Series( + timedelta_range("2 days 01:23:45.012345", periods=5, freq="ms"), + name="xxx", + ), + ] + for ser in cases: + for prop in ok_for_td: + # we test freq below + if prop != "freq": + self._compare(ser, prop) + + for prop in ok_for_td_methods: + getattr(ser.dt, prop) + + result = ser.dt.components + assert isinstance(result, DataFrame) + tm.assert_index_equal(result.index, ser.index) + + result = ser.dt.to_pytimedelta() + assert isinstance(result, np.ndarray) + assert result.dtype == object + + result = ser.dt.total_seconds() + assert isinstance(result, Series) + assert result.dtype == "float64" + + freq_result = ser.dt.freq + assert freq_result == TimedeltaIndex(ser.values, freq="infer").freq + + def test_dt_namespace_accessor_period(self): + # GH#7207, GH#11128 + # test .dt namespace accessor + + # periodindex + pi = period_range("20130101", periods=5, freq="D") + ser = Series(pi, name="xxx") + + for prop in ok_for_period: + # we test freq below + if prop != "freq": + self._compare(ser, prop) + + for prop in ok_for_period_methods: + getattr(ser.dt, prop) + + freq_result = ser.dt.freq + assert freq_result == PeriodIndex(ser.values).freq + + def test_dt_namespace_accessor_index_and_values(self): + # both + index = date_range("20130101", periods=3, freq="D") + dti = date_range("20140204", periods=3, freq="s") + ser = Series(dti, index=index, name="xxx") + exp = Series( + np.array([2014, 2014, 2014], dtype="int32"), index=index, name="xxx" + ) + tm.assert_series_equal(ser.dt.year, exp) + + exp = Series(np.array([2, 2, 2], dtype="int32"), index=index, name="xxx") + tm.assert_series_equal(ser.dt.month, exp) + + exp = Series(np.array([0, 1, 2], dtype="int32"), index=index, name="xxx") + tm.assert_series_equal(ser.dt.second, exp) + + exp = Series([ser.iloc[0]] * 3, index=index, name="xxx") + tm.assert_series_equal(ser.dt.normalize(), exp) + + def test_dt_accessor_limited_display_api(self): + # 
tznaive + ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx") + results = get_dir(ser) + tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) + + # tzaware + ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx") + ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") + results = get_dir(ser) + tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) + + # Period + idx = period_range("20130101", periods=5, freq="D", name="xxx").astype(object) + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + ser = Series(idx) + results = get_dir(ser) + tm.assert_almost_equal( + results, sorted(set(ok_for_period + ok_for_period_methods)) + ) + + def test_dt_accessor_ambiguous_freq_conversions(self): + # GH#11295 + # ambiguous time error on the conversions + ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx") + ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") + + exp_values = date_range( + "2015-01-01", "2016-01-01", freq="min", tz="UTC" + ).tz_convert("America/Chicago") + # freq not preserved by tz_localize above + exp_values = exp_values._with_freq(None) + expected = Series(exp_values, name="xxx") + tm.assert_series_equal(ser, expected) + + def test_dt_accessor_not_writeable(self, using_copy_on_write, warn_copy_on_write): + # no setting allowed + ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx") + with pytest.raises(ValueError, match="modifications"): + ser.dt.hour = 5 + + # trying to set a copy + msg = "modifications to a property of a datetimelike.+not supported" + with pd.option_context("chained_assignment", "raise"): + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + ser.dt.hour[0] = 5 + elif warn_copy_on_write: + with tm.assert_produces_warning( + FutureWarning, match="ChainedAssignmentError" + ): + ser.dt.hour[0] = 5 + else: + with pytest.raises(SettingWithCopyError, match=msg): + ser.dt.hour[0] = 5 + + @pytest.mark.parametrize( + "method, dates", + [ + ["round", ["2012-01-02", "2012-01-02", "2012-01-01"]], + ["floor", ["2012-01-01", "2012-01-01", "2012-01-01"]], + ["ceil", ["2012-01-02", "2012-01-02", "2012-01-02"]], + ], + ) + def test_dt_round(self, method, dates): + # round + ser = Series( + pd.to_datetime( + ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"] + ), + name="xxx", + ) + result = getattr(ser.dt, method)("D") + expected = Series(pd.to_datetime(dates), name="xxx") + tm.assert_series_equal(result, expected) + + def test_dt_round_tz(self): + ser = Series( + pd.to_datetime( + ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"] + ), + name="xxx", + ) + result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D") + + exp_values = pd.to_datetime( + ["2012-01-01", "2012-01-01", "2012-01-01"] + ).tz_localize("US/Eastern") + expected = Series(exp_values, name="xxx") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("method", ["ceil", "round", "floor"]) + def test_dt_round_tz_ambiguous(self, method): + # GH 18946 round near "fall back" DST + df1 = DataFrame( + [ + pd.to_datetime("2017-10-29 02:00:00+02:00", utc=True), + pd.to_datetime("2017-10-29 02:00:00+01:00", utc=True), + pd.to_datetime("2017-10-29 03:00:00+01:00", utc=True), + ], + columns=["date"], + ) + df1["date"] = df1["date"].dt.tz_convert("Europe/Madrid") + # infer + result = getattr(df1.date.dt, method)("h", ambiguous="infer") + expected = df1["date"] + 
tm.assert_series_equal(result, expected) + + # bool-array + result = getattr(df1.date.dt, method)("h", ambiguous=[True, False, False]) + tm.assert_series_equal(result, expected) + + # NaT + result = getattr(df1.date.dt, method)("h", ambiguous="NaT") + expected = df1["date"].copy() + expected.iloc[0:2] = pd.NaT + tm.assert_series_equal(result, expected) + + # raise + with tm.external_error_raised(pytz.AmbiguousTimeError): + getattr(df1.date.dt, method)("h", ambiguous="raise") + + @pytest.mark.parametrize( + "method, ts_str, freq", + [ + ["ceil", "2018-03-11 01:59:00-0600", "5min"], + ["round", "2018-03-11 01:59:00-0600", "5min"], + ["floor", "2018-03-11 03:01:00-0500", "2h"], + ], + ) + def test_dt_round_tz_nonexistent(self, method, ts_str, freq): + # GH 23324 round near "spring forward" DST + ser = Series([pd.Timestamp(ts_str, tz="America/Chicago")]) + result = getattr(ser.dt, method)(freq, nonexistent="shift_forward") + expected = Series([pd.Timestamp("2018-03-11 03:00:00", tz="America/Chicago")]) + tm.assert_series_equal(result, expected) + + result = getattr(ser.dt, method)(freq, nonexistent="NaT") + expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz) + tm.assert_series_equal(result, expected) + + with pytest.raises(pytz.NonExistentTimeError, match="2018-03-11 02:00:00"): + getattr(ser.dt, method)(freq, nonexistent="raise") + + @pytest.mark.parametrize("freq", ["ns", "us", "1000us"]) + def test_dt_round_nonnano_higher_resolution_no_op(self, freq): + # GH 52761 + ser = Series( + ["2020-05-31 08:00:00", "2000-12-31 04:00:05", "1800-03-14 07:30:20"], + dtype="datetime64[ms]", + ) + expected = ser.copy() + result = ser.dt.round(freq) + tm.assert_series_equal(result, expected) + + assert not np.shares_memory(ser.array._ndarray, result.array._ndarray) + + def test_dt_namespace_accessor_categorical(self): + # GH 19468 + dti = DatetimeIndex(["20171111", "20181212"]).repeat(2) + ser = Series(pd.Categorical(dti), name="foo") + result = ser.dt.year + expected = Series([2017, 2017, 2018, 2018], dtype="int32", name="foo") + tm.assert_series_equal(result, expected) + + def test_dt_tz_localize_categorical(self, tz_aware_fixture): + # GH 27952 + tz = tz_aware_fixture + datetimes = Series( + ["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns]" + ) + categorical = datetimes.astype("category") + result = categorical.dt.tz_localize(tz) + expected = datetimes.dt.tz_localize(tz) + tm.assert_series_equal(result, expected) + + def test_dt_tz_convert_categorical(self, tz_aware_fixture): + # GH 27952 + tz = tz_aware_fixture + datetimes = Series( + ["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns, MET]" + ) + categorical = datetimes.astype("category") + result = categorical.dt.tz_convert(tz) + expected = datetimes.dt.tz_convert(tz) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("accessor", ["year", "month", "day"]) + def test_dt_other_accessors_categorical(self, accessor): + # GH 27952 + datetimes = Series( + ["2018-01-01", "2018-01-01", "2019-01-02"], dtype="datetime64[ns]" + ) + categorical = datetimes.astype("category") + result = getattr(categorical.dt, accessor) + expected = getattr(datetimes.dt, accessor) + tm.assert_series_equal(result, expected) + + def test_dt_accessor_no_new_attributes(self): + # https://github.com/pandas-dev/pandas/issues/10673 + ser = Series(date_range("20130101", periods=5, freq="D")) + with pytest.raises(AttributeError, match="You cannot add any new attribute"): + ser.dt.xlabel = "a" + + # error: Unsupported operand types 
for + ("List[None]" and "List[str]") + @pytest.mark.parametrize( + "time_locale", [None] + tm.get_locales() # type: ignore[operator] + ) + def test_dt_accessor_datetime_name_accessors(self, time_locale): + # Test Monday -> Sunday and January -> December, in that sequence + if time_locale is None: + # If the time_locale is None, day-name and month_name should + # return the english attributes + expected_days = [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + ] + expected_months = [ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + else: + with tm.set_locale(time_locale, locale.LC_TIME): + expected_days = calendar.day_name[:] + expected_months = calendar.month_name[1:] + + ser = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365)) + english_days = [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + ] + for day, name, eng_name in zip(range(4, 11), expected_days, english_days): + name = name.capitalize() + assert ser.dt.day_name(locale=time_locale)[day] == name + assert ser.dt.day_name(locale=None)[day] == eng_name + ser = pd.concat([ser, Series([pd.NaT])]) + assert np.isnan(ser.dt.day_name(locale=time_locale).iloc[-1]) + + ser = Series(date_range(freq="ME", start="2012", end="2013")) + result = ser.dt.month_name(locale=time_locale) + expected = Series([month.capitalize() for month in expected_months]) + + # work around https://github.com/pandas-dev/pandas/issues/22342 + result = result.str.normalize("NFD") + expected = expected.str.normalize("NFD") + + tm.assert_series_equal(result, expected) + + for s_date, expected in zip(ser, expected_months): + result = s_date.month_name(locale=time_locale) + expected = expected.capitalize() + + result = unicodedata.normalize("NFD", result) + expected = unicodedata.normalize("NFD", expected) + + assert result == expected + + ser = pd.concat([ser, Series([pd.NaT])]) + assert np.isnan(ser.dt.month_name(locale=time_locale).iloc[-1]) + + def test_strftime(self): + # GH 10086 + ser = Series(date_range("20130101", periods=5)) + result = ser.dt.strftime("%Y/%m/%d") + expected = Series( + ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] + ) + tm.assert_series_equal(result, expected) + + ser = Series(date_range("2015-02-03 11:22:33.4567", periods=5)) + result = ser.dt.strftime("%Y/%m/%d %H-%M-%S") + expected = Series( + [ + "2015/02/03 11-22-33", + "2015/02/04 11-22-33", + "2015/02/05 11-22-33", + "2015/02/06 11-22-33", + "2015/02/07 11-22-33", + ] + ) + tm.assert_series_equal(result, expected) + + ser = Series(period_range("20130101", periods=5)) + result = ser.dt.strftime("%Y/%m/%d") + expected = Series( + ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] + ) + tm.assert_series_equal(result, expected) + + ser = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s")) + result = ser.dt.strftime("%Y/%m/%d %H-%M-%S") + expected = Series( + [ + "2015/02/03 11-22-33", + "2015/02/03 11-22-34", + "2015/02/03 11-22-35", + "2015/02/03 11-22-36", + "2015/02/03 11-22-37", + ] + ) + tm.assert_series_equal(result, expected) + + def test_strftime_dt64_days(self): + ser = Series(date_range("20130101", periods=5)) + ser.iloc[0] = pd.NaT + result = ser.dt.strftime("%Y/%m/%d") + expected = Series( + [np.nan, "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] + ) + tm.assert_series_equal(result, expected) + + 
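+ # A minimal illustrative sketch (not part of the original test): strftime
+ # formats element-wise and propagates missing values, so NaT surfaces as
+ # NaN in an object-dtype result instead of raising.
+ demo = Series([pd.NaT, pd.Timestamp("2013-01-02")])
+ assert demo.dt.strftime("%Y/%m/%d").isna().iloc[0]
+ assert demo.dt.strftime("%Y/%m/%d").dtype == object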
datetime_index = date_range("20150301", periods=5) + result = datetime_index.strftime("%Y/%m/%d") + + expected = Index( + ["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"], + dtype=np.object_, + ) + # dtype may be S10 or U10 depending on python version + tm.assert_index_equal(result, expected) + + def test_strftime_period_days(self, using_infer_string): + period_index = period_range("20150301", periods=5) + result = period_index.strftime("%Y/%m/%d") + expected = Index( + ["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"], + dtype="=U10", + ) + if using_infer_string: + expected = expected.astype("string[pyarrow_numpy]") + tm.assert_index_equal(result, expected) + + def test_strftime_dt64_microsecond_resolution(self): + ser = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)]) + result = ser.dt.strftime("%Y-%m-%d %H:%M:%S") + expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"]) + tm.assert_series_equal(result, expected) + + def test_strftime_period_hours(self): + ser = Series(period_range("20130101", periods=4, freq="h")) + result = ser.dt.strftime("%Y/%m/%d %H:%M:%S") + expected = Series( + [ + "2013/01/01 00:00:00", + "2013/01/01 01:00:00", + "2013/01/01 02:00:00", + "2013/01/01 03:00:00", + ] + ) + tm.assert_series_equal(result, expected) + + def test_strftime_period_minutes(self): + ser = Series(period_range("20130101", periods=4, freq="ms")) + result = ser.dt.strftime("%Y/%m/%d %H:%M:%S.%l") + expected = Series( + [ + "2013/01/01 00:00:00.000", + "2013/01/01 00:00:00.001", + "2013/01/01 00:00:00.002", + "2013/01/01 00:00:00.003", + ] + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data", + [ + DatetimeIndex(["2019-01-01", pd.NaT]), + PeriodIndex(["2019-01-01", pd.NaT], dtype="period[D]"), + ], + ) + def test_strftime_nat(self, data): + # GH 29578 + ser = Series(data) + result = ser.dt.strftime("%Y-%m-%d") + expected = Series(["2019-01-01", np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data", [DatetimeIndex([pd.NaT]), PeriodIndex([pd.NaT], dtype="period[D]")] + ) + def test_strftime_all_nat(self, data): + # https://github.com/pandas-dev/pandas/issues/45858 + ser = Series(data) + with tm.assert_produces_warning(None): + result = ser.dt.strftime("%Y-%m-%d") + expected = Series([np.nan], dtype=object) + tm.assert_series_equal(result, expected) + + def test_valid_dt_with_missing_values(self): + # GH 8689 + ser = Series(date_range("20130101", periods=5, freq="D")) + ser.iloc[2] = pd.NaT + + for attr in ["microsecond", "nanosecond", "second", "minute", "hour", "day"]: + expected = getattr(ser.dt, attr).copy() + expected.iloc[2] = np.nan + result = getattr(ser.dt, attr) + tm.assert_series_equal(result, expected) + + result = ser.dt.date + expected = Series( + [ + date(2013, 1, 1), + date(2013, 1, 2), + pd.NaT, + date(2013, 1, 4), + date(2013, 1, 5), + ], + dtype="object", + ) + tm.assert_series_equal(result, expected) + + result = ser.dt.time + expected = Series([time(0), time(0), pd.NaT, time(0), time(0)], dtype="object") + tm.assert_series_equal(result, expected) + + def test_dt_accessor_api(self): + # GH 9322 + from pandas.core.indexes.accessors import ( + CombinedDatetimelikeProperties, + DatetimeProperties, + ) + + assert Series.dt is CombinedDatetimelikeProperties + + ser = Series(date_range("2000-01-01", periods=3)) + assert isinstance(ser.dt, DatetimeProperties) + + @pytest.mark.parametrize( + "ser", + [ + Series(np.arange(5)), + 
Series(list("abcde")), + Series(np.random.default_rng(2).standard_normal(5)), + ], + ) + def test_dt_accessor_invalid(self, ser): + # GH#9322 check that series with incorrect dtypes don't have attr + with pytest.raises(AttributeError, match="only use .dt accessor"): + ser.dt + assert not hasattr(ser, "dt") + + def test_dt_accessor_updates_on_inplace(self): + ser = Series(date_range("2018-01-01", periods=10)) + ser[2] = None + return_value = ser.fillna(pd.Timestamp("2018-01-01"), inplace=True) + assert return_value is None + result = ser.dt.date + assert result[0] == result[2] + + def test_date_tz(self): + # GH11757 + rng = DatetimeIndex( + ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], + tz="US/Eastern", + ) + ser = Series(rng) + expected = Series([date(2014, 4, 4), date(2014, 7, 18), date(2015, 11, 22)]) + tm.assert_series_equal(ser.dt.date, expected) + tm.assert_series_equal(ser.apply(lambda x: x.date()), expected) + + def test_dt_timetz_accessor(self, tz_naive_fixture): + # GH21358 + tz = maybe_get_tz(tz_naive_fixture) + + dtindex = DatetimeIndex( + ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz=tz + ) + ser = Series(dtindex) + expected = Series( + [time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz), time(22, 14, tzinfo=tz)] + ) + result = ser.dt.timetz + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "input_series, expected_output", + [ + [["2020-01-01"], [[2020, 1, 3]]], + [[pd.NaT], [[np.nan, np.nan, np.nan]]], + [["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]], + [["2010-01-01", pd.NaT], [[2009, 53, 5], [np.nan, np.nan, np.nan]]], + # see GH#36032 + [["2016-01-08", "2016-01-04"], [[2016, 1, 5], [2016, 1, 1]]], + [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]], + ], + ) + def test_isocalendar(self, input_series, expected_output): + result = pd.to_datetime(Series(input_series)).dt.isocalendar() + expected_frame = DataFrame( + expected_output, columns=["year", "week", "day"], dtype="UInt32" + ) + tm.assert_frame_equal(result, expected_frame) + + def test_hour_index(self): + dt_series = Series( + date_range(start="2021-01-01", periods=5, freq="h"), + index=[2, 6, 7, 8, 11], + dtype="category", + ) + result = dt_series.dt.hour + expected = Series( + [0, 1, 2, 3, 4], + dtype="int32", + index=[2, 6, 7, 8, 11], + ) + tm.assert_series_equal(result, expected) + + +class TestSeriesPeriodValuesDtAccessor: + @pytest.mark.parametrize( + "input_vals", + [ + [Period("2016-01", freq="M"), Period("2016-02", freq="M")], + [Period("2016-01-01", freq="D"), Period("2016-01-02", freq="D")], + [ + Period("2016-01-01 00:00:00", freq="h"), + Period("2016-01-01 01:00:00", freq="h"), + ], + [ + Period("2016-01-01 00:00:00", freq="M"), + Period("2016-01-01 00:01:00", freq="M"), + ], + [ + Period("2016-01-01 00:00:00", freq="s"), + Period("2016-01-01 00:00:01", freq="s"), + ], + ], + ) + def test_end_time_timevalues(self, input_vals): + # GH#17157 + # Check that the time part of the Period is adjusted by end_time + # when using the dt accessor on a Series + input_vals = PeriodArray._from_sequence(np.asarray(input_vals)) + + ser = Series(input_vals) + result = ser.dt.end_time + expected = ser.apply(lambda x: x.end_time) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("input_vals", [("2001"), ("NaT")]) + def test_to_period(self, input_vals): + # GH#21205 + expected = Series([input_vals], dtype="Period[D]") + result = Series([input_vals], dtype="datetime64[ns]").dt.to_period("D") + 
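+ # Hedged aside (illustrative, public Period API only): .dt.to_period("D")
+ # truncates each timestamp to its containing period, so any time-of-day
+ # component is dropped before the comparison below.
+ assert Period("2001-01-01 12:34", freq="D") == Period("2001-01-01", freq="D")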
tm.assert_series_equal(result, expected)
+
+
+def test_normalize_pre_epoch_dates():
+ # GH: 36294
+ ser = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))
+ result = ser.dt.normalize()
+ expected = pd.to_datetime(Series(["1969-01-01", "2016-01-01"]))
+ tm.assert_series_equal(result, expected)
+
+
+def test_day_attribute_non_nano_beyond_int32():
+ # GH 52386
+ data = np.array(
+ [
+ 136457654736252,
+ 134736784364431,
+ 245345345545332,
+ 223432411,
+ 2343241,
+ 3634548734,
+ 23234,
+ ],
+ dtype="timedelta64[s]",
+ )
+ ser = Series(data)
+ result = ser.dt.days
+ expected = Series([1579371003, 1559453522, 2839645203, 2586, 27, 42066, 0])
+ tm.assert_series_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c60567c1a5300dd4118ae729c3c784eabd4d747
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py
@@ -0,0 +1,129 @@
+import re
+
+import pytest
+
+from pandas import (
+ ArrowDtype,
+ Series,
+)
+import pandas._testing as tm
+
+pa = pytest.importorskip("pyarrow")
+
+from pandas.compat import pa_version_under11p0
+
+
+@pytest.mark.parametrize(
+ "list_dtype",
+ (
+ pa.list_(pa.int64()),
+ pa.list_(pa.int64(), list_size=3),
+ pa.large_list(pa.int64()),
+ ),
+)
+def test_list_getitem(list_dtype):
+ ser = Series(
+ [[1, 2, 3], [4, None, 5], None],
+ dtype=ArrowDtype(list_dtype),
+ )
+ actual = ser.list[1]
+ expected = Series([2, None, None], dtype="int64[pyarrow]")
+ tm.assert_series_equal(actual, expected)
+
+
+def test_list_getitem_slice():
+ ser = Series(
+ [[1, 2, 3], [4, None, 5], None],
+ dtype=ArrowDtype(pa.list_(pa.int64())),
+ )
+ if pa_version_under11p0:
+ with pytest.raises(
+ NotImplementedError, match="List slice not supported by pyarrow < 11.0."
+ ):
+ ser.list[1:None:None]
+ else:
+ actual = ser.list[1:None:None]
+ expected = Series(
+ [[2, 3], [None, 5], None], dtype=ArrowDtype(pa.list_(pa.int64()))
+ )
+ tm.assert_series_equal(actual, expected)
+
+
+def test_list_len():
+ ser = Series(
+ [[1, 2, 3], [4, None], None],
+ dtype=ArrowDtype(pa.list_(pa.int64())),
+ )
+ actual = ser.list.len()
+ expected = Series([3, 2, None], dtype=ArrowDtype(pa.int32()))
+ tm.assert_series_equal(actual, expected)
+
+
+def test_list_flatten():
+ ser = Series(
+ [[1, 2, 3], [4, None], None],
+ dtype=ArrowDtype(pa.list_(pa.int64())),
+ )
+ actual = ser.list.flatten()
+ expected = Series([1, 2, 3, 4, None], dtype=ArrowDtype(pa.int64()))
+ tm.assert_series_equal(actual, expected)
+
+
+def test_list_getitem_slice_invalid():
+ ser = Series(
+ [[1, 2, 3], [4, None, 5], None],
+ dtype=ArrowDtype(pa.list_(pa.int64())),
+ )
+ if pa_version_under11p0:
+ with pytest.raises(
+ NotImplementedError, match="List slice not supported by pyarrow < 11.0."
+ ):
+ ser.list[1:None:0]
+ else:
+ with pytest.raises(pa.lib.ArrowInvalid, match=re.escape("`step` must be >= 1")):
+ ser.list[1:None:0]
+
+
+def test_list_accessor_non_list_dtype():
+ ser = Series(
+ [1, 2, 4],
+ dtype=ArrowDtype(pa.int64()),
+ )
+ with pytest.raises(
+ AttributeError,
+ match=re.escape(
+ "Can only use the '.list' accessor with 'list[pyarrow]' dtype, "
+ "not int64[pyarrow]."
+ ), + ): + ser.list[1:None:0] + + +@pytest.mark.parametrize( + "list_dtype", + ( + pa.list_(pa.int64()), + pa.list_(pa.int64(), list_size=3), + pa.large_list(pa.int64()), + ), +) +def test_list_getitem_invalid_index(list_dtype): + ser = Series( + [[1, 2, 3], [4, None, 5], None], + dtype=ArrowDtype(list_dtype), + ) + with pytest.raises(pa.lib.ArrowInvalid, match="Index -1 is out of bounds"): + ser.list[-1] + with pytest.raises(pa.lib.ArrowInvalid, match="Index 5 is out of bounds"): + ser.list[5] + with pytest.raises(ValueError, match="key must be an int or slice, got str"): + ser.list["abc"] + + +def test_list_accessor_not_iterable(): + ser = Series( + [[1, 2, 3], [4, None], None], + dtype=ArrowDtype(pa.list_(pa.int64())), + ) + with pytest.raises(TypeError, match="'ListAccessor' object is not iterable"): + iter(ser.list) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..118095b5dcdbc4f63ba511feb954673485f34427 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py @@ -0,0 +1,9 @@ +from pandas import Series + + +class TestSparseAccessor: + def test_sparse_accessor_updates_on_inplace(self): + ser = Series([1, 1, 2, 3], dtype="Sparse[int]") + return_value = ser.drop([0, 1], inplace=True) + assert return_value is None + assert ser.sparse.density == 1.0 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..09d965ef1f32268550bbf35a17f11529545054ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py @@ -0,0 +1,25 @@ +import pytest + +from pandas import Series +import pandas._testing as tm + + +class TestStrAccessor: + def test_str_attribute(self): + # GH#9068 + methods = ["strip", "rstrip", "lstrip"] + ser = Series([" jack", "jill ", " jesse ", "frank"]) + for method in methods: + expected = Series([getattr(str, method)(x) for x in ser.values]) + tm.assert_series_equal(getattr(Series.str, method)(ser.str), expected) + + # str accessor only valid with string values + ser = Series(range(5)) + with pytest.raises(AttributeError, match="only use .str accessor"): + ser.str.repeat(2) + + def test_str_accessor_updates_on_inplace(self): + ser = Series(list("abc")) + return_value = ser.drop([0], inplace=True) + assert return_value is None + assert len(ser.str.lower()) == 2 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_struct_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_struct_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..80aea75fda406cf77e0fda73a6d9849c393e7d83 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/accessors/test_struct_accessor.py @@ -0,0 +1,196 @@ +import re + +import pytest + +from pandas.compat.pyarrow import ( + pa_version_under11p0, + pa_version_under13p0, +) + +from pandas import ( + ArrowDtype, + DataFrame, + Index, + Series, +) +import pandas._testing as tm + +pa = pytest.importorskip("pyarrow") +pc = pytest.importorskip("pyarrow.compute") + + +def test_struct_accessor_dtypes(): + ser = 
Series( + [], + dtype=ArrowDtype( + pa.struct( + [ + ("int_col", pa.int64()), + ("string_col", pa.string()), + ( + "struct_col", + pa.struct( + [ + ("int_col", pa.int64()), + ("float_col", pa.float64()), + ] + ), + ), + ] + ) + ), + ) + actual = ser.struct.dtypes + expected = Series( + [ + ArrowDtype(pa.int64()), + ArrowDtype(pa.string()), + ArrowDtype( + pa.struct( + [ + ("int_col", pa.int64()), + ("float_col", pa.float64()), + ] + ) + ), + ], + index=Index(["int_col", "string_col", "struct_col"]), + ) + tm.assert_series_equal(actual, expected) + + +@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required") +def test_struct_accessor_field(): + index = Index([-100, 42, 123]) + ser = Series( + [ + {"rice": 1.0, "maize": -1, "wheat": "a"}, + {"rice": 2.0, "maize": 0, "wheat": "b"}, + {"rice": 3.0, "maize": 1, "wheat": "c"}, + ], + dtype=ArrowDtype( + pa.struct( + [ + ("rice", pa.float64()), + ("maize", pa.int64()), + ("wheat", pa.string()), + ] + ) + ), + index=index, + ) + by_name = ser.struct.field("maize") + by_name_expected = Series( + [-1, 0, 1], + dtype=ArrowDtype(pa.int64()), + index=index, + name="maize", + ) + tm.assert_series_equal(by_name, by_name_expected) + + by_index = ser.struct.field(2) + by_index_expected = Series( + ["a", "b", "c"], + dtype=ArrowDtype(pa.string()), + index=index, + name="wheat", + ) + tm.assert_series_equal(by_index, by_index_expected) + + +def test_struct_accessor_field_with_invalid_name_or_index(): + ser = Series([], dtype=ArrowDtype(pa.struct([("field", pa.int64())]))) + + with pytest.raises(ValueError, match="name_or_index must be an int, str,"): + ser.struct.field(1.1) + + +@pytest.mark.skipif(pa_version_under11p0, reason="pyarrow>=11.0.0 required") +def test_struct_accessor_explode(): + index = Index([-100, 42, 123]) + ser = Series( + [ + {"painted": 1, "snapping": {"sea": "green"}}, + {"painted": 2, "snapping": {"sea": "leatherback"}}, + {"painted": 3, "snapping": {"sea": "hawksbill"}}, + ], + dtype=ArrowDtype( + pa.struct( + [ + ("painted", pa.int64()), + ("snapping", pa.struct([("sea", pa.string())])), + ] + ) + ), + index=index, + ) + actual = ser.struct.explode() + expected = DataFrame( + { + "painted": Series([1, 2, 3], index=index, dtype=ArrowDtype(pa.int64())), + "snapping": Series( + [{"sea": "green"}, {"sea": "leatherback"}, {"sea": "hawksbill"}], + index=index, + dtype=ArrowDtype(pa.struct([("sea", pa.string())])), + ), + }, + ) + tm.assert_frame_equal(actual, expected) + + +@pytest.mark.parametrize( + "invalid", + [ + pytest.param(Series([1, 2, 3], dtype="int64"), id="int64"), + pytest.param( + Series(["a", "b", "c"], dtype="string[pyarrow]"), id="string-pyarrow" + ), + ], +) +def test_struct_accessor_api_for_invalid(invalid): + with pytest.raises( + AttributeError, + match=re.escape( + "Can only use the '.struct' accessor with 'struct[pyarrow]' dtype, " + f"not {invalid.dtype}." 
+ ), + ): + invalid.struct + + +@pytest.mark.parametrize( + ["indices", "name"], + [ + (0, "int_col"), + ([1, 2], "str_col"), + (pc.field("int_col"), "int_col"), + ("int_col", "int_col"), + (b"string_col", b"string_col"), + ([b"string_col"], "string_col"), + ], +) +@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required") +def test_struct_accessor_field_expanded(indices, name): + arrow_type = pa.struct( + [ + ("int_col", pa.int64()), + ( + "struct_col", + pa.struct( + [ + ("int_col", pa.int64()), + ("float_col", pa.float64()), + ("str_col", pa.string()), + ] + ), + ), + (b"string_col", pa.string()), + ] + ) + + data = pa.array([], type=arrow_type) + ser = Series(data, dtype=ArrowDtype(arrow_type)) + expected = pc.struct_field(data, indices) + result = ser.struct.field(indices) + tm.assert_equal(result.array._pa_array.combine_chunks(), expected) + assert result.name == name diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bcb0d30f405e2079a1ff21d838f52cd5f659fd93 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/__init__.py @@ -0,0 +1,7 @@ +""" +Test files dedicated to individual (stand-alone) Series methods + +Ideally these files/tests should correspond 1-to-1 with tests.frame.methods + +These may also present opportunities for sharing/de-duplicating test code. +""" diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_argsort.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_argsort.py new file mode 100644 index 0000000000000000000000000000000000000000..432c0eceee01107cdcd23056a39e0ef4bd55545b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_argsort.py @@ -0,0 +1,84 @@ +import numpy as np +import pytest + +from pandas import ( + Series, + Timestamp, + isna, +) +import pandas._testing as tm + + +class TestSeriesArgsort: + def test_argsort_axis(self): + # GH#54257 + ser = Series(range(3)) + + msg = "No axis named 2 for object type Series" + with pytest.raises(ValueError, match=msg): + ser.argsort(axis=2) + + def test_argsort_numpy(self, datetime_series): + ser = datetime_series + + res = np.argsort(ser).values + expected = np.argsort(np.array(ser)) + tm.assert_numpy_array_equal(res, expected) + + # with missing values + ts = ser.copy() + ts[::2] = np.nan + + msg = "The behavior of Series.argsort in the presence of NA values" + with tm.assert_produces_warning( + FutureWarning, match=msg, check_stacklevel=False + ): + result = np.argsort(ts)[1::2] + expected = np.argsort(np.array(ts.dropna())) + + tm.assert_numpy_array_equal(result.values, expected) + + def test_argsort(self, datetime_series): + argsorted = datetime_series.argsort() + assert issubclass(argsorted.dtype.type, np.integer) + + def test_argsort_dt64(self, unit): + # GH#2967 (introduced bug in 0.11-dev I think) + ser = Series( + [Timestamp(f"201301{i:02d}") for i in range(1, 6)], dtype=f"M8[{unit}]" + ) + assert ser.dtype == f"datetime64[{unit}]" + shifted = ser.shift(-1) + assert shifted.dtype == f"datetime64[{unit}]" + assert isna(shifted[4]) + + result = ser.argsort() + expected = Series(range(5), dtype=np.intp) + tm.assert_series_equal(result, expected) + + msg = "The behavior of Series.argsort in the presence of NA values" + with tm.assert_produces_warning(FutureWarning, 
match=msg):
+ result = shifted.argsort()
+ expected = Series(list(range(4)) + [-1], dtype=np.intp)
+ tm.assert_series_equal(result, expected)
+
+ def test_argsort_stable(self):
+ ser = Series(np.random.default_rng(2).integers(0, 100, size=10000))
+ mindexer = ser.argsort(kind="mergesort")
+ qindexer = ser.argsort()
+
+ mexpected = np.argsort(ser.values, kind="mergesort")
+ qexpected = np.argsort(ser.values, kind="quicksort")
+
+ tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
+ tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
+ msg = (
+ r"ndarray Expected type <class 'numpy\.ndarray'>, "
+ r"found <class 'pandas\.core\.series\.Series'> instead"
+ )
+ with pytest.raises(AssertionError, match=msg):
+ tm.assert_numpy_array_equal(qindexer, mindexer)
+
+ def test_argsort_preserve_name(self, datetime_series):
+ result = datetime_series.argsort()
+ assert result.name == datetime_series.name
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_asof.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_asof.py
new file mode 100644
index 0000000000000000000000000000000000000000..2acc2921e5efc089b1dd4ed7aa9a6cebc1a3d0fe
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_asof.py
@@ -0,0 +1,205 @@
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import IncompatibleFrequency
+
+from pandas import (
+ DatetimeIndex,
+ PeriodIndex,
+ Series,
+ Timestamp,
+ date_range,
+ isna,
+ notna,
+ offsets,
+ period_range,
+)
+import pandas._testing as tm
+
+
+class TestSeriesAsof:
+ def test_asof_nanosecond_index_access(self):
+ ts = Timestamp("20130101").as_unit("ns")._value
+ dti = DatetimeIndex([ts + 50 + i for i in range(100)])
+ ser = Series(np.random.default_rng(2).standard_normal(100), index=dti)
+
+ first_value = ser.asof(ser.index[0])
+
+ # GH#46903 previously incorrectly was "day"
+ assert dti.resolution == "nanosecond"
+
+ # this used to not work because parsing was done by dateutil, which
+ # didn't handle nanoseconds
+ assert first_value == ser["2013-01-01 00:00:00.000000050"]
+
+ expected_ts = np.datetime64("2013-01-01 00:00:00.000000050", "ns")
+ assert first_value == ser[Timestamp(expected_ts)]
+
+ def test_basic(self):
+ # array or list or dates
+ N = 50
+ rng = date_range("1/1/1990", periods=N, freq="53s")
+ ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)
+ ts.iloc[15:30] = np.nan
+ dates = date_range("1/1/1990", periods=N * 3, freq="25s")
+
+ result = ts.asof(dates)
+ assert notna(result).all()
+ lb = ts.index[14]
+ ub = ts.index[30]
+
+ result = ts.asof(list(dates))
+ assert notna(result).all()
+ lb = ts.index[14]
+ ub = ts.index[30]
+
+ mask = (result.index >= lb) & (result.index < ub)
+ rs = result[mask]
+ assert (rs == ts[lb]).all()
+
+ val = result[result.index[result.index >= ub][0]]
+ assert ts[ub] == val
+
+ def test_scalar(self):
+ N = 30
+ rng = date_range("1/1/1990", periods=N, freq="53s")
+ # Explicit cast to float to avoid implicit cast when setting nan
+ ts = Series(np.arange(N), index=rng, dtype="float")
+ ts.iloc[5:10] = np.nan
+ ts.iloc[15:20] = np.nan
+
+ val1 = ts.asof(ts.index[7])
+ val2 = ts.asof(ts.index[19])
+
+ assert val1 == ts.iloc[4]
+ assert val2 == ts.iloc[14]
+
+ # accepts strings
+ val1 = ts.asof(str(ts.index[7]))
+ assert val1 == ts.iloc[4]
+
+ # in there
+ result = ts.asof(ts.index[3])
+ assert result == ts.iloc[3]
+
+ # no as-of value
+ d = ts.index[0] - offsets.BDay()
+ assert np.isnan(ts.asof(d))
+
+ def test_with_nan(self):
+ # basic asof test
+ rng =
date_range("1/1/2000", "1/2/2000", freq="4h") + s = Series(np.arange(len(rng)), index=rng) + r = s.resample("2h").mean() + + result = r.asof(r.index) + expected = Series( + [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.0], + index=date_range("1/1/2000", "1/2/2000", freq="2h"), + ) + tm.assert_series_equal(result, expected) + + r.iloc[3:5] = np.nan + result = r.asof(r.index) + expected = Series( + [0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.0], + index=date_range("1/1/2000", "1/2/2000", freq="2h"), + ) + tm.assert_series_equal(result, expected) + + r.iloc[-3:] = np.nan + result = r.asof(r.index) + expected = Series( + [0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.0], + index=date_range("1/1/2000", "1/2/2000", freq="2h"), + ) + tm.assert_series_equal(result, expected) + + def test_periodindex(self): + # array or list or dates + N = 50 + rng = period_range("1/1/1990", periods=N, freq="h") + ts = Series(np.random.default_rng(2).standard_normal(N), index=rng) + ts.iloc[15:30] = np.nan + dates = date_range("1/1/1990", periods=N * 3, freq="37min") + + result = ts.asof(dates) + assert notna(result).all() + lb = ts.index[14] + ub = ts.index[30] + + result = ts.asof(list(dates)) + assert notna(result).all() + lb = ts.index[14] + ub = ts.index[30] + + pix = PeriodIndex(result.index.values, freq="h") + mask = (pix >= lb) & (pix < ub) + rs = result[mask] + assert (rs == ts[lb]).all() + + ts.iloc[5:10] = np.nan + ts.iloc[15:20] = np.nan + + val1 = ts.asof(ts.index[7]) + val2 = ts.asof(ts.index[19]) + + assert val1 == ts.iloc[4] + assert val2 == ts.iloc[14] + + # accepts strings + val1 = ts.asof(str(ts.index[7])) + assert val1 == ts.iloc[4] + + # in there + assert ts.asof(ts.index[3]) == ts.iloc[3] + + # no as of value + d = ts.index[0].to_timestamp() - offsets.BDay() + assert isna(ts.asof(d)) + + # Mismatched freq + msg = "Input has different freq" + with pytest.raises(IncompatibleFrequency, match=msg): + ts.asof(rng.asfreq("D")) + + def test_errors(self): + s = Series( + [1, 2, 3], + index=[Timestamp("20130101"), Timestamp("20130103"), Timestamp("20130102")], + ) + + # non-monotonic + assert not s.index.is_monotonic_increasing + with pytest.raises(ValueError, match="requires a sorted index"): + s.asof(s.index[0]) + + # subset with Series + N = 10 + rng = date_range("1/1/1990", periods=N, freq="53s") + s = Series(np.random.default_rng(2).standard_normal(N), index=rng) + with pytest.raises(ValueError, match="not valid for Series"): + s.asof(s.index[0], subset="foo") + + def test_all_nans(self): + # GH 15713 + # series is all nans + + # testing non-default indexes + N = 50 + rng = date_range("1/1/1990", periods=N, freq="53s") + + dates = date_range("1/1/1990", periods=N * 3, freq="25s") + result = Series(np.nan, index=rng).asof(dates) + expected = Series(np.nan, index=dates) + tm.assert_series_equal(result, expected) + + # testing scalar input + date = date_range("1/1/1990", periods=N * 3, freq="25s")[0] + result = Series(np.nan, index=rng).asof(date) + assert isna(result) + + # test name is propagated + result = Series(np.nan, index=[1, 2, 3, 4], name="test").asof([4, 5]) + expected = Series(np.nan, index=[4, 5], name="test") + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_astype.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..4c8028e74ee5518ec97c1e571c9cb04fa0045c85 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_astype.py @@ -0,0 +1,683 @@ +from datetime import ( + datetime, + timedelta, +) +from importlib import reload +import string +import sys + +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT +import pandas.util._test_decorators as td + +from pandas import ( + NA, + Categorical, + CategoricalDtype, + DatetimeTZDtype, + Index, + Interval, + NaT, + Series, + Timedelta, + Timestamp, + cut, + date_range, + to_datetime, +) +import pandas._testing as tm + + +def rand_str(nchars: int) -> str: + """ + Generate one random byte string. + """ + RANDS_CHARS = np.array( + list(string.ascii_letters + string.digits), dtype=(np.str_, 1) + ) + return "".join(np.random.default_rng(2).choice(RANDS_CHARS, nchars)) + + +class TestAstypeAPI: + def test_astype_unitless_dt64_raises(self): + # GH#47844 + ser = Series(["1970-01-01", "1970-01-01", "1970-01-01"], dtype="datetime64[ns]") + df = ser.to_frame() + + msg = "Casting to unit-less dtype 'datetime64' is not supported" + with pytest.raises(TypeError, match=msg): + ser.astype(np.datetime64) + with pytest.raises(TypeError, match=msg): + df.astype(np.datetime64) + with pytest.raises(TypeError, match=msg): + ser.astype("datetime64") + with pytest.raises(TypeError, match=msg): + df.astype("datetime64") + + def test_arg_for_errors_in_astype(self): + # see GH#14878 + ser = Series([1, 2, 3]) + + msg = ( + r"Expected value of kwarg 'errors' to be one of \['raise', " + r"'ignore'\]\. Supplied value is 'False'" + ) + with pytest.raises(ValueError, match=msg): + ser.astype(np.float64, errors=False) + + ser.astype(np.int8, errors="raise") + + @pytest.mark.parametrize("dtype_class", [dict, Series]) + def test_astype_dict_like(self, dtype_class): + # see GH#7271 + ser = Series(range(0, 10, 2), name="abc") + + dt1 = dtype_class({"abc": str}) + result = ser.astype(dt1) + expected = Series(["0", "2", "4", "6", "8"], name="abc", dtype=object) + tm.assert_series_equal(result, expected) + + dt2 = dtype_class({"abc": "float64"}) + result = ser.astype(dt2) + expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc") + tm.assert_series_equal(result, expected) + + dt3 = dtype_class({"abc": str, "def": str}) + msg = ( + "Only the Series name can be used for the key in Series dtype " + r"mappings\." + ) + with pytest.raises(KeyError, match=msg): + ser.astype(dt3) + + dt4 = dtype_class({0: str}) + with pytest.raises(KeyError, match=msg): + ser.astype(dt4) + + # GH#16717 + # if dtypes provided is empty, it should error + if dtype_class is Series: + dt5 = dtype_class({}, dtype=object) + else: + dt5 = dtype_class({}) + + with pytest.raises(KeyError, match=msg): + ser.astype(dt5) + + +class TestAstype: + @pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"]) + def test_astype_object_to_dt64_non_nano(self, tz): + # GH#55756, GH#54620 + ts = Timestamp("2999-01-01") + dtype = "M8[us]" + if tz is not None: + dtype = f"M8[us, {tz}]" + vals = [ts, "2999-01-02 03:04:05.678910", 2500] + ser = Series(vals, dtype=object) + result = ser.astype(dtype) + + # The 2500 is interpreted as microseconds, consistent with what + # we would get if we created DatetimeIndexes from vals[:2] and vals[2:] + # and concated the results. 
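+ # Editorial illustration (not in the original test) of that reading of the
+ # integer: 2500 counts microseconds from the epoch, matching unit="us".
+ assert to_datetime(2500, unit="us") == Timestamp("1970-01-01 00:00:00.002500")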
+ pointwise = [ + vals[0].tz_localize(tz), + Timestamp(vals[1], tz=tz), + to_datetime(vals[2], unit="us", utc=True).tz_convert(tz), + ] + exp_vals = [x.as_unit("us").asm8 for x in pointwise] + exp_arr = np.array(exp_vals, dtype="M8[us]") + expected = Series(exp_arr, dtype="M8[us]") + if tz is not None: + expected = expected.dt.tz_localize("UTC").dt.tz_convert(tz) + tm.assert_series_equal(result, expected) + + def test_astype_mixed_object_to_dt64tz(self): + # pre-2.0 this raised ValueError bc of tz mismatch + # xref GH#32581 + ts = Timestamp("2016-01-04 05:06:07", tz="US/Pacific") + ts2 = ts.tz_convert("Asia/Tokyo") + + ser = Series([ts, ts2], dtype=object) + res = ser.astype("datetime64[ns, Europe/Brussels]") + expected = Series( + [ts.tz_convert("Europe/Brussels"), ts2.tz_convert("Europe/Brussels")], + dtype="datetime64[ns, Europe/Brussels]", + ) + tm.assert_series_equal(res, expected) + + @pytest.mark.parametrize("dtype", np.typecodes["All"]) + def test_astype_empty_constructor_equality(self, dtype): + # see GH#15524 + + if dtype not in ( + "S", + "V", # poor support (if any) currently + "M", + "m", # Generic timestamps raise a ValueError. Already tested. + ): + init_empty = Series([], dtype=dtype) + as_type_empty = Series([]).astype(dtype) + tm.assert_series_equal(init_empty, as_type_empty) + + @pytest.mark.parametrize("dtype", [str, np.str_]) + @pytest.mark.parametrize( + "series", + [ + Series([string.digits * 10, rand_str(63), rand_str(64), rand_str(1000)]), + Series([string.digits * 10, rand_str(63), rand_str(64), np.nan, 1.0]), + ], + ) + def test_astype_str_map(self, dtype, series, using_infer_string): + # see GH#4405 + result = series.astype(dtype) + expected = series.map(str) + if using_infer_string: + expected = expected.astype(object) + tm.assert_series_equal(result, expected) + + def test_astype_float_to_period(self): + result = Series([np.nan]).astype("period[D]") + expected = Series([NaT], dtype="period[D]") + tm.assert_series_equal(result, expected) + + def test_astype_no_pandas_dtype(self): + # https://github.com/pandas-dev/pandas/pull/24866 + ser = Series([1, 2], dtype="int64") + # Don't have NumpyEADtype in the public API, so we use `.array.dtype`, + # which is a NumpyEADtype. + result = ser.astype(ser.array.dtype) + tm.assert_series_equal(result, ser) + + @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64]) + def test_astype_generic_timestamp_no_frequency(self, dtype, request): + # see GH#15524, GH#15987 + data = [1] + ser = Series(data) + + if np.dtype(dtype).name not in ["timedelta64", "datetime64"]: + mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit") + request.applymarker(mark) + + msg = ( + rf"The '{dtype.__name__}' dtype has no unit\. " + rf"Please pass in '{dtype.__name__}\[ns\]' instead." 
+ ) + with pytest.raises(ValueError, match=msg): + ser.astype(dtype) + + def test_astype_dt64_to_str(self): + # GH#10442 : testing astype(str) is correct for Series/DatetimeIndex + dti = date_range("2012-01-01", periods=3) + result = Series(dti).astype(str) + expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object) + tm.assert_series_equal(result, expected) + + def test_astype_dt64tz_to_str(self): + # GH#10442 : testing astype(str) is correct for Series/DatetimeIndex + dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern") + result = Series(dti_tz).astype(str) + expected = Series( + [ + "2012-01-01 00:00:00-05:00", + "2012-01-02 00:00:00-05:00", + "2012-01-03 00:00:00-05:00", + ], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + def test_astype_datetime(self, unit): + ser = Series(iNaT, dtype=f"M8[{unit}]", index=range(5)) + + ser = ser.astype("O") + assert ser.dtype == np.object_ + + ser = Series([datetime(2001, 1, 2, 0, 0)]) + + ser = ser.astype("O") + assert ser.dtype == np.object_ + + ser = Series( + [datetime(2001, 1, 2, 0, 0) for i in range(3)], dtype=f"M8[{unit}]" + ) + + ser[1] = np.nan + assert ser.dtype == f"M8[{unit}]" + + ser = ser.astype("O") + assert ser.dtype == np.object_ + + def test_astype_datetime64tz(self): + ser = Series(date_range("20130101", periods=3, tz="US/Eastern")) + + # astype + result = ser.astype(object) + expected = Series(ser.astype(object), dtype=object) + tm.assert_series_equal(result, expected) + + result = Series(ser.values).dt.tz_localize("UTC").dt.tz_convert(ser.dt.tz) + tm.assert_series_equal(result, ser) + + # astype - object, preserves on construction + result = Series(ser.astype(object)) + expected = ser.astype(object) + tm.assert_series_equal(result, expected) + + # astype - datetime64[ns, tz] + msg = "Cannot use .astype to convert from timezone-naive" + with pytest.raises(TypeError, match=msg): + # dt64->dt64tz astype deprecated + Series(ser.values).astype("datetime64[ns, US/Eastern]") + + with pytest.raises(TypeError, match=msg): + # dt64->dt64tz astype deprecated + Series(ser.values).astype(ser.dtype) + + result = ser.astype("datetime64[ns, CET]") + expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET")) + tm.assert_series_equal(result, expected) + + def test_astype_str_cast_dt64(self): + # see GH#9757 + ts = Series([Timestamp("2010-01-04 00:00:00")]) + res = ts.astype(str) + + expected = Series(["2010-01-04"], dtype=object) + tm.assert_series_equal(res, expected) + + ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")]) + res = ts.astype(str) + + expected = Series(["2010-01-04 00:00:00-05:00"], dtype=object) + tm.assert_series_equal(res, expected) + + def test_astype_str_cast_td64(self): + # see GH#9757 + + td = Series([Timedelta(1, unit="d")]) + ser = td.astype(str) + + expected = Series(["1 days"], dtype=object) + tm.assert_series_equal(ser, expected) + + def test_dt64_series_astype_object(self): + dt64ser = Series(date_range("20130101", periods=3)) + result = dt64ser.astype(object) + assert isinstance(result.iloc[0], datetime) + assert result.dtype == np.object_ + + def test_td64_series_astype_object(self): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]") + result = tdser.astype(object) + assert isinstance(result.iloc[0], timedelta) + assert result.dtype == np.object_ + + @pytest.mark.parametrize( + "data, dtype", + [ + (["x", "y", "z"], "string[python]"), + pytest.param( + ["x", "y", "z"], + "string[pyarrow]", + marks=td.skip_if_no("pyarrow"), + 
), + (["x", "y", "z"], "category"), + (3 * [Timestamp("2020-01-01", tz="UTC")], None), + (3 * [Interval(0, 1)], None), + ], + ) + @pytest.mark.parametrize("errors", ["raise", "ignore"]) + def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors): + # https://github.com/pandas-dev/pandas/issues/35471 + ser = Series(data, dtype=dtype) + if errors == "ignore": + expected = ser + result = ser.astype(float, errors="ignore") + tm.assert_series_equal(result, expected) + else: + msg = "(Cannot cast)|(could not convert)" + with pytest.raises((ValueError, TypeError), match=msg): + ser.astype(float, errors=errors) + + @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64]) + def test_astype_from_float_to_str(self, dtype): + # https://github.com/pandas-dev/pandas/issues/36451 + ser = Series([0.1], dtype=dtype) + result = ser.astype(str) + expected = Series(["0.1"], dtype=object) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "value, string_value", + [ + (None, "None"), + (np.nan, "nan"), + (NA, ""), + ], + ) + def test_astype_to_str_preserves_na(self, value, string_value): + # https://github.com/pandas-dev/pandas/issues/36904 + ser = Series(["a", "b", value], dtype=object) + result = ser.astype(str) + expected = Series(["a", "b", string_value], dtype=object) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"]) + def test_astype(self, dtype): + ser = Series(np.random.default_rng(2).standard_normal(5), name="foo") + as_typed = ser.astype(dtype) + + assert as_typed.dtype == dtype + assert as_typed.name == ser.name + + @pytest.mark.parametrize("value", [np.nan, np.inf]) + @pytest.mark.parametrize("dtype", [np.int32, np.int64]) + def test_astype_cast_nan_inf_int(self, dtype, value): + # gh-14265: check NaN and inf raise error when converting to int + msg = "Cannot convert non-finite values \\(NA or inf\\) to integer" + ser = Series([value]) + + with pytest.raises(ValueError, match=msg): + ser.astype(dtype) + + @pytest.mark.parametrize("dtype", [int, np.int8, np.int64]) + def test_astype_cast_object_int_fail(self, dtype): + arr = Series(["car", "house", "tree", "1"]) + msg = r"invalid literal for int\(\) with base 10: 'car'" + with pytest.raises(ValueError, match=msg): + arr.astype(dtype) + + def test_astype_float_to_uint_negatives_raise( + self, float_numpy_dtype, any_unsigned_int_numpy_dtype + ): + # GH#45151 We don't cast negative numbers to nonsense values + # TODO: same for EA float/uint dtypes, signed integers? 
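+ # Editorial note: plain NumPy would typically wrap such negatives silently
+ # (e.g. np.array([-3.0]).astype(np.uint64) yields a huge positive value),
+ # which is exactly the nonsense result the ValueError below guards against.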
+ arr = np.arange(5).astype(float_numpy_dtype) - 3 # includes negatives + ser = Series(arr) + + msg = "Cannot losslessly cast from .* to .*" + with pytest.raises(ValueError, match=msg): + ser.astype(any_unsigned_int_numpy_dtype) + + with pytest.raises(ValueError, match=msg): + ser.to_frame().astype(any_unsigned_int_numpy_dtype) + + with pytest.raises(ValueError, match=msg): + # We currently catch and re-raise in Index.astype + Index(ser).astype(any_unsigned_int_numpy_dtype) + + with pytest.raises(ValueError, match=msg): + ser.array.astype(any_unsigned_int_numpy_dtype) + + def test_astype_cast_object_int(self): + arr = Series(["1", "2", "3", "4"], dtype=object) + result = arr.astype(int) + + tm.assert_series_equal(result, Series(np.arange(1, 5))) + + def test_astype_unicode(self, using_infer_string): + # see GH#7758: A bit of magic is required to set + # default encoding to utf-8 + digits = string.digits + test_series = [ + Series([digits * 10, rand_str(63), rand_str(64), rand_str(1000)]), + Series(["データーサイエンス、お前はもう死んでいる"]), + ] + + former_encoding = None + + if sys.getdefaultencoding() == "utf-8": + # GH#45326 as of 2.0 Series.astype matches Index.astype by handling + # bytes with obj.decode() instead of str(obj) + item = "野菜食べないとやばい" + ser = Series([item.encode()]) + result = ser.astype(np.str_) + expected = Series([item], dtype=object) + tm.assert_series_equal(result, expected) + + for ser in test_series: + res = ser.astype(np.str_) + expec = ser.map(str) + if using_infer_string: + expec = expec.astype(object) + tm.assert_series_equal(res, expec) + + # Restore the former encoding + if former_encoding is not None and former_encoding != "utf-8": + reload(sys) + sys.setdefaultencoding(former_encoding) + + def test_astype_bytes(self): + # GH#39474 + result = Series(["foo", "bar", "baz"]).astype(bytes) + assert result.dtypes == np.dtype("S3") + + def test_astype_nan_to_bool(self): + # GH#43018 + ser = Series(np.nan, dtype="object") + result = ser.astype("bool") + expected = Series(True, dtype="bool") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES, + ) + def test_astype_ea_to_datetimetzdtype(self, dtype): + # GH37553 + ser = Series([4, 0, 9], dtype=dtype) + result = ser.astype(DatetimeTZDtype(tz="US/Pacific")) + + expected = Series( + { + 0: Timestamp("1969-12-31 16:00:00.000000004-08:00", tz="US/Pacific"), + 1: Timestamp("1969-12-31 16:00:00.000000000-08:00", tz="US/Pacific"), + 2: Timestamp("1969-12-31 16:00:00.000000009-08:00", tz="US/Pacific"), + } + ) + + tm.assert_series_equal(result, expected) + + def test_astype_retain_attrs(self, any_numpy_dtype): + # GH#44414 + ser = Series([0, 1, 2, 3]) + ser.attrs["Location"] = "Michigan" + + result = ser.astype(any_numpy_dtype).attrs + expected = ser.attrs + + tm.assert_dict_equal(expected, result) + + +class TestAstypeString: + @pytest.mark.parametrize( + "data, dtype", + [ + ([True, NA], "boolean"), + (["A", NA], "category"), + (["2020-10-10", "2020-10-10"], "datetime64[ns]"), + (["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"), + ( + ["2012-01-01 00:00:00-05:00", NaT], + "datetime64[ns, US/Eastern]", + ), + ([1, None], "UInt16"), + (["1/1/2021", "2/1/2021"], "period[M]"), + (["1/1/2021", "2/1/2021", NaT], "period[M]"), + (["1 Day", "59 Days", NaT], "timedelta64[ns]"), + # currently no way to parse IntervalArray from a list of strings + ], + ) + def test_astype_string_to_extension_dtype_roundtrip( + self, data, dtype, request, nullable_string_dtype + ): + if 
dtype == "boolean":
+ mark = pytest.mark.xfail(
+ reason="TODO StringArray.astype() with missing values #GH40566"
+ )
+ request.applymarker(mark)
+ # GH-40351
+ ser = Series(data, dtype=dtype)
+
+ # Note: just passing .astype(dtype) fails for dtype="category" because
+ # ser.dtype.categories will be object dtype, whereas
+ # result.dtype.categories will have string dtype
+ result = ser.astype(nullable_string_dtype).astype(ser.dtype)
+ tm.assert_series_equal(result, ser)
+
+
+class TestAstypeCategorical:
+ def test_astype_categorical_to_other(self):
+ cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
+ ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()
+ ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
+
+ expected = ser
+ tm.assert_series_equal(ser.astype("category"), expected)
+ tm.assert_series_equal(ser.astype(CategoricalDtype()), expected)
+ msg = r"Cannot cast object|string dtype to float64"
+ with pytest.raises(ValueError, match=msg):
+ ser.astype("float64")
+
+ cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
+ exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"], dtype=object)
+ tm.assert_series_equal(cat.astype("str"), exp)
+ s2 = Series(Categorical(["1", "2", "3", "4"]))
+ exp2 = Series([1, 2, 3, 4]).astype("int")
+ tm.assert_series_equal(s2.astype("int"), exp2)
+
+ # objects don't sort correctly, so just compare that we have the same
+ # values
+ def cmp(a, b):
+ tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
+
+ expected = Series(np.array(ser.values), name="value_group")
+ cmp(ser.astype("object"), expected)
+ cmp(ser.astype(np.object_), expected)
+
+ # array conversion
+ tm.assert_almost_equal(np.array(ser), np.array(ser.values))
+
+ tm.assert_series_equal(ser.astype("category"), ser)
+ tm.assert_series_equal(ser.astype(CategoricalDtype()), ser)
+
+ roundtrip_expected = ser.cat.set_categories(
+ ser.cat.categories.sort_values()
+ ).cat.remove_unused_categories()
+ result = ser.astype("object").astype("category")
+ tm.assert_series_equal(result, roundtrip_expected)
+ result = ser.astype("object").astype(CategoricalDtype())
+ tm.assert_series_equal(result, roundtrip_expected)
+
+ def test_astype_categorical_invalid_conversions(self):
+ # invalid conversion (these are NOT a dtype)
+ cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
+ ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()
+ ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
+
+ msg = (
+ "dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
+ "not understood"
+ )
+ with pytest.raises(TypeError, match=msg):
+ ser.astype(Categorical)
+ with pytest.raises(TypeError, match=msg):
+ ser.astype("object").astype(Categorical)
+
+ def test_astype_categoricaldtype(self):
+ ser = Series(["a", "b", "a"])
+ result = ser.astype(CategoricalDtype(["a", "b"], ordered=True))
+ expected = Series(Categorical(["a", "b", "a"], ordered=True))
+ tm.assert_series_equal(result, expected)
+
+ result = ser.astype(CategoricalDtype(["a", "b"], ordered=False))
+ expected = Series(Categorical(["a", "b", "a"], ordered=False))
+ tm.assert_series_equal(result, expected)
+
+ result = ser.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
+ expected = Series(
+ Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
+ )
+ tm.assert_series_equal(result, expected)
+ tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
+
+ @pytest.mark.parametrize("name", [None, "foo"])
+
@pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("series_ordered", [True, False]) + def test_astype_categorical_to_categorical( + self, name, dtype_ordered, series_ordered + ): + # GH#10696, GH#18593 + s_data = list("abcaacbab") + s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered) + ser = Series(s_data, dtype=s_dtype, name=name) + + # unspecified categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = ser.astype(dtype) + exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered) + expected = Series(s_data, name=name, dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + # different categories + dtype = CategoricalDtype(list("adc"), dtype_ordered) + result = ser.astype(dtype) + expected = Series(s_data, name=name, dtype=dtype) + tm.assert_series_equal(result, expected) + + if dtype_ordered is False: + # not specifying ordered, so only test once + expected = ser + result = ser.astype("category") + tm.assert_series_equal(result, expected) + + def test_astype_bool_missing_to_categorical(self): + # GH-19182 + ser = Series([True, False, np.nan]) + assert ser.dtypes == np.object_ + + result = ser.astype(CategoricalDtype(categories=[True, False])) + expected = Series(Categorical([True, False, np.nan], categories=[True, False])) + tm.assert_series_equal(result, expected) + + def test_astype_categories_raises(self): + # deprecated GH#17636, removed in GH#27141 + ser = Series(["a", "b", "a"]) + with pytest.raises(TypeError, match="got an unexpected"): + ser.astype("category", categories=["a", "b"], ordered=True) + + @pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]]) + def test_astype_from_categorical(self, items): + ser = Series(items) + exp = Series(Categorical(items)) + res = ser.astype("category") + tm.assert_series_equal(res, exp) + + def test_astype_from_categorical_with_keywords(self): + # with keywords + lst = ["a", "b", "c", "a"] + ser = Series(lst) + exp = Series(Categorical(lst, ordered=True)) + res = ser.astype(CategoricalDtype(None, ordered=True)) + tm.assert_series_equal(res, exp) + + exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True)) + res = ser.astype(CategoricalDtype(list("abcdef"), ordered=True)) + tm.assert_series_equal(res, exp) + + def test_astype_timedelta64_with_np_nan(self): + # GH45798 + result = Series([Timedelta(1), np.nan], dtype="timedelta64[ns]") + expected = Series([Timedelta(1), NaT], dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected) + + @td.skip_if_no("pyarrow") + def test_astype_int_na_string(self): + # GH#57418 + ser = Series([12, NA], dtype="Int64[pyarrow]") + result = ser.astype("string[pyarrow]") + expected = Series(["12", NA], dtype="string[pyarrow]") + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_clip.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..5bbf35f6e39bb237ef1a3853403d47f1fed5b3de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_clip.py @@ -0,0 +1,146 @@ +from datetime import datetime + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Series, + Timestamp, + isna, + notna, +) +import pandas._testing as tm + + +class TestSeriesClip: + def test_clip(self, datetime_series): + val = datetime_series.median() + + assert datetime_series.clip(lower=val).min() == 
val + assert datetime_series.clip(upper=val).max() == val + + result = datetime_series.clip(-0.5, 0.5) + expected = np.clip(datetime_series, -0.5, 0.5) + tm.assert_series_equal(result, expected) + assert isinstance(expected, Series) + + def test_clip_types_and_nulls(self): + sers = [ + Series([np.nan, 1.0, 2.0, 3.0]), + Series([None, "a", "b", "c"]), + Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")), + ] + + for s in sers: + thresh = s[2] + lower = s.clip(lower=thresh) + upper = s.clip(upper=thresh) + assert lower[notna(lower)].min() == thresh + assert upper[notna(upper)].max() == thresh + assert list(isna(s)) == list(isna(lower)) + assert list(isna(s)) == list(isna(upper)) + + def test_series_clipping_with_na_values(self, any_numeric_ea_dtype, nulls_fixture): + # Ensure that the clipping method can handle NA values without failing + # GH#40581 + + if nulls_fixture is pd.NaT: + # constructor will raise, see + # test_constructor_mismatched_null_nullable_dtype + pytest.skip("See test_constructor_mismatched_null_nullable_dtype") + + ser = Series([nulls_fixture, 1.0, 3.0], dtype=any_numeric_ea_dtype) + s_clipped_upper = ser.clip(upper=2.0) + s_clipped_lower = ser.clip(lower=2.0) + + expected_upper = Series([nulls_fixture, 1.0, 2.0], dtype=any_numeric_ea_dtype) + expected_lower = Series([nulls_fixture, 2.0, 3.0], dtype=any_numeric_ea_dtype) + + tm.assert_series_equal(s_clipped_upper, expected_upper) + tm.assert_series_equal(s_clipped_lower, expected_lower) + + def test_clip_with_na_args(self): + """Should process np.nan argument as None""" + # GH#17276 + s = Series([1, 2, 3]) + + tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3])) + tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3])) + + # GH#19992 + msg = "Downcasting behavior in Series and DataFrame methods 'where'" + # TODO: avoid this warning here? seems like we should never be upcasting + # in the first place?
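+ # Illustrative aside (editor's note, not upstream code): a list-like bound
+ # is aligned elementwise with the Series, and a np.nan entry leaves that
+ # position unclipped, e.g. Series([1, 2, 3]).clip(lower=[0, 4, np.nan])
+ # evaluates to Series([1, 4, 3]). The FutureWarning matched below comes
+ # from the deprecated downcasting inside the underlying 'where' machinery,
+ # not from clip itself.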
+ with tm.assert_produces_warning(FutureWarning, match=msg): + res = s.clip(lower=[0, 4, np.nan]) + tm.assert_series_equal(res, Series([1, 4, 3])) + with tm.assert_produces_warning(FutureWarning, match=msg): + res = s.clip(upper=[1, np.nan, 1]) + tm.assert_series_equal(res, Series([1, 2, 1])) + + # GH#40420 + s = Series([1, 2, 3]) + result = s.clip(0, [np.nan, np.nan, np.nan]) + tm.assert_series_equal(s, result) + + def test_clip_against_series(self): + # GH#6966 + + s = Series([1.0, 1.0, 4.0]) + + lower = Series([1.0, 2.0, 3.0]) + upper = Series([1.5, 2.5, 3.5]) + + tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5])) + tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5])) + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])]) + def test_clip_against_list_like(self, inplace, upper): + # GH#15390 + original = Series([5, 6, 7]) + result = original.clip(upper=upper, inplace=inplace) + expected = Series([1, 2, 3]) + + if inplace: + result = original + tm.assert_series_equal(result, expected, check_exact=True) + + def test_clip_with_datetimes(self): + # GH#11838 + # naive and tz-aware datetimes + + t = Timestamp("2015-12-01 09:30:30") + s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")]) + result = s.clip(upper=t) + expected = Series( + [Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")] + ) + tm.assert_series_equal(result, expected) + + t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern") + s = Series( + [ + Timestamp("2015-12-01 09:30:00", tz="US/Eastern"), + Timestamp("2015-12-01 09:31:00", tz="US/Eastern"), + ] + ) + result = s.clip(upper=t) + expected = Series( + [ + Timestamp("2015-12-01 09:30:00", tz="US/Eastern"), + Timestamp("2015-12-01 09:30:30", tz="US/Eastern"), + ] + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [object, "M8[us]"]) + def test_clip_with_timestamps_and_oob_datetimes(self, dtype): + # GH-42794 + ser = Series([datetime(1, 1, 1), datetime(9999, 9, 9)], dtype=dtype) + + result = ser.clip(lower=Timestamp.min, upper=Timestamp.max) + expected = Series([Timestamp.min, Timestamp.max], dtype=dtype) + + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_copy.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_copy.py new file mode 100644 index 0000000000000000000000000000000000000000..23dbe85075916dbb901afdcf8267c8877db3b3f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_copy.py @@ -0,0 +1,91 @@ +import numpy as np +import pytest + +from pandas import ( + Series, + Timestamp, +) +import pandas._testing as tm + + +class TestCopy: + @pytest.mark.parametrize("deep", ["default", None, False, True]) + def test_copy(self, deep, using_copy_on_write, warn_copy_on_write): + ser = Series(np.arange(10), dtype="float64") + + # default deep is True + if deep == "default": + ser2 = ser.copy() + else: + ser2 = ser.copy(deep=deep) + + if using_copy_on_write: + # INFO(CoW) a shallow copy doesn't yet copy the data + # but parent will not be modified (CoW) + if deep is None or deep is False: + assert np.may_share_memory(ser.values, ser2.values) + else: + assert not np.may_share_memory(ser.values, ser2.values) + + with tm.assert_cow_warning(warn_copy_on_write and deep is False): + ser2[::2] = np.nan + + if deep is not False or using_copy_on_write: + # Did not 
modify original Series + assert np.isnan(ser2[0]) + assert not np.isnan(ser[0]) + else: + # we DID modify the original Series + assert np.isnan(ser2[0]) + assert np.isnan(ser[0]) + + @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning") + @pytest.mark.parametrize("deep", ["default", None, False, True]) + def test_copy_tzaware(self, deep, using_copy_on_write): + # GH#11794 + # copy of tz-aware + expected = Series([Timestamp("2012/01/01", tz="UTC")]) + expected2 = Series([Timestamp("1999/01/01", tz="UTC")]) + + ser = Series([Timestamp("2012/01/01", tz="UTC")]) + + if deep == "default": + ser2 = ser.copy() + else: + ser2 = ser.copy(deep=deep) + + if using_copy_on_write: + # INFO(CoW) a shallow copy doesn't yet copy the data + # but parent will not be modified (CoW) + if deep is None or deep is False: + assert np.may_share_memory(ser.values, ser2.values) + else: + assert not np.may_share_memory(ser.values, ser2.values) + + ser2[0] = Timestamp("1999/01/01", tz="UTC") + + # default deep is True + if deep is not False or using_copy_on_write: + # Did not modify original Series + tm.assert_series_equal(ser2, expected2) + tm.assert_series_equal(ser, expected) + else: + # we DID modify the original Series + tm.assert_series_equal(ser2, expected2) + tm.assert_series_equal(ser, expected2) + + def test_copy_name(self, datetime_series): + result = datetime_series.copy() + assert result.name == datetime_series.name + + def test_copy_index_name_checking(self, datetime_series): + # don't want to be able to modify the index stored elsewhere after + # making a copy + + datetime_series.index.name = None + assert datetime_series.index.name is None + assert datetime_series is datetime_series + + cp = datetime_series.copy() + cp.index.name = "foo" + assert datetime_series.index.name is None diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_describe.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_describe.py new file mode 100644 index 0000000000000000000000000000000000000000..79ec11feb530817e735cf1d45cd7985839cd4d05 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_describe.py @@ -0,0 +1,203 @@ +import numpy as np +import pytest + +from pandas.compat.numpy import np_version_gte1p25 + +from pandas.core.dtypes.common import ( + is_complex_dtype, + is_extension_array_dtype, +) + +from pandas import ( + NA, + Period, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestSeriesDescribe: + def test_describe_ints(self): + ser = Series([0, 1, 2, 3, 4], name="int_data") + result = ser.describe() + expected = Series( + [5, 2, ser.std(), 0, 1, 2, 3, 4], + name="int_data", + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + def test_describe_bools(self): + ser = Series([True, True, False, False, False], name="bool_data") + result = ser.describe() + expected = Series( + [5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"] + ) + tm.assert_series_equal(result, expected) + + def test_describe_strs(self): + ser = Series(["a", "a", "b", "c", "d"], name="str_data") + result = ser.describe() + expected = Series( + [5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"] + ) + tm.assert_series_equal(result, expected) + + def test_describe_timedelta64(self): + ser = Series( + [ + Timedelta("1 days"), + Timedelta("2 days"), + Timedelta("3 days"), + 
Timedelta("4 days"), + Timedelta("5 days"), + ], + name="timedelta_data", + ) + result = ser.describe() + expected = Series( + [5, ser[2], ser.std(), ser[0], ser[1], ser[2], ser[3], ser[4]], + name="timedelta_data", + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + def test_describe_period(self): + ser = Series( + [Period("2020-01", "M"), Period("2020-01", "M"), Period("2019-12", "M")], + name="period_data", + ) + result = ser.describe() + expected = Series( + [3, 2, ser[0], 2], + name="period_data", + index=["count", "unique", "top", "freq"], + ) + tm.assert_series_equal(result, expected) + + def test_describe_empty_object(self): + # https://github.com/pandas-dev/pandas/issues/27183 + s = Series([None, None], dtype=object) + result = s.describe() + expected = Series( + [0, 0, np.nan, np.nan], + dtype=object, + index=["count", "unique", "top", "freq"], + ) + tm.assert_series_equal(result, expected) + + result = s[:0].describe() + tm.assert_series_equal(result, expected) + # ensure NaN, not None + assert np.isnan(result.iloc[2]) + assert np.isnan(result.iloc[3]) + + def test_describe_with_tz(self, tz_naive_fixture): + # GH 21332 + tz = tz_naive_fixture + name = str(tz_naive_fixture) + start = Timestamp(2018, 1, 1) + end = Timestamp(2018, 1, 5) + s = Series(date_range(start, end, tz=tz), name=name) + result = s.describe() + expected = Series( + [ + 5, + Timestamp(2018, 1, 3).tz_localize(tz), + start.tz_localize(tz), + s[1], + s[2], + s[3], + end.tz_localize(tz), + ], + name=name, + index=["count", "mean", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + def test_describe_with_tz_numeric(self): + name = tz = "CET" + start = Timestamp(2018, 1, 1) + end = Timestamp(2018, 1, 5) + s = Series(date_range(start, end, tz=tz), name=name) + + result = s.describe() + + expected = Series( + [ + 5, + Timestamp("2018-01-03 00:00:00", tz=tz), + Timestamp("2018-01-01 00:00:00", tz=tz), + Timestamp("2018-01-02 00:00:00", tz=tz), + Timestamp("2018-01-03 00:00:00", tz=tz), + Timestamp("2018-01-04 00:00:00", tz=tz), + Timestamp("2018-01-05 00:00:00", tz=tz), + ], + name=name, + index=["count", "mean", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + def test_datetime_is_numeric_includes_datetime(self): + s = Series(date_range("2012", periods=3)) + result = s.describe() + expected = Series( + [ + 3, + Timestamp("2012-01-02"), + Timestamp("2012-01-01"), + Timestamp("2012-01-01T12:00:00"), + Timestamp("2012-01-02"), + Timestamp("2012-01-02T12:00:00"), + Timestamp("2012-01-03"), + ], + index=["count", "mean", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:Casting complex values to real discards") + def test_numeric_result_dtype(self, any_numeric_dtype): + # GH#48340 - describe should always return float on non-complex numeric input + if is_extension_array_dtype(any_numeric_dtype): + dtype = "Float64" + else: + dtype = "complex128" if is_complex_dtype(any_numeric_dtype) else None + + ser = Series([0, 1], dtype=any_numeric_dtype) + if dtype == "complex128" and np_version_gte1p25: + with pytest.raises( + TypeError, match=r"^a must be an array of real numbers$" + ): + ser.describe() + return + result = ser.describe() + expected = Series( + [ + 2.0, + 0.5, + ser.std(), + 0, + 0.25, + 0.5, + 0.75, + 1.0, + ], + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + dtype=dtype, + ) + 
tm.assert_series_equal(result, expected) + + def test_describe_one_element_ea(self): + # GH#52515 + ser = Series([0.0], dtype="Float64") + with tm.assert_produces_warning(None): + result = ser.describe() + expected = Series( + [1, 0, NA, 0, 0, 0, 0, 0], + dtype="Float64", + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9a469915cfb718aba9020b82105c66d93b429f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop.py @@ -0,0 +1,99 @@ +import pytest + +from pandas import ( + Index, + Series, +) +import pandas._testing as tm +from pandas.api.types import is_bool_dtype + + +@pytest.mark.parametrize( + "data, index, drop_labels, axis, expected_data, expected_index", + [ + # Unique Index + ([1, 2], ["one", "two"], ["two"], 0, [1], ["one"]), + ([1, 2], ["one", "two"], ["two"], "rows", [1], ["one"]), + ([1, 1, 2], ["one", "two", "one"], ["two"], 0, [1, 2], ["one", "one"]), + # GH 5248 Non-Unique Index + ([1, 1, 2], ["one", "two", "one"], "two", 0, [1, 2], ["one", "one"]), + ([1, 1, 2], ["one", "two", "one"], ["one"], 0, [1], ["two"]), + ([1, 1, 2], ["one", "two", "one"], "one", 0, [1], ["two"]), + ], +) +def test_drop_unique_and_non_unique_index( + data, index, axis, drop_labels, expected_data, expected_index +): + ser = Series(data=data, index=index) + result = ser.drop(drop_labels, axis=axis) + expected = Series(data=expected_data, index=expected_index) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data, index, drop_labels, axis, error_type, error_desc", + [ + # single string/tuple-like + (range(3), list("abc"), "bc", 0, KeyError, "not found in axis"), + # bad axis + (range(3), list("abc"), ("a",), 0, KeyError, "not found in axis"), + (range(3), list("abc"), "one", "columns", ValueError, "No axis named columns"), + ], +) +def test_drop_exception_raised(data, index, drop_labels, axis, error_type, error_desc): + ser = Series(data, index=index) + with pytest.raises(error_type, match=error_desc): + ser.drop(drop_labels, axis=axis) + + +def test_drop_with_ignore_errors(): + # errors='ignore' + ser = Series(range(3), index=list("abc")) + result = ser.drop("bc", errors="ignore") + tm.assert_series_equal(result, ser) + result = ser.drop(["a", "d"], errors="ignore") + expected = ser.iloc[1:] + tm.assert_series_equal(result, expected) + + # GH 8522 + ser = Series([2, 3], index=[True, False]) + assert is_bool_dtype(ser.index) + assert ser.index.dtype == bool + result = ser.drop(True) + expected = Series([3], index=[False]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("index", [[1, 2, 3], [1, 1, 3]]) +@pytest.mark.parametrize("drop_labels", [[], [1], [3]]) +def test_drop_empty_list(index, drop_labels): + # GH 21494 + expected_index = [i for i in index if i not in drop_labels] + series = Series(index=index, dtype=object).drop(drop_labels) + expected = Series(index=expected_index, dtype=object) + tm.assert_series_equal(series, expected) + + +@pytest.mark.parametrize( + "data, index, drop_labels", + [ + (None, [1, 2, 3], [1, 4]), + (None, [1, 2, 2], [1, 4]), + ([2, 3], [0, 1], [False, True]), + ], +) +def test_drop_non_empty_list(data, index, drop_labels): + # GH 21494 and GH 16877 + dtype = object 
if data is None else None + ser = Series(data=data, index=index, dtype=dtype) + with pytest.raises(KeyError, match="not found in axis"): + ser.drop(drop_labels) + + +def test_drop_index_ea_dtype(any_numeric_ea_dtype): + # GH#45860 + df = Series(100, index=Index([1, 2, 2], dtype=any_numeric_ea_dtype)) + idx = Index([df.index[1]]) + result = df.drop(idx) + expected = Series(100, index=Index([1], dtype=any_numeric_ea_dtype)) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_dtypes.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..82260bc2a65b9f7e387532b380b9ce013d29cbb7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_dtypes.py @@ -0,0 +1,7 @@ +import numpy as np + + +class TestSeriesDtypes: + def test_dtype(self, datetime_series): + assert datetime_series.dtype == np.dtype("float64") + assert datetime_series.dtypes == np.dtype("float64") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_equals.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_equals.py new file mode 100644 index 0000000000000000000000000000000000000000..875ffdd3fe8514edb4c313c8f558330c787cef08 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_equals.py @@ -0,0 +1,145 @@ +from contextlib import nullcontext +import copy + +import numpy as np +import pytest + +from pandas._libs.missing import is_matching_na +from pandas.compat.numpy import np_version_gte1p25 + +from pandas.core.dtypes.common import is_float + +from pandas import ( + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "arr, idx", + [ + ([1, 2, 3, 4], [0, 2, 1, 3]), + ([1, np.nan, 3, np.nan], [0, 2, 1, 3]), + ( + [1, np.nan, 3, np.nan], + MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]), + ), + ], +) +def test_equals(arr, idx): + s1 = Series(arr, index=idx) + s2 = s1.copy() + assert s1.equals(s2) + + s1[1] = 9 + assert not s1.equals(s2) + + +@pytest.mark.parametrize( + "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None] +) +def test_equals_list_array(val): + # GH20676 Verify equals operator for list of Numpy arrays + arr = np.array([1, 2]) + s1 = Series([arr, arr]) + s2 = s1.copy() + assert s1.equals(s2) + + s1[1] = val + + cm = ( + tm.assert_produces_warning(FutureWarning, check_stacklevel=False) + if isinstance(val, str) and not np_version_gte1p25 + else nullcontext() + ) + with cm: + assert not s1.equals(s2) + + +def test_equals_false_negative(): + # GH8437 Verify false negative behavior of equals function for dtype object + arr = [False, np.nan] + s1 = Series(arr) + s2 = s1.copy() + s3 = Series(index=range(2), dtype=object) + s4 = s3.copy() + s5 = s3.copy() + s6 = s3.copy() + + s3[:-1] = s4[:-1] = s5[0] = s6[0] = False + assert s1.equals(s1) + assert s1.equals(s2) + assert s1.equals(s3) + assert s1.equals(s4) + assert s1.equals(s5) + assert s5.equals(s6) + + +def test_equals_matching_nas(): + # matching but not identical NAs + left = Series([np.datetime64("NaT")], dtype=object) + right = Series([np.datetime64("NaT")], dtype=object) + assert left.equals(right) + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + assert Index(left).equals(Index(right)) + assert left.array.equals(right.array) + + left = 
Series([np.timedelta64("NaT")], dtype=object) + right = Series([np.timedelta64("NaT")], dtype=object) + assert left.equals(right) + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + assert Index(left).equals(Index(right)) + assert left.array.equals(right.array) + + left = Series([np.float64("NaN")], dtype=object) + right = Series([np.float64("NaN")], dtype=object) + assert left.equals(right) + assert Index(left, dtype=left.dtype).equals(Index(right, dtype=right.dtype)) + assert left.array.equals(right.array) + + +def test_equals_mismatched_nas(nulls_fixture, nulls_fixture2): + # GH#39650 + left = nulls_fixture + right = nulls_fixture2 + if hasattr(right, "copy"): + right = right.copy() + else: + right = copy.copy(right) + + ser = Series([left], dtype=object) + ser2 = Series([right], dtype=object) + + if is_matching_na(left, right): + assert ser.equals(ser2) + elif (left is None and is_float(right)) or (right is None and is_float(left)): + assert ser.equals(ser2) + else: + assert not ser.equals(ser2) + + +def test_equals_none_vs_nan(): + # GH#39650 + ser = Series([1, None], dtype=object) + ser2 = Series([1, np.nan], dtype=object) + + assert ser.equals(ser2) + assert Index(ser, dtype=ser.dtype).equals(Index(ser2, dtype=ser2.dtype)) + assert ser.array.equals(ser2.array) + + +def test_equals_None_vs_float(): + # GH#44190 + left = Series([-np.inf, np.nan, -1.0, 0.0, 1.0, 10 / 3, np.inf], dtype=object) + right = Series([None] * len(left)) + + # these series were found to be equal due to a bug, check that they are correctly + # found to not equal + assert not left.equals(right) + assert not right.equals(left) + assert not left.to_frame().equals(right.to_frame()) + assert not right.to_frame().equals(left.to_frame()) + assert not Index(left, dtype="object").equals(Index(right, dtype="object")) + assert not Index(right, dtype="object").equals(Index(left, dtype="object")) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_explode.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_explode.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0188585ef30c12f7222a054714957df4ed2ff2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_explode.py @@ -0,0 +1,175 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_basic(): + s = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo") + result = s.explode() + expected = pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo" + ) + tm.assert_series_equal(result, expected) + + +def test_mixed_type(): + s = pd.Series( + [[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo" + ) + result = s.explode() + expected = pd.Series( + [0, 1, 2, np.nan, None, np.nan, "a", "b"], + index=[0, 0, 0, 1, 2, 3, 4, 4], + dtype=object, + name="foo", + ) + tm.assert_series_equal(result, expected) + + +def test_empty(): + s = pd.Series(dtype=object) + result = s.explode() + expected = s.copy() + tm.assert_series_equal(result, expected) + + +def test_nested_lists(): + s = pd.Series([[[1, 2, 3]], [1, 2], 1]) + result = s.explode() + expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2]) + tm.assert_series_equal(result, expected) + + +def test_multi_index(): + s = pd.Series( + [[0, 1, 2], np.nan, [], (3, 4)], + name="foo", + index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", 
"bar"]), + ) + result = s.explode() + index = pd.MultiIndex.from_tuples( + [("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)], + names=["foo", "bar"], + ) + expected = pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], index=index, dtype=object, name="foo" + ) + tm.assert_series_equal(result, expected) + + +def test_large(): + s = pd.Series([range(256)]).explode() + result = s.explode() + tm.assert_series_equal(result, s) + + +def test_invert_array(): + df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")}) + + listify = df.apply(lambda x: x.array, axis=1) + result = listify.explode() + tm.assert_series_equal(result, df["a"].rename()) + + +@pytest.mark.parametrize( + "s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))] +) +def test_non_object_dtype(s): + result = s.explode() + tm.assert_series_equal(result, s) + + +def test_typical_usecase(): + df = pd.DataFrame( + [{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}], + columns=["var1", "var2"], + ) + exploded = df.var1.str.split(",").explode() + result = df[["var2"]].join(exploded) + expected = pd.DataFrame( + {"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")}, + columns=["var2", "var1"], + index=[0, 0, 0, 1, 1, 1], + ) + tm.assert_frame_equal(result, expected) + + +def test_nested_EA(): + # a nested EA array + s = pd.Series( + [ + pd.date_range("20170101", periods=3, tz="UTC"), + pd.date_range("20170104", periods=3, tz="UTC"), + ] + ) + result = s.explode() + expected = pd.Series( + pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1] + ) + tm.assert_series_equal(result, expected) + + +def test_duplicate_index(): + # GH 28005 + s = pd.Series([[1, 2], [3, 4]], index=[0, 0]) + result = s.explode() + expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object) + tm.assert_series_equal(result, expected) + + +def test_ignore_index(): + # GH 34932 + s = pd.Series([[1, 2], [3, 4]]) + result = s.explode(ignore_index=True) + expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object) + tm.assert_series_equal(result, expected) + + +def test_explode_sets(): + # https://github.com/pandas-dev/pandas/issues/35614 + s = pd.Series([{"a", "b", "c"}], index=[1]) + result = s.explode().sort_values() + expected = pd.Series(["a", "b", "c"], index=[1, 1, 1]) + tm.assert_series_equal(result, expected) + + +def test_explode_scalars_can_ignore_index(): + # https://github.com/pandas-dev/pandas/issues/40487 + s = pd.Series([1, 2, 3], index=["a", "b", "c"]) + result = s.explode(ignore_index=True) + expected = pd.Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ignore_index", [True, False]) +def test_explode_pyarrow_list_type(ignore_index): + # GH 53602 + pa = pytest.importorskip("pyarrow") + + data = [ + [None, None], + [1], + [], + [2, 3], + None, + ] + ser = pd.Series(data, dtype=pd.ArrowDtype(pa.list_(pa.int64()))) + result = ser.explode(ignore_index=ignore_index) + expected = pd.Series( + data=[None, None, 1, None, 2, 3, None], + index=None if ignore_index else [0, 0, 1, 2, 3, 3, 4], + dtype=pd.ArrowDtype(pa.int64()), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ignore_index", [True, False]) +def test_explode_pyarrow_non_list_type(ignore_index): + pa = pytest.importorskip("pyarrow") + data = [1, 2, 3] + ser = pd.Series(data, dtype=pd.ArrowDtype(pa.int64())) + result = ser.explode(ignore_index=ignore_index) + expected = pd.Series([1, 2, 3], dtype="int64[pyarrow]", index=[0, 1, 
2]) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_fillna.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_fillna.py new file mode 100644 index 0000000000000000000000000000000000000000..293259661cd9a107eb4ad8e33c0b73dfb4010a14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_fillna.py @@ -0,0 +1,1155 @@ +from datetime import ( + datetime, + timedelta, + timezone, +) + +import numpy as np +import pytest +import pytz + +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + NaT, + Period, + Series, + Timedelta, + Timestamp, + date_range, + isna, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import period_array + + +@pytest.mark.filterwarnings( + "ignore:(Series|DataFrame).fillna with 'method' is deprecated:FutureWarning" +) +class TestSeriesFillNA: + def test_fillna_nat(self): + series = Series([0, 1, 2, NaT._value], dtype="M8[ns]") + + filled = series.fillna(method="pad") + filled2 = series.fillna(value=series.values[2]) + + expected = series.copy() + expected.iloc[3] = expected.iloc[2] + + tm.assert_series_equal(filled, expected) + tm.assert_series_equal(filled2, expected) + + df = DataFrame({"A": series}) + filled = df.fillna(method="pad") + filled2 = df.fillna(value=series.values[2]) + expected = DataFrame({"A": expected}) + tm.assert_frame_equal(filled, expected) + tm.assert_frame_equal(filled2, expected) + + series = Series([NaT._value, 0, 1, 2], dtype="M8[ns]") + + filled = series.fillna(method="bfill") + filled2 = series.fillna(value=series[1]) + + expected = series.copy() + expected[0] = expected[1] + + tm.assert_series_equal(filled, expected) + tm.assert_series_equal(filled2, expected) + + df = DataFrame({"A": series}) + filled = df.fillna(method="bfill") + filled2 = df.fillna(value=series[1]) + expected = DataFrame({"A": expected}) + tm.assert_frame_equal(filled, expected) + tm.assert_frame_equal(filled2, expected) + + def test_fillna_value_or_method(self, datetime_series): + msg = "Cannot specify both 'value' and 'method'" + with pytest.raises(ValueError, match=msg): + datetime_series.fillna(value=0, method="ffill") + + def test_fillna(self): + ts = Series( + [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5) + ) + + tm.assert_series_equal(ts, ts.fillna(method="ffill")) + + ts.iloc[2] = np.nan + + exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index) + tm.assert_series_equal(ts.fillna(method="ffill"), exp) + + exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index) + tm.assert_series_equal(ts.fillna(method="backfill"), exp) + + exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index) + tm.assert_series_equal(ts.fillna(value=5), exp) + + msg = "Must specify a fill 'value' or 'method'" + with pytest.raises(ValueError, match=msg): + ts.fillna() + + def test_fillna_nonscalar(self): + # GH#5703 + s1 = Series([np.nan]) + s2 = Series([1]) + result = s1.fillna(s2) + expected = Series([1.0]) + tm.assert_series_equal(result, expected) + result = s1.fillna({}) + tm.assert_series_equal(result, s1) + result = s1.fillna(Series((), dtype=object)) + tm.assert_series_equal(result, s1) + result = s2.fillna(s1) + tm.assert_series_equal(result, s2) + result = s1.fillna({0: 1}) + tm.assert_series_equal(result, expected) + result = s1.fillna({1: 1}) + tm.assert_series_equal(result, Series([np.nan])) + result = s1.fillna({0: 1, 1: 1}) + tm.assert_series_equal(result, expected) + result 
= s1.fillna(Series({0: 1, 1: 1})) + tm.assert_series_equal(result, expected) + result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5])) + tm.assert_series_equal(result, s1) + + def test_fillna_aligns(self): + s1 = Series([0, 1, 2], list("abc")) + s2 = Series([0, np.nan, 2], list("bac")) + result = s2.fillna(s1) + expected = Series([0, 0, 2.0], list("bac")) + tm.assert_series_equal(result, expected) + + def test_fillna_limit(self): + ser = Series(np.nan, index=[0, 1, 2]) + result = ser.fillna(999, limit=1) + expected = Series([999, np.nan, np.nan], index=[0, 1, 2]) + tm.assert_series_equal(result, expected) + + result = ser.fillna(999, limit=2) + expected = Series([999, 999, np.nan], index=[0, 1, 2]) + tm.assert_series_equal(result, expected) + + def test_fillna_dont_cast_strings(self): + # GH#9043 + # make sure a string representation of int/float values can be filled + # correctly without raising errors or being converted + vals = ["0", "1.5", "-0.3"] + for val in vals: + ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64") + result = ser.fillna(val) + expected = Series([0, 1, val, val, 4], dtype="object") + tm.assert_series_equal(result, expected) + + def test_fillna_consistency(self): + # GH#16402 + # fillna with a tz aware to a tz-naive, should result in object + + ser = Series([Timestamp("20130101"), NaT]) + + result = ser.fillna(Timestamp("20130101", tz="US/Eastern")) + expected = Series( + [Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")], + dtype="object", + ) + tm.assert_series_equal(result, expected) + + result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern")) + tm.assert_series_equal(result, expected) + + result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern")) + tm.assert_series_equal(result, expected) + + # with a non-datetime + result = ser.fillna("foo") + expected = Series([Timestamp("20130101"), "foo"]) + tm.assert_series_equal(result, expected) + + # assignment + ser2 = ser.copy() + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + ser2[1] = "foo" + tm.assert_series_equal(ser2, expected) + + def test_fillna_downcast(self): + # GH#15277 + # infer int64 from float64 + ser = Series([1.0, np.nan]) + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.fillna(0, downcast="infer") + expected = Series([1, 0]) + tm.assert_series_equal(result, expected) + + # infer int64 from float64 when fillna value is a dict + ser = Series([1.0, np.nan]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.fillna({1: 0}, downcast="infer") + expected = Series([1, 0]) + tm.assert_series_equal(result, expected) + + def test_fillna_downcast_infer_objects_to_numeric(self): + # GH#44241 if we have object-dtype, 'downcast="infer"' should + # _actually_ infer + + arr = np.arange(5).astype(object) + arr[3] = np.nan + + ser = Series(arr) + + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.fillna(3, downcast="infer") + expected = Series(np.arange(5), dtype=np.int64) + tm.assert_series_equal(res, expected) + + msg = "The 'downcast' keyword in ffill is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.ffill(downcast="infer") + expected = Series([0, 1, 2, 2, 4], dtype=np.int64) + tm.assert_series_equal(res, expected) + + msg = "The 'downcast' keyword in bfill is deprecated" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.bfill(downcast="infer") + expected = Series([0, 1, 2, 4, 4], dtype=np.int64) + tm.assert_series_equal(res, expected) + + # with a non-round float present, we will downcast to float64 + ser[2] = 2.5 + + expected = Series([0, 1, 2.5, 3, 4], dtype=np.float64) + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.fillna(3, downcast="infer") + tm.assert_series_equal(res, expected) + + msg = "The 'downcast' keyword in ffill is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.ffill(downcast="infer") + expected = Series([0, 1, 2.5, 2.5, 4], dtype=np.float64) + tm.assert_series_equal(res, expected) + + msg = "The 'downcast' keyword in bfill is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.bfill(downcast="infer") + expected = Series([0, 1, 2.5, 4, 4], dtype=np.float64) + tm.assert_series_equal(res, expected) + + def test_timedelta_fillna(self, frame_or_series, unit): + # GH#3371 + ser = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130102"), + Timestamp("20130103 9:01:01"), + ], + dtype=f"M8[{unit}]", + ) + td = ser.diff() + obj = frame_or_series(td).copy() + + # reg fillna + result = obj.fillna(Timedelta(seconds=0)) + expected = Series( + [ + timedelta(0), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ], + dtype=f"m8[{unit}]", + ) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + # GH#45746 pre-1.? ints were interpreted as seconds. then that was + # deprecated and changed to raise. In 2.0 it casts to common dtype, + # consistent with every other dtype's behavior + res = obj.fillna(1) + expected = obj.astype(object).fillna(1) + tm.assert_equal(res, expected) + + result = obj.fillna(Timedelta(seconds=1)) + expected = Series( + [ + timedelta(seconds=1), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ], + dtype=f"m8[{unit}]", + ) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + result = obj.fillna(timedelta(days=1, seconds=1)) + expected = Series( + [ + timedelta(days=1, seconds=1), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ], + dtype=f"m8[{unit}]", + ) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + result = obj.fillna(np.timedelta64(10**9)) + expected = Series( + [ + timedelta(seconds=1), + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ], + dtype=f"m8[{unit}]", + ) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + result = obj.fillna(NaT) + expected = Series( + [ + NaT, + timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1), + ], + dtype=f"m8[{unit}]", + ) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + # ffill + td[2] = np.nan + obj = frame_or_series(td).copy() + result = obj.ffill() + expected = td.fillna(Timedelta(seconds=0)) + expected[0] = np.nan + expected = frame_or_series(expected) + + tm.assert_equal(result, expected) + + # bfill + td[2] = np.nan + obj = frame_or_series(td) + result = obj.bfill() + expected = td.fillna(Timedelta(seconds=0)) + expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + def test_datetime64_fillna(self): + ser = Series( + [ + 
Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130102"), + Timestamp("20130103 9:01:01"), + ] + ) + ser[2] = np.nan + + # ffill + result = ser.ffill() + expected = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130103 9:01:01"), + ] + ) + tm.assert_series_equal(result, expected) + + # bfill + result = ser.bfill() + expected = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130103 9:01:01"), + Timestamp("20130103 9:01:01"), + ] + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "scalar", + [ + False, + pytest.param( + True, + marks=pytest.mark.xfail( + reason="GH#56410 scalar case not yet addressed" + ), + ), + ], + ) + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_datetime64_fillna_mismatched_reso_no_rounding(self, tz, scalar): + # GH#56410 + dti = date_range("2016-01-01", periods=3, unit="s", tz=tz) + item = Timestamp("2016-02-03 04:05:06.789", tz=tz) + vec = date_range(item, periods=3, unit="ms") + + exp_dtype = "M8[ms]" if tz is None else "M8[ms, UTC]" + expected = Series([item, dti[1], dti[2]], dtype=exp_dtype) + + ser = Series(dti) + ser[0] = NaT + ser2 = ser.copy() + + res = ser.fillna(item) + res2 = ser2.fillna(Series(vec)) + + if scalar: + tm.assert_series_equal(res, expected) + else: + tm.assert_series_equal(res2, expected) + + @pytest.mark.parametrize( + "scalar", + [ + False, + pytest.param( + True, + marks=pytest.mark.xfail( + reason="GH#56410 scalar case not yet addressed" + ), + ), + ], + ) + def test_timedelta64_fillna_mismatched_reso_no_rounding(self, scalar): + # GH#56410 + tdi = date_range("2016-01-01", periods=3, unit="s") - Timestamp("1970-01-01") + item = Timestamp("2016-02-03 04:05:06.789") - Timestamp("1970-01-01") + vec = timedelta_range(item, periods=3, unit="ms") + + expected = Series([item, tdi[1], tdi[2]], dtype="m8[ms]") + + ser = Series(tdi) + ser[0] = NaT + ser2 = ser.copy() + + res = ser.fillna(item) + res2 = ser2.fillna(Series(vec)) + + if scalar: + tm.assert_series_equal(res, expected) + else: + tm.assert_series_equal(res2, expected) + + def test_datetime64_fillna_backfill(self): + # GH#6587 + # make sure that we are treating as integer when filling + ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"], dtype="M8[ns]") + + expected = Series( + [ + "2013-08-05 15:30:00.000001", + "2013-08-05 15:30:00.000001", + "2013-08-05 15:30:00.000001", + ], + dtype="M8[ns]", + ) + result = ser.fillna(method="backfill") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"]) + def test_datetime64_tz_fillna(self, tz, unit): + # DatetimeLikeBlock + ser = Series( + [ + Timestamp("2011-01-01 10:00"), + NaT, + Timestamp("2011-01-03 10:00"), + NaT, + ], + dtype=f"M8[{unit}]", + ) + null_loc = Series([False, True, False, True]) + + result = ser.fillna(Timestamp("2011-01-02 10:00")) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00"), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-02 10:00"), + ], + dtype=f"M8[{unit}]", + ) + tm.assert_series_equal(expected, result) + # check s is not changed + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz)) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-02 10:00", tz=tz), + ] + ) + tm.assert_series_equal(expected, result) + 
tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna("AAA") + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + "AAA", + Timestamp("2011-01-03 10:00"), + "AAA", + ], + dtype=object, + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + { + 1: Timestamp("2011-01-02 10:00", tz=tz), + 3: Timestamp("2011-01-04 10:00"), + } + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-04 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + {1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")} + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00"), + Timestamp("2011-01-03 10:00"), + Timestamp("2011-01-04 10:00"), + ], + dtype=f"M8[{unit}]", + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + # DatetimeTZBlock + idx = DatetimeIndex( + ["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz + ).as_unit(unit) + ser = Series(idx) + assert ser.dtype == f"datetime64[{unit}, {tz}]" + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00")) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2011-01-02 10:00"), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2011-01-02 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz)) + idx = DatetimeIndex( + [ + "2011-01-01 10:00", + "2011-01-02 10:00", + "2011-01-03 10:00", + "2011-01-02 10:00", + ], + tz=tz, + ).as_unit(unit) + expected = Series(idx) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime()) + idx = DatetimeIndex( + [ + "2011-01-01 10:00", + "2011-01-02 10:00", + "2011-01-03 10:00", + "2011-01-02 10:00", + ], + tz=tz, + ).as_unit(unit) + expected = Series(idx) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna("AAA") + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + "AAA", + Timestamp("2011-01-03 10:00", tz=tz), + "AAA", + ], + dtype=object, + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + { + 1: Timestamp("2011-01-02 10:00", tz=tz), + 3: Timestamp("2011-01-04 10:00"), + } + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2011-01-04 10:00"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + result = ser.fillna( + { + 1: Timestamp("2011-01-02 10:00", tz=tz), + 3: Timestamp("2011-01-04 10:00", tz=tz), + } + ) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2011-01-02 10:00", tz=tz), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2011-01-04 10:00", tz=tz), + ] + ).dt.as_unit(unit) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + # filling with a naive/other zone, coerce to object + result = ser.fillna(Timestamp("20130101")) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2013-01-01"), + 
Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2013-01-01"), + ] + ) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + # pre-2.0 fillna with mixed tzs would cast to object, in 2.0 + # it retains dtype. + result = ser.fillna(Timestamp("20130101", tz="US/Pacific")) + expected = Series( + [ + Timestamp("2011-01-01 10:00", tz=tz), + Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz), + Timestamp("2011-01-03 10:00", tz=tz), + Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz), + ] + ).dt.as_unit(unit) + tm.assert_series_equal(expected, result) + tm.assert_series_equal(isna(ser), null_loc) + + def test_fillna_dt64tz_with_method(self): + # with timezone + # GH#15855 + ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT]) + exp = Series( + [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + ) + tm.assert_series_equal(ser.fillna(method="pad"), exp) + + ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")]) + exp = Series( + [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + ) + tm.assert_series_equal(ser.fillna(method="bfill"), exp) + + def test_fillna_pytimedelta(self): + # GH#8209 + ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"]) + + result = ser.fillna(timedelta(1)) + expected = Series(Timedelta("1 days"), index=["A", "B"]) + tm.assert_series_equal(result, expected) + + def test_fillna_period(self): + # GH#13737 + ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")]) + + res = ser.fillna(Period("2012-01", freq="M")) + exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")]) + tm.assert_series_equal(res, exp) + assert res.dtype == "Period[M]" + + def test_fillna_dt64_timestamp(self, frame_or_series): + ser = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130102"), + Timestamp("20130103 9:01:01"), + ] + ) + ser[2] = np.nan + obj = frame_or_series(ser) + + # reg fillna + result = obj.fillna(Timestamp("20130104")) + expected = Series( + [ + Timestamp("20130101"), + Timestamp("20130101"), + Timestamp("20130104"), + Timestamp("20130103 9:01:01"), + ] + ) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + result = obj.fillna(NaT) + expected = obj + tm.assert_equal(result, expected) + + def test_fillna_dt64_non_nao(self): + # GH#27419 + ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")]) + val = np.datetime64("1975-04-05", "ms") + + result = ser.fillna(val) + expected = Series( + [Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")] + ) + tm.assert_series_equal(result, expected) + + def test_fillna_numeric_inplace(self): + x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"]) + y = x.copy() + + return_value = y.fillna(value=0, inplace=True) + assert return_value is None + + expected = x.fillna(value=0) + tm.assert_series_equal(y, expected) + + # --------------------------------------------------------------- + # CategoricalDtype + + @pytest.mark.parametrize( + "fill_value, expected_output", + [ + ("a", ["a", "a", "b", "a", "a"]), + ({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]), + ({1: "a"}, ["a", "a", "b", np.nan, np.nan]), + ({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]), + (Series("a"), ["a", np.nan, "b", np.nan, np.nan]), + (Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]), + (Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]), + (Series(["a", "b"], index=[3, 4]), ["a", 
np.nan, "b", "a", "b"]), + ], + ) + def test_fillna_categorical(self, fill_value, expected_output): + # GH#17033 + # Test fillna for a Categorical series + data = ["a", np.nan, "b", np.nan, np.nan] + ser = Series(Categorical(data, categories=["a", "b"])) + exp = Series(Categorical(expected_output, categories=["a", "b"])) + result = ser.fillna(fill_value) + tm.assert_series_equal(result, exp) + + @pytest.mark.parametrize( + "fill_value, expected_output", + [ + (Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]), + (Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]), + ( + Series( + Categorical( + ["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"] + ) + ), + ["a", "d", "b", "d", "a"], + ), + ], + ) + def test_fillna_categorical_with_new_categories(self, fill_value, expected_output): + # GH#26215 + data = ["a", np.nan, "b", np.nan, np.nan] + ser = Series(Categorical(data, categories=["a", "b", "c", "d", "e"])) + exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"])) + result = ser.fillna(fill_value) + tm.assert_series_equal(result, exp) + + def test_fillna_categorical_raises(self): + data = ["a", np.nan, "b", np.nan, np.nan] + ser = Series(Categorical(data, categories=["a", "b"])) + cat = ser._values + + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + ser.fillna("d") + + msg2 = "Length of 'value' does not match." + with pytest.raises(ValueError, match=msg2): + cat.fillna(Series("d")) + + with pytest.raises(TypeError, match=msg): + ser.fillna({1: "d", 3: "a"}) + + msg = '"value" parameter must be a scalar or dict, but you passed a "list"' + with pytest.raises(TypeError, match=msg): + ser.fillna(["a", "b"]) + + msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"' + with pytest.raises(TypeError, match=msg): + ser.fillna(("a", "b")) + + msg = ( + '"value" parameter must be a scalar, dict ' + 'or Series, but you passed a "DataFrame"' + ) + with pytest.raises(TypeError, match=msg): + ser.fillna(DataFrame({1: ["a"], 3: ["b"]})) + + @pytest.mark.parametrize("dtype", [float, "float32", "float64"]) + @pytest.mark.parametrize("fill_type", tm.ALL_REAL_NUMPY_DTYPES) + @pytest.mark.parametrize("scalar", [True, False]) + def test_fillna_float_casting(self, dtype, fill_type, scalar): + # GH-43424 + ser = Series([np.nan, 1.2], dtype=dtype) + fill_values = Series([2, 2], dtype=fill_type) + if scalar: + fill_values = fill_values.dtype.type(2) + + result = ser.fillna(fill_values) + expected = Series([2.0, 1.2], dtype=dtype) + tm.assert_series_equal(result, expected) + + ser = Series([np.nan, 1.2], dtype=dtype) + mask = ser.isna().to_numpy() + ser[mask] = fill_values + tm.assert_series_equal(ser, expected) + + ser = Series([np.nan, 1.2], dtype=dtype) + ser.mask(mask, fill_values, inplace=True) + tm.assert_series_equal(ser, expected) + + ser = Series([np.nan, 1.2], dtype=dtype) + res = ser.where(~mask, fill_values) + tm.assert_series_equal(res, expected) + + def test_fillna_f32_upcast_with_dict(self): + # GH-43424 + ser = Series([np.nan, 1.2], dtype=np.float32) + result = ser.fillna({0: 1}) + expected = Series([1.0, 1.2], dtype=np.float32) + tm.assert_series_equal(result, expected) + + # --------------------------------------------------------------- + # Invalid Usages + + def test_fillna_invalid_method(self, datetime_series): + try: + datetime_series.fillna(method="ffil") + except ValueError as inst: + assert "ffil" in str(inst) + + def test_fillna_listlike_invalid(self): 
+ ser = Series(np.random.default_rng(2).integers(-100, 100, 50)) + msg = '"value" parameter must be a scalar or dict, but you passed a "list"' + with pytest.raises(TypeError, match=msg): + ser.fillna([1, 2]) + + msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"' + with pytest.raises(TypeError, match=msg): + ser.fillna((1, 2)) + + def test_fillna_method_and_limit_invalid(self): + # related GH#9217, make sure limit is an int and greater than 0 + ser = Series([1, 2, 3, None]) + msg = "|".join( + [ + r"Cannot specify both 'value' and 'method'\.", + "Limit must be greater than 0", + "Limit must be an integer", + ] + ) + for limit in [-1, 0, 1.0, 2.0]: + for method in ["backfill", "bfill", "pad", "ffill", None]: + with pytest.raises(ValueError, match=msg): + ser.fillna(1, limit=limit, method=method) + + def test_fillna_datetime64_with_timezone_tzinfo(self): + # https://github.com/pandas-dev/pandas/issues/38851 + # different tzinfos representing UTC treated as equal + ser = Series(date_range("2020", periods=3, tz="UTC")) + expected = ser.copy() + ser[1] = NaT + result = ser.fillna(datetime(2020, 1, 2, tzinfo=timezone.utc)) + tm.assert_series_equal(result, expected) + + # pre-2.0 we cast to object with mixed tzs, in 2.0 we retain dtype + ts = Timestamp("2000-01-01", tz="US/Pacific") + ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific")) + assert ser2.dtype.kind == "M" + result = ser2.fillna(ts) + expected = Series( + [ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]], + dtype=ser2.dtype, + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "input, input_fillna, expected_data, expected_categories", + [ + (["A", "B", None, "A"], "B", ["A", "B", "B", "A"], ["A", "B"]), + (["A", "B", np.nan, "A"], "B", ["A", "B", "B", "A"], ["A", "B"]), + ], + ) + def test_fillna_categorical_accept_same_type( + self, input, input_fillna, expected_data, expected_categories + ): + # GH32414 + cat = Categorical(input) + ser = Series(cat).fillna(input_fillna) + filled = cat.fillna(ser) + result = cat.fillna(filled) + expected = Categorical(expected_data, categories=expected_categories) + tm.assert_categorical_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:Series.fillna with 'method' is deprecated:FutureWarning" +) +class TestFillnaPad: + def test_fillna_bug(self): + ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"]) + filled = ser.fillna(method="ffill") + expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index) + tm.assert_series_equal(filled, expected) + + filled = ser.fillna(method="bfill") + expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index) + tm.assert_series_equal(filled, expected) + + def test_ffill(self): + ts = Series( + [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5) + ) + ts.iloc[2] = np.nan + tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill")) + + def test_ffill_mixed_dtypes_without_missing_data(self): + # GH#14956 + series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1]) + result = series.ffill() + tm.assert_series_equal(series, result) + + def test_bfill(self): + ts = Series( + [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5) + ) + ts.iloc[2] = np.nan + tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill")) + + def test_pad_nan(self): + x = Series( + [np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float + ) + + return_value = x.fillna(method="pad", inplace=True) + assert return_value is None + + expected = Series( + 
[np.nan, 1.0, 1.0, 3.0, 3.0], ["z", "a", "b", "c", "d"], dtype=float + ) + tm.assert_series_equal(x[1:], expected[1:]) + assert np.isnan(x.iloc[0]), np.isnan(expected.iloc[0]) + + def test_series_fillna_limit(self): + index = np.arange(10) + s = Series(np.random.default_rng(2).standard_normal(10), index=index) + + result = s[:2].reindex(index) + result = result.fillna(method="pad", limit=5) + + expected = s[:2].reindex(index).fillna(method="pad") + expected[-3:] = np.nan + tm.assert_series_equal(result, expected) + + result = s[-2:].reindex(index) + result = result.fillna(method="bfill", limit=5) + + expected = s[-2:].reindex(index).fillna(method="backfill") + expected[:3] = np.nan + tm.assert_series_equal(result, expected) + + def test_series_pad_backfill_limit(self): + index = np.arange(10) + s = Series(np.random.default_rng(2).standard_normal(10), index=index) + + result = s[:2].reindex(index, method="pad", limit=5) + + expected = s[:2].reindex(index).fillna(method="pad") + expected[-3:] = np.nan + tm.assert_series_equal(result, expected) + + result = s[-2:].reindex(index, method="backfill", limit=5) + + expected = s[-2:].reindex(index).fillna(method="backfill") + expected[:3] = np.nan + tm.assert_series_equal(result, expected) + + def test_fillna_int(self): + ser = Series(np.random.default_rng(2).integers(-100, 100, 50)) + return_value = ser.fillna(method="ffill", inplace=True) + assert return_value is None + tm.assert_series_equal(ser.fillna(method="ffill", inplace=False), ser) + + def test_datetime64tz_fillna_round_issue(self): + # GH#14872 + + data = Series( + [NaT, NaT, datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc)] + ) + + filled = data.bfill() + + expected = Series( + [ + datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), + datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), + datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc), + ] + ) + + tm.assert_series_equal(filled, expected) + + def test_fillna_parr(self): + # GH-24537 + dti = date_range( + Timestamp.max - Timedelta(nanoseconds=10), periods=5, freq="ns" + ) + ser = Series(dti.to_period("ns")) + ser[2] = NaT + arr = period_array( + [ + Timestamp("2262-04-11 23:47:16.854775797"), + Timestamp("2262-04-11 23:47:16.854775798"), + Timestamp("2262-04-11 23:47:16.854775798"), + Timestamp("2262-04-11 23:47:16.854775800"), + Timestamp("2262-04-11 23:47:16.854775801"), + ], + freq="ns", + ) + expected = Series(arr) + + filled = ser.ffill() + + tm.assert_series_equal(filled, expected) + + @pytest.mark.parametrize("func", ["pad", "backfill"]) + def test_pad_backfill_deprecated(self, func): + # GH#33396 + ser = Series([1, 2, 3]) + with tm.assert_produces_warning(FutureWarning): + getattr(ser, func)() + + +@pytest.mark.parametrize( + "data, expected_data, method, kwargs", + ( + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan], + "ffill", + {"limit_area": "inside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan], + "ffill", + {"limit_area": "inside", "limit": 1}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0], + "ffill", + {"limit_area": "outside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan], + "ffill", + {"limit_area": "outside", "limit": 1}, + ), 
+ ( + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + "ffill", + {"limit_area": "outside", "limit": 1}, + ), + ( + range(5), + range(5), + "ffill", + {"limit_area": "outside", "limit": 1}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan], + "bfill", + {"limit_area": "inside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, np.nan], + "bfill", + {"limit_area": "inside", "limit": 1}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan], + "bfill", + {"limit_area": "outside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan], + "bfill", + {"limit_area": "outside", "limit": 1}, + ), + ), +) +def test_ffill_bfill_limit_area(data, expected_data, method, kwargs): + # GH#56492 + s = Series(data) + expected = Series(expected_data) + result = getattr(s, method)(**kwargs) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_get_numeric_data.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_get_numeric_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8325cc884ebcba069c18f457371cfa061893cf81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_get_numeric_data.py @@ -0,0 +1,38 @@ +from pandas import ( + Index, + Series, + date_range, +) +import pandas._testing as tm + + +class TestGetNumericData: + def test_get_numeric_data_preserve_dtype( + self, using_copy_on_write, warn_copy_on_write + ): + # get the numeric data + obj = Series([1, 2, 3]) + result = obj._get_numeric_data() + tm.assert_series_equal(result, obj) + + # returned object is a shallow copy + with tm.assert_cow_warning(warn_copy_on_write): + result.iloc[0] = 0 + if using_copy_on_write: + assert obj.iloc[0] == 1 + else: + assert obj.iloc[0] == 0 + + obj = Series([1, "2", 3.0]) + result = obj._get_numeric_data() + expected = Series([], dtype=object, index=Index([], dtype=object)) + tm.assert_series_equal(result, expected) + + obj = Series([True, False, True]) + result = obj._get_numeric_data() + tm.assert_series_equal(result, obj) + + obj = Series(date_range("20130101", periods=3)) + result = obj._get_numeric_data() + expected = Series([], dtype="M8[ns]", index=Index([], dtype=object)) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_infer_objects.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_infer_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..29abac6b3780ec500ef0c635785c9624e4264934 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_infer_objects.py @@ -0,0 +1,56 @@ +import numpy as np + +from pandas import ( + Series, + interval_range, +) +import pandas._testing as tm + + +class TestInferObjects: + def test_copy(self, index_or_series): + # GH#50096 + # case where we don't need to do inference because it is already non-object + obj = index_or_series(np.array([1, 2, 3], dtype="int64")) + + result = obj.infer_objects(copy=False) + assert tm.shares_memory(result, obj) + + 
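# note: copy=False is a request, not a guarantee -- infer_objects still
+        # copies when inference actually changes the dtype; here the data is
+        # already int64, so no inference is needed and the result may share
+        # memory with the input (which is what shares_memory checks above)
+
+        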
# case where we try to do inference but can't do better than object + obj2 = index_or_series(np.array(["foo", 2], dtype=object)) + result2 = obj2.infer_objects(copy=False) + assert tm.shares_memory(result2, obj2) + + def test_infer_objects_series(self, index_or_series): + # GH#11221 + actual = index_or_series(np.array([1, 2, 3], dtype="O")).infer_objects() + expected = index_or_series([1, 2, 3]) + tm.assert_equal(actual, expected) + + actual = index_or_series(np.array([1, 2, 3, None], dtype="O")).infer_objects() + expected = index_or_series([1.0, 2.0, 3.0, np.nan]) + tm.assert_equal(actual, expected) + + # only soft conversions, unconvertible pass thru unchanged + + obj = index_or_series(np.array([1, 2, 3, None, "a"], dtype="O")) + actual = obj.infer_objects() + expected = index_or_series([1, 2, 3, None, "a"], dtype=object) + + assert actual.dtype == "object" + tm.assert_equal(actual, expected) + + def test_infer_objects_interval(self, index_or_series): + # GH#50090 + ii = interval_range(1, 10) + obj = index_or_series(ii) + + result = obj.astype(object).infer_objects() + tm.assert_equal(result, obj) + + def test_infer_objects_bytes(self): + # GH#49650 + ser = Series([b"a"], dtype="bytes") + expected = ser.copy() + result = ser.infer_objects() + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_isin.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_isin.py new file mode 100644 index 0000000000000000000000000000000000000000..f94f67b8cc40a2031cab0791ccaf2fbc207b5023 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_isin.py @@ -0,0 +1,252 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Series, + date_range, +) +import pandas._testing as tm +from pandas.core import algorithms +from pandas.core.arrays import PeriodArray + + +class TestSeriesIsIn: + def test_isin(self): + s = Series(["A", "B", "C", "a", "B", "B", "A", "C"]) + + result = s.isin(["A", "C"]) + expected = Series([True, False, True, False, False, False, True, True]) + tm.assert_series_equal(result, expected) + + # GH#16012 + # This specific issue has to have a series over 1e6 in len, but the + # comparison array (in_list) must be large enough so that numpy doesn't + # do a manual masking trick that will avoid this issue altogether + s = Series(list("abcdefghijk" * 10**5)) + # If numpy doesn't do the manual comparison/mask, these + # unorderable mixed types are what cause the exception in numpy + in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6 + + assert s.isin(in_list).sum() == 200000 + + def test_isin_with_string_scalar(self): + # GH#4763 + s = Series(["A", "B", "C", "a", "B", "B", "A", "C"]) + msg = ( + r"only list-like objects are allowed to be passed to isin\(\), " + r"you passed a `str`" + ) + with pytest.raises(TypeError, match=msg): + s.isin("a") + + s = Series(["aaa", "b", "c"]) + with pytest.raises(TypeError, match=msg): + s.isin("aaa") + + def test_isin_datetimelike_mismatched_reso(self): + expected = Series([True, True, False, False, False]) + + ser = Series(date_range("jan-01-2013", "jan-05-2013")) + + # fails on dtype conversion in the first place + day_values = np.asarray(ser[0:2].values).astype("datetime64[D]") + result = ser.isin(day_values) + tm.assert_series_equal(result, expected) + + dta = ser[:2]._values.astype("M8[s]") + result = ser.isin(dta) + tm.assert_series_equal(result, expected) + + def 
test_isin_datetimelike_mismatched_reso_list(self): + expected = Series([True, True, False, False, False]) + + ser = Series(date_range("jan-01-2013", "jan-05-2013")) + + dta = ser[:2]._values.astype("M8[s]") + result = ser.isin(list(dta)) + tm.assert_series_equal(result, expected) + + def test_isin_with_i8(self): + # GH#5021 + + expected = Series([True, True, False, False, False]) + expected2 = Series([False, True, False, False, False]) + + # datetime64[ns] + s = Series(date_range("jan-01-2013", "jan-05-2013")) + + result = s.isin(s[0:2]) + tm.assert_series_equal(result, expected) + + result = s.isin(s[0:2].values) + tm.assert_series_equal(result, expected) + + result = s.isin([s[1]]) + tm.assert_series_equal(result, expected2) + + result = s.isin([np.datetime64(s[1])]) + tm.assert_series_equal(result, expected2) + + result = s.isin(set(s[0:2])) + tm.assert_series_equal(result, expected) + + # timedelta64[ns] + s = Series(pd.to_timedelta(range(5), unit="d")) + result = s.isin(s[0:2]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])]) + def test_isin_empty(self, empty): + # see GH#16991 + s = Series(["a", "b"]) + expected = Series([False, False]) + + result = s.isin(empty) + tm.assert_series_equal(expected, result) + + def test_isin_read_only(self): + # https://github.com/pandas-dev/pandas/issues/37174 + arr = np.array([1, 2, 3]) + arr.setflags(write=False) + s = Series([1, 2, 3]) + result = s.isin(arr) + expected = Series([True, True, True]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [object, None]) + def test_isin_dt64_values_vs_ints(self, dtype): + # GH#36621 dont cast integers to datetimes for isin + dti = date_range("2013-01-01", "2013-01-05") + ser = Series(dti) + + comps = np.asarray([1356998400000000000], dtype=dtype) + + res = dti.isin(comps) + expected = np.array([False] * len(dti), dtype=bool) + tm.assert_numpy_array_equal(res, expected) + + res = ser.isin(comps) + tm.assert_series_equal(res, Series(expected)) + + res = pd.core.algorithms.isin(ser, comps) + tm.assert_numpy_array_equal(res, expected) + + def test_isin_tzawareness_mismatch(self): + dti = date_range("2013-01-01", "2013-01-05") + ser = Series(dti) + + other = dti.tz_localize("UTC") + + res = dti.isin(other) + expected = np.array([False] * len(dti), dtype=bool) + tm.assert_numpy_array_equal(res, expected) + + res = ser.isin(other) + tm.assert_series_equal(res, Series(expected)) + + res = pd.core.algorithms.isin(ser, other) + tm.assert_numpy_array_equal(res, expected) + + def test_isin_period_freq_mismatch(self): + dti = date_range("2013-01-01", "2013-01-05") + pi = dti.to_period("M") + ser = Series(pi) + + # We construct another PeriodIndex with the same i8 values + # but different dtype + dtype = dti.to_period("Y").dtype + other = PeriodArray._simple_new(pi.asi8, dtype=dtype) + + res = pi.isin(other) + expected = np.array([False] * len(pi), dtype=bool) + tm.assert_numpy_array_equal(res, expected) + + res = ser.isin(other) + tm.assert_series_equal(res, Series(expected)) + + res = pd.core.algorithms.isin(ser, other) + tm.assert_numpy_array_equal(res, expected) + + @pytest.mark.parametrize("values", [[-9.0, 0.0], [-9, 0]]) + def test_isin_float_in_int_series(self, values): + # GH#19356 GH#21804 + ser = Series(values) + result = ser.isin([-9, -0.5]) + expected = Series([True, False]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["boolean", "Int64", "Float64"]) + 
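# the cases below pin down NA semantics for masked dtypes: pd.NA in
+    # `values` matches missing entries, while np.nan, pd.NaT and None do not
+    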
@pytest.mark.parametrize( + "data,values,expected", + [ + ([0, 1, 0], [1], [False, True, False]), + ([0, 1, 0], [1, pd.NA], [False, True, False]), + ([0, pd.NA, 0], [1, 0], [True, False, True]), + ([0, 1, pd.NA], [1, pd.NA], [False, True, True]), + ([0, 1, pd.NA], [1, np.nan], [False, True, False]), + ([0, pd.NA, pd.NA], [np.nan, pd.NaT, None], [False, False, False]), + ], + ) + def test_isin_masked_types(self, dtype, data, values, expected): + # GH#42405 + ser = Series(data, dtype=dtype) + + result = ser.isin(values) + expected = Series(expected, dtype="boolean") + + tm.assert_series_equal(result, expected) + + +def test_isin_large_series_mixed_dtypes_and_nan(monkeypatch): + # https://github.com/pandas-dev/pandas/issues/37094 + # combination of object dtype for the values + # and > _MINIMUM_COMP_ARR_LEN elements + min_isin_comp = 5 + ser = Series([1, 2, np.nan] * min_isin_comp) + with monkeypatch.context() as m: + m.setattr(algorithms, "_MINIMUM_COMP_ARR_LEN", min_isin_comp) + result = ser.isin({"foo", "bar"}) + expected = Series([False] * 3 * min_isin_comp) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "array,expected", + [ + ( + [0, 1j, 1j, 1, 1 + 1j, 1 + 2j, 1 + 1j], + Series([False, True, True, False, True, True, True], dtype=bool), + ) + ], +) +def test_isin_complex_numbers(array, expected): + # GH 17927 + result = Series(array).isin([1j, 1 + 1j, 1 + 2j]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data,is_in", + [([1, [2]], [1]), (["simple str", [{"values": 3}]], ["simple str"])], +) +def test_isin_filtering_with_mixed_object_types(data, is_in): + # GH 20883 + + ser = Series(data) + result = ser.isin(is_in) + expected = Series([True, False]) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, 2.0, 3.0]]) +@pytest.mark.parametrize("isin", [[1, 2], [1.0, 2.0]]) +def test_isin_filtering_on_iterable(data, isin): + # GH 50234 + + ser = Series(data) + result = ser.isin(i for i in isin) + expected_result = Series([True, True, False]) + + tm.assert_series_equal(result, expected_result) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_isna.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_isna.py new file mode 100644 index 0000000000000000000000000000000000000000..7e324aa86a052246a074950082e272fee7e505e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_isna.py @@ -0,0 +1,35 @@ +""" +We also test Series.notna in this file. 
+""" +import numpy as np + +from pandas import ( + Period, + Series, +) +import pandas._testing as tm + + +class TestIsna: + def test_isna_period_dtype(self): + # GH#13737 + ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")]) + + expected = Series([False, True]) + + result = ser.isna() + tm.assert_series_equal(result, expected) + + result = ser.notna() + tm.assert_series_equal(result, ~expected) + + def test_isna(self): + ser = Series([0, 5.4, 3, np.nan, -0.001]) + expected = Series([False, False, False, True, False]) + tm.assert_series_equal(ser.isna(), expected) + tm.assert_series_equal(ser.notna(), ~expected) + + ser = Series(["hi", "", np.nan]) + expected = Series([False, False, True]) + tm.assert_series_equal(ser.isna(), expected) + tm.assert_series_equal(ser.notna(), ~expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_reindex_like.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_reindex_like.py new file mode 100644 index 0000000000000000000000000000000000000000..7f24c778feb1b4556587773f711e21521efc537c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_reindex_like.py @@ -0,0 +1,41 @@ +from datetime import datetime + +import numpy as np + +from pandas import Series +import pandas._testing as tm + + +def test_reindex_like(datetime_series): + other = datetime_series[::2] + tm.assert_series_equal( + datetime_series.reindex(other.index), datetime_series.reindex_like(other) + ) + + # GH#7179 + day1 = datetime(2013, 3, 5) + day2 = datetime(2013, 5, 5) + day3 = datetime(2014, 3, 5) + + series1 = Series([5, None, None], [day1, day2, day3]) + series2 = Series([None, None], [day1, day3]) + + result = series1.reindex_like(series2, method="pad") + expected = Series([5, np.nan], index=[day1, day3]) + tm.assert_series_equal(result, expected) + + +def test_reindex_like_nearest(): + ser = Series(np.arange(10, dtype="int64")) + + target = [0.1, 0.9, 1.5, 2.0] + other = ser.reindex(target, method="nearest") + expected = Series(np.around(target).astype("int64"), target) + + result = ser.reindex_like(other, method="nearest") + tm.assert_series_equal(expected, result) + + result = ser.reindex_like(other, method="nearest", tolerance=1) + tm.assert_series_equal(expected, result) + result = ser.reindex_like(other, method="nearest", tolerance=[1, 2, 3, 4]) + tm.assert_series_equal(expected, result) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_rename_axis.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_rename_axis.py new file mode 100644 index 0000000000000000000000000000000000000000..58c095d697ede213be5c730e305bf1789810ac4a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_rename_axis.py @@ -0,0 +1,47 @@ +import pytest + +from pandas import ( + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + + +class TestSeriesRenameAxis: + def test_rename_axis_mapper(self): + # GH 19978 + mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"]) + ser = Series(list(range(len(mi))), index=mi) + + result = ser.rename_axis(index={"ll": "foo"}) + assert result.index.names == ["foo", "nn"] + + result = ser.rename_axis(index=str.upper, axis=0) + assert result.index.names == ["LL", "NN"] + + result = ser.rename_axis(index=["foo", "goo"]) + assert result.index.names == ["foo", "goo"] + + with pytest.raises(TypeError, match="unexpected"): + 
ser.rename_axis(columns="wrong") + + def test_rename_axis_inplace(self, datetime_series): + # GH 15704 + expected = datetime_series.rename_axis("foo") + result = datetime_series + no_return = result.rename_axis("foo", inplace=True) + + assert no_return is None + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}]) + def test_rename_axis_none(self, kwargs): + # GH 25034 + index = Index(list("abc"), name="foo") + ser = Series([1, 2, 3], index=index) + + result = ser.rename_axis(**kwargs) + expected_index = index.rename(None) if kwargs else index + expected = Series([1, 2, 3], index=expected_index) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_replace.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_replace.py new file mode 100644 index 0000000000000000000000000000000000000000..b0f4e233ba5eba4de29b320bb7592c6b2671ebcd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_replace.py @@ -0,0 +1,813 @@ +import re + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import IntervalArray + + +class TestSeriesReplace: + def test_replace_explicit_none(self): + # GH#36984 if the user explicitly passes value=None, give it to them + ser = pd.Series([0, 0, ""], dtype=object) + result = ser.replace("", None) + expected = pd.Series([0, 0, None], dtype=object) + tm.assert_series_equal(result, expected) + + # Cast column 2 to object to avoid implicit cast when setting entry to "" + df = pd.DataFrame(np.zeros((3, 3))).astype({2: object}) + df.iloc[2, 2] = "" + result = df.replace("", None) + expected = pd.DataFrame( + { + 0: np.zeros(3), + 1: np.zeros(3), + 2: np.array([0.0, 0.0, None], dtype=object), + } + ) + assert expected.iloc[2, 2] is None + tm.assert_frame_equal(result, expected) + + # GH#19998 same thing with object dtype + ser = pd.Series([10, 20, 30, "a", "a", "b", "a"]) + result = ser.replace("a", None) + expected = pd.Series([10, 20, 30, None, None, "b", None]) + assert expected.iloc[-1] is None + tm.assert_series_equal(result, expected) + + def test_replace_noop_doesnt_downcast(self): + # GH#44498 + ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object) + res = ser.replace({np.nan: None}) # should be a no-op + tm.assert_series_equal(res, ser) + assert res.dtype == object + + # same thing but different calling convention + res = ser.replace(np.nan, None) + tm.assert_series_equal(res, ser) + assert res.dtype == object + + def test_replace(self): + N = 50 + ser = pd.Series(np.random.default_rng(2).standard_normal(N)) + ser[0:4] = np.nan + ser[6:10] = 0 + + # replace list with a single value + return_value = ser.replace([np.nan], -1, inplace=True) + assert return_value is None + + exp = ser.fillna(-1) + tm.assert_series_equal(ser, exp) + + rs = ser.replace(0.0, np.nan) + ser[ser == 0.0] = np.nan + tm.assert_series_equal(rs, ser) + + ser = pd.Series( + np.fabs(np.random.default_rng(2).standard_normal(N)), + pd.date_range("2020-01-01", periods=N), + dtype=object, + ) + ser[:5] = np.nan + ser[6:10] = "foo" + ser[20:30] = "bar" + + # replace list with a single value + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = ser.replace([np.nan, "foo", "bar"], -1) + + assert (rs[:5] == -1).all() 
+ assert (rs[6:10] == -1).all() + assert (rs[20:30] == -1).all() + assert (pd.isna(ser[:5])).all() + + # replace with different values + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3}) + + assert (rs[:5] == -1).all() + assert (rs[6:10] == -2).all() + assert (rs[20:30] == -3).all() + assert (pd.isna(ser[:5])).all() + + # replace with different values with 2 lists + with tm.assert_produces_warning(FutureWarning, match=msg): + rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3]) + tm.assert_series_equal(rs, rs2) + + # replace inplace + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True) + assert return_value is None + + assert (ser[:5] == -1).all() + assert (ser[6:10] == -1).all() + assert (ser[20:30] == -1).all() + + def test_replace_nan_with_inf(self): + ser = pd.Series([np.nan, 0, np.inf]) + tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0)) + + ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT]) + tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0)) + filled = ser.copy() + filled[4] = 0 + tm.assert_series_equal(ser.replace(np.inf, 0), filled) + + def test_replace_listlike_value_listlike_target(self, datetime_series): + ser = pd.Series(datetime_series.index) + tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0)) + + # malformed + msg = r"Replacement lists must match in length\. Expecting 3 got 2" + with pytest.raises(ValueError, match=msg): + ser.replace([1, 2, 3], [np.nan, 0]) + + # ser is dt64 so can't hold 1 or 2, so this replace is a no-op + result = ser.replace([1, 2], [np.nan, 0]) + tm.assert_series_equal(result, ser) + + ser = pd.Series([0, 1, 2, 3, 4]) + result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0]) + tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0])) + + def test_replace_gh5319(self): + # API change from 0.12? 
+ # GH 5319 + ser = pd.Series([0, np.nan, 2, 3, 4]) + expected = ser.ffill() + msg = ( + "Series.replace without 'value' and with non-dict-like " + "'to_replace' is deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.replace([np.nan]) + tm.assert_series_equal(result, expected) + + ser = pd.Series([0, np.nan, 2, 3, 4]) + expected = ser.ffill() + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.replace(np.nan) + tm.assert_series_equal(result, expected) + + def test_replace_datetime64(self): + # GH 5797 + ser = pd.Series(pd.date_range("20130101", periods=5)) + expected = ser.copy() + expected.loc[2] = pd.Timestamp("20120101") + result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")}) + tm.assert_series_equal(result, expected) + result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101")) + tm.assert_series_equal(result, expected) + + def test_replace_nat_with_tz(self): + # GH 11792: Test with replacing NaT in a list with tz data + ts = pd.Timestamp("2015/01/01", tz="UTC") + s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")]) + result = s.replace([np.nan, pd.NaT], pd.Timestamp.min) + expected = pd.Series([pd.Timestamp.min, ts], dtype=object) + tm.assert_series_equal(expected, result) + + def test_replace_timedelta_td64(self): + tdi = pd.timedelta_range(0, periods=5) + ser = pd.Series(tdi) + + # Using a single dict argument means we go through replace_list + result = ser.replace({ser[1]: ser[3]}) + + expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]]) + tm.assert_series_equal(result, expected) + + def test_replace_with_single_list(self): + ser = pd.Series([0, 1, 2, 3, 4]) + msg2 = ( + "Series.replace without 'value' and with non-dict-like " + "'to_replace' is deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg2): + result = ser.replace([1, 2, 3]) + tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4])) + + s = ser.copy() + with tm.assert_produces_warning(FutureWarning, match=msg2): + return_value = s.replace([1, 2, 3], inplace=True) + assert return_value is None + tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4])) + + # make sure things don't get corrupted when fillna call fails + s = ser.copy() + msg = ( + r"Invalid fill method\. Expecting pad \(ffill\) or backfill " + r"\(bfill\)\. 
Got crash_cymbal" + ) + msg3 = "The 'method' keyword in Series.replace is deprecated" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg3): + return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal") + assert return_value is None + tm.assert_series_equal(s, ser) + + def test_replace_mixed_types(self): + ser = pd.Series(np.arange(5), dtype="int64") + + def check_replace(to_rep, val, expected): + sc = ser.copy() + result = ser.replace(to_rep, val) + return_value = sc.replace(to_rep, val, inplace=True) + assert return_value is None + tm.assert_series_equal(expected, result) + tm.assert_series_equal(expected, sc) + + # 3.0 can still be held in our int64 series, so we do not upcast GH#44940 + tr, v = [3], [3.0] + check_replace(tr, v, ser) + # Note this matches what we get with the scalars 3 and 3.0 + check_replace(tr[0], v[0], ser) + + # MUST upcast to float + e = pd.Series([0, 1, 2, 3.5, 4]) + tr, v = [3], [3.5] + check_replace(tr, v, e) + + # casts to object + e = pd.Series([0, 1, 2, 3.5, "a"]) + tr, v = [3, 4], [3.5, "a"] + check_replace(tr, v, e) + + # again casts to object + e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")]) + tr, v = [3, 4], [3.5, pd.Timestamp("20130101")] + check_replace(tr, v, e) + + # casts to object + e = pd.Series([0, 1, 2, 3.5, True], dtype="object") + tr, v = [3, 4], [3.5, True] + check_replace(tr, v, e) + + # test an object with dates + floats + integers + strings + dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D")) + result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"]) + expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object) + tm.assert_series_equal(result, expected) + + def test_replace_bool_with_string_no_op(self): + s = pd.Series([True, False, True]) + result = s.replace("fun", "in-the-sun") + tm.assert_series_equal(s, result) + + def test_replace_bool_with_string(self): + # nonexistent elements + s = pd.Series([True, False, True]) + result = s.replace(True, "2u") + expected = pd.Series(["2u", False, "2u"]) + tm.assert_series_equal(expected, result) + + def test_replace_bool_with_bool(self): + s = pd.Series([True, False, True]) + result = s.replace(True, False) + expected = pd.Series([False] * len(s)) + tm.assert_series_equal(expected, result) + + def test_replace_with_dict_with_bool_keys(self): + s = pd.Series([True, False, True]) + result = s.replace({"asdf": "asdb", True: "yes"}) + expected = pd.Series(["yes", False, "yes"]) + tm.assert_series_equal(result, expected) + + def test_replace_Int_with_na(self, any_int_ea_dtype): + # GH 38267 + result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA) + expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype) + tm.assert_series_equal(result, expected) + result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA) + result.replace(1, pd.NA, inplace=True) + tm.assert_series_equal(result, expected) + + def test_replace2(self): + N = 50 + ser = pd.Series( + np.fabs(np.random.default_rng(2).standard_normal(N)), + pd.date_range("2020-01-01", periods=N), + dtype=object, + ) + ser[:5] = np.nan + ser[6:10] = "foo" + ser[20:30] = "bar" + + # replace list with a single value + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = ser.replace([np.nan, "foo", "bar"], -1) + + assert (rs[:5] == -1).all() + assert (rs[6:10] == -1).all() + assert (rs[20:30] == -1).all() + assert (pd.isna(ser[:5])).all() + + # replace with different 
values
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
+
+        assert (rs[:5] == -1).all()
+        assert (rs[6:10] == -2).all()
+        assert (rs[20:30] == -3).all()
+        assert (pd.isna(ser[:5])).all()
+
+        # replace with different values with 2 lists
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
+        tm.assert_series_equal(rs, rs2)
+
+        # replace inplace
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
+        assert return_value is None
+        assert (ser[:5] == -1).all()
+        assert (ser[6:10] == -1).all()
+        assert (ser[20:30] == -1).all()
+
+    @pytest.mark.parametrize("inplace", [True, False])
+    def test_replace_cascade(self, inplace):
+        # Test that replaced values are not replaced again
+        # GH #50778
+        ser = pd.Series([1, 2, 3])
+        expected = pd.Series([2, 3, 4])
+
+        res = ser.replace([1, 2, 3], [2, 3, 4], inplace=inplace)
+        if inplace:
+            tm.assert_series_equal(ser, expected)
+        else:
+            tm.assert_series_equal(res, expected)
+
+    def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
+        # GH 32621, GH#44940
+        ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
+        expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
+        result = ser.replace({"one": "1", "two": "2"})
+        tm.assert_series_equal(expected, result)
+
+    def test_replace_with_empty_dictlike(self):
+        # GH 15289
+        s = pd.Series(list("abcd"))
+        tm.assert_series_equal(s, s.replace({}))
+
+        empty_series = pd.Series([])
+        tm.assert_series_equal(s, s.replace(empty_series))
+
+    def test_replace_string_with_number(self):
+        # GH 15743
+        s = pd.Series([1, 2, 3])
+        result = s.replace("2", np.nan)
+        expected = pd.Series([1, 2, 3])
+        tm.assert_series_equal(expected, result)
+
+    def test_replace_replacer_equals_replacement(self):
+        # GH 20656
+        # make sure all replacers are matching against original values
+        s = pd.Series(["a", "b"])
+        expected = pd.Series(["b", "a"])
+        result = s.replace({"a": "b", "b": "a"})
+        tm.assert_series_equal(expected, result)
+
+    def test_replace_unicode_with_number(self):
+        # GH 15743
+        s = pd.Series([1, 2, 3])
+        result = s.replace("2", np.nan)
+        expected = pd.Series([1, 2, 3])
+        tm.assert_series_equal(expected, result)
+
+    def test_replace_mixed_types_with_string(self):
+        # Testing mixed
+        s = pd.Series([1, 2, 3, "4", 4, 5])
+        msg = "Downcasting behavior in `replace`"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = s.replace([2, "4"], np.nan)
+        expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
+        tm.assert_series_equal(expected, result)
+
+    @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="can't fill 0 in string")
+    @pytest.mark.parametrize(
+        "categorical, numeric",
+        [
+            (pd.Categorical(["A"], categories=["A", "B"]), [1]),
+            (pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
+        ],
+    )
+    def test_replace_categorical(self, categorical, numeric):
+        # GH 24971, GH#23305
+        ser = pd.Series(categorical)
+        msg = "with CategoricalDtype is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = ser.replace({"A": 1, "B": 2})
+        expected = pd.Series(numeric).astype("category")
+        if 2 not in expected.cat.categories:
+            # i.e. 
categories should be [1, 2] even if there are no "B"s present + # GH#44940 + expected = expected.cat.add_categories(2) + tm.assert_series_equal(expected, result) + + @pytest.mark.parametrize( + "data, data_exp", [(["a", "b", "c"], ["b", "b", "c"]), (["a"], ["b"])] + ) + def test_replace_categorical_inplace(self, data, data_exp): + # GH 53358 + result = pd.Series(data, dtype="category") + msg = "with CategoricalDtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result.replace(to_replace="a", value="b", inplace=True) + expected = pd.Series(data_exp, dtype="category") + tm.assert_series_equal(result, expected) + + def test_replace_categorical_single(self): + # GH 26988 + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + s = pd.Series(dti) + c = s.astype("category") + + expected = c.copy() + expected = expected.cat.add_categories("foo") + expected[2] = "foo" + expected = expected.cat.remove_unused_categories() + assert c[2] != "foo" + + msg = "with CategoricalDtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = c.replace(c[2], "foo") + tm.assert_series_equal(expected, result) + assert c[2] != "foo" # ensure non-inplace call does not alter original + + msg = "with CategoricalDtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = c.replace(c[2], "foo", inplace=True) + assert return_value is None + tm.assert_series_equal(expected, c) + + first_value = c[0] + msg = "with CategoricalDtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = c.replace(c[1], c[0], inplace=True) + assert return_value is None + assert c[0] == c[1] == first_value # test replacing with existing value + + def test_replace_with_no_overflowerror(self): + # GH 25616 + # casts to object without Exception from OverflowError + s = pd.Series([0, 1, 2, 3, 4]) + result = s.replace([3], ["100000000000000000000"]) + expected = pd.Series([0, 1, 2, "100000000000000000000", 4]) + tm.assert_series_equal(result, expected) + + s = pd.Series([0, "100000000000000000000", "100000000000000000001"]) + result = s.replace(["100000000000000000000"], [1]) + expected = pd.Series([0, 1, "100000000000000000001"]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "ser, to_replace, exp", + [ + ([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]), + (["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]), + ], + ) + def test_replace_commutative(self, ser, to_replace, exp): + # GH 16051 + # DataFrame.replace() overwrites when values are non-numeric + + series = pd.Series(ser) + + expected = pd.Series(exp) + result = series.replace(to_replace) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])] + ) + def test_replace_no_cast(self, ser, exp): + # GH 9113 + # BUG: replace int64 dtype with bool coerces to int64 + + series = pd.Series(ser) + result = series.replace(2, True) + expected = pd.Series(exp) + + tm.assert_series_equal(result, expected) + + def test_replace_invalid_to_replace(self): + # GH 18634 + # API: replace() should raise an exception if invalid argument is given + series = pd.Series(["a", "b", "c "]) + msg = ( + r"Expecting 'to_replace' to be either a scalar, array-like, " + r"dict or None, got invalid type.*" + ) + msg2 = ( + "Series.replace without 'value' and with non-dict-like " + "'to_replace' is deprecated" + ) + with pytest.raises(TypeError, 
match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + series.replace(lambda x: x.strip()) + + @pytest.mark.parametrize("frame", [False, True]) + def test_replace_nonbool_regex(self, frame): + obj = pd.Series(["a", "b", "c "]) + if frame: + obj = obj.to_frame() + + msg = "'to_replace' must be 'None' if 'regex' is not a bool" + with pytest.raises(ValueError, match=msg): + obj.replace(to_replace=["a"], regex="foo") + + @pytest.mark.parametrize("frame", [False, True]) + def test_replace_empty_copy(self, frame): + obj = pd.Series([], dtype=np.float64) + if frame: + obj = obj.to_frame() + + res = obj.replace(4, 5, inplace=True) + assert res is None + + res = obj.replace(4, 5, inplace=False) + tm.assert_equal(res, obj) + assert res is not obj + + def test_replace_only_one_dictlike_arg(self, fixed_now_ts): + # GH#33340 + + ser = pd.Series([1, 2, "A", fixed_now_ts, True]) + to_replace = {0: 1, 2: "A"} + value = "foo" + msg = "Series.replace cannot use dict-like to_replace and non-None value" + with pytest.raises(ValueError, match=msg): + ser.replace(to_replace, value) + + to_replace = 1 + value = {0: "foo", 2: "bar"} + msg = "Series.replace cannot use dict-value and non-None to_replace" + with pytest.raises(ValueError, match=msg): + ser.replace(to_replace, value) + + def test_replace_extension_other(self, frame_or_series): + # https://github.com/pandas-dev/pandas/issues/34530 + obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64")) + result = obj.replace("", "") # no exception + # should not have changed dtype + tm.assert_equal(obj, result) + + def _check_replace_with_method(self, ser: pd.Series): + df = ser.to_frame() + + msg1 = "The 'method' keyword in Series.replace is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg1): + res = ser.replace(ser[1], method="pad") + expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype) + tm.assert_series_equal(res, expected) + + msg2 = "The 'method' keyword in DataFrame.replace is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg2): + res_df = df.replace(ser[1], method="pad") + tm.assert_frame_equal(res_df, expected.to_frame()) + + ser2 = ser.copy() + with tm.assert_produces_warning(FutureWarning, match=msg1): + res2 = ser2.replace(ser[1], method="pad", inplace=True) + assert res2 is None + tm.assert_series_equal(ser2, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg2): + res_df2 = df.replace(ser[1], method="pad", inplace=True) + assert res_df2 is None + tm.assert_frame_equal(df, expected.to_frame()) + + def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype): + arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype) + ser = pd.Series(arr) + + self._check_replace_with_method(ser) + + @pytest.mark.parametrize("as_categorical", [True, False]) + def test_replace_interval_with_method(self, as_categorical): + # in particular interval that can't hold NA + + idx = pd.IntervalIndex.from_breaks(range(4)) + ser = pd.Series(idx) + if as_categorical: + ser = ser.astype("category") + + self._check_replace_with_method(ser) + + @pytest.mark.parametrize("as_period", [True, False]) + @pytest.mark.parametrize("as_categorical", [True, False]) + def test_replace_datetimelike_with_method(self, as_period, as_categorical): + idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific") + if as_period: + idx = idx.tz_localize(None).to_period("D") + + ser = pd.Series(idx) + ser.iloc[-2] = pd.NaT + if as_categorical: + ser = ser.astype("category") + + 
self._check_replace_with_method(ser) + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + s = pd.Series(["a", "b", "c"]) + regex = re.compile("^a$") + result = s.replace({regex: "z"}, regex=True) + expected = pd.Series(["z", "b", "c"]) + tm.assert_series_equal(result, expected) + + def test_pandas_replace_na(self): + # GH#43344 + ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string") + regex_mapping = { + "AA": "CC", + "BB": "CC", + "EE": "CC", + "CC": "CC-REPL", + } + result = ser.replace(regex_mapping, regex=True) + exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string") + tm.assert_series_equal(result, exp) + + @pytest.mark.parametrize( + "dtype, input_data, to_replace, expected_data", + [ + ("bool", [True, False], {True: False}, [False, False]), + ("int64", [1, 2], {1: 10, 2: 20}, [10, 20]), + ("Int64", [1, 2], {1: 10, 2: 20}, [10, 20]), + ("float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]), + ("Float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]), + ("string", ["one", "two"], {"one": "1", "two": "2"}, ["1", "2"]), + ( + pd.IntervalDtype("int64"), + IntervalArray([pd.Interval(1, 2), pd.Interval(2, 3)]), + {pd.Interval(1, 2): pd.Interval(10, 20)}, + IntervalArray([pd.Interval(10, 20), pd.Interval(2, 3)]), + ), + ( + pd.IntervalDtype("float64"), + IntervalArray([pd.Interval(1.0, 2.7), pd.Interval(2.8, 3.1)]), + {pd.Interval(1.0, 2.7): pd.Interval(10.6, 20.8)}, + IntervalArray([pd.Interval(10.6, 20.8), pd.Interval(2.8, 3.1)]), + ), + ( + pd.PeriodDtype("M"), + [pd.Period("2020-05", freq="M")], + {pd.Period("2020-05", freq="M"): pd.Period("2020-06", freq="M")}, + [pd.Period("2020-06", freq="M")], + ), + ], + ) + def test_replace_dtype(self, dtype, input_data, to_replace, expected_data): + # GH#33484 + ser = pd.Series(input_data, dtype=dtype) + result = ser.replace(to_replace) + expected = pd.Series(expected_data, dtype=dtype) + tm.assert_series_equal(result, expected) + + def test_replace_string_dtype(self): + # GH#40732, GH#44940 + ser = pd.Series(["one", "two", np.nan], dtype="string") + res = ser.replace({"one": "1", "two": "2"}) + expected = pd.Series(["1", "2", np.nan], dtype="string") + tm.assert_series_equal(res, expected) + + # GH#31644 + ser2 = pd.Series(["A", np.nan], dtype="string") + res2 = ser2.replace("A", "B") + expected2 = pd.Series(["B", np.nan], dtype="string") + tm.assert_series_equal(res2, expected2) + + ser3 = pd.Series(["A", "B"], dtype="string") + res3 = ser3.replace("A", pd.NA) + expected3 = pd.Series([pd.NA, "B"], dtype="string") + tm.assert_series_equal(res3, expected3) + + def test_replace_string_dtype_list_to_replace(self): + # GH#41215, GH#44940 + ser = pd.Series(["abc", "def"], dtype="string") + res = ser.replace(["abc", "any other string"], "xyz") + expected = pd.Series(["xyz", "def"], dtype="string") + tm.assert_series_equal(res, expected) + + def test_replace_string_dtype_regex(self): + # GH#31644 + ser = pd.Series(["A", "B"], dtype="string") + res = ser.replace(r".", "C", regex=True) + expected = pd.Series(["C", "C"], dtype="string") + tm.assert_series_equal(res, expected) + + def test_replace_nullable_numeric(self): + # GH#40732, GH#44940 + + floats = pd.Series([1.0, 2.0, 3.999, 4.4], dtype=pd.Float64Dtype()) + assert floats.replace({1.0: 9}).dtype == floats.dtype + assert floats.replace(1.0, 9).dtype == floats.dtype + assert floats.replace({1.0: 9.0}).dtype == floats.dtype + assert floats.replace(1.0, 9.0).dtype == floats.dtype + + res = 
floats.replace(to_replace=[1.0, 2.0], value=[9.0, 10.0]) + assert res.dtype == floats.dtype + + ints = pd.Series([1, 2, 3, 4], dtype=pd.Int64Dtype()) + assert ints.replace({1: 9}).dtype == ints.dtype + assert ints.replace(1, 9).dtype == ints.dtype + assert ints.replace({1: 9.0}).dtype == ints.dtype + assert ints.replace(1, 9.0).dtype == ints.dtype + + # nullable (for now) raises instead of casting + with pytest.raises(TypeError, match="Invalid value"): + ints.replace({1: 9.5}) + with pytest.raises(TypeError, match="Invalid value"): + ints.replace(1, 9.5) + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="can't fill 1 in string") + @pytest.mark.parametrize("regex", [False, True]) + def test_replace_regex_dtype_series(self, regex): + # GH-48644 + series = pd.Series(["0"]) + expected = pd.Series([1]) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = series.replace(to_replace="0", value=1, regex=regex) + tm.assert_series_equal(result, expected) + + def test_replace_different_int_types(self, any_int_numpy_dtype): + # GH#45311 + labs = pd.Series([1, 1, 1, 0, 0, 2, 2, 2], dtype=any_int_numpy_dtype) + + maps = pd.Series([0, 2, 1], dtype=any_int_numpy_dtype) + map_dict = dict(zip(maps.values, maps.index)) + + result = labs.replace(map_dict) + expected = labs.replace({0: 0, 2: 1, 1: 2}) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("val", [2, np.nan, 2.0]) + def test_replace_value_none_dtype_numeric(self, val): + # GH#48231 + ser = pd.Series([1, val]) + result = ser.replace(val, None) + expected = pd.Series([1, None], dtype=object) + tm.assert_series_equal(result, expected) + + def test_replace_change_dtype_series(self, using_infer_string): + # GH#25797 + df = pd.DataFrame.from_dict({"Test": ["0.5", True, "0.6"]}) + warn = FutureWarning if using_infer_string else None + with tm.assert_produces_warning(warn, match="Downcasting"): + df["Test"] = df["Test"].replace([True], [np.nan]) + expected = pd.DataFrame.from_dict({"Test": ["0.5", np.nan, "0.6"]}) + tm.assert_frame_equal(df, expected) + + df = pd.DataFrame.from_dict({"Test": ["0.5", None, "0.6"]}) + df["Test"] = df["Test"].replace([None], [np.nan]) + tm.assert_frame_equal(df, expected) + + df = pd.DataFrame.from_dict({"Test": ["0.5", None, "0.6"]}) + df["Test"] = df["Test"].fillna(np.nan) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("dtype", ["object", "Int64"]) + def test_replace_na_in_obj_column(self, dtype): + # GH#47480 + ser = pd.Series([0, 1, pd.NA], dtype=dtype) + expected = pd.Series([0, 2, pd.NA], dtype=dtype) + result = ser.replace(to_replace=1, value=2) + tm.assert_series_equal(result, expected) + + ser.replace(to_replace=1, value=2, inplace=True) + tm.assert_series_equal(ser, expected) + + @pytest.mark.parametrize("val", [0, 0.5]) + def test_replace_numeric_column_with_na(self, val): + # GH#50758 + ser = pd.Series([val, 1]) + expected = pd.Series([val, pd.NA]) + result = ser.replace(to_replace=1, value=pd.NA) + tm.assert_series_equal(result, expected) + + ser.replace(to_replace=1, value=pd.NA, inplace=True) + tm.assert_series_equal(ser, expected) + + def test_replace_ea_float_with_bool(self): + # GH#55398 + ser = pd.Series([0.0], dtype="Float64") + expected = ser.copy() + result = ser.replace(False, 1.0) + tm.assert_series_equal(result, expected) + + ser = pd.Series([False], dtype="boolean") + expected = ser.copy() + result = ser.replace(0.0, True) + tm.assert_series_equal(result, expected) diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_set_name.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_set_name.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc8ebde7a8ab79fabe94781123844c856c9c78b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_set_name.py @@ -0,0 +1,21 @@ +from datetime import datetime + +from pandas import Series + + +class TestSetName: + def test_set_name(self): + ser = Series([1, 2, 3]) + ser2 = ser._set_name("foo") + assert ser2.name == "foo" + assert ser.name is None + assert ser is not ser2 + + def test_set_name_attribute(self): + ser = Series([1, 2, 3]) + ser2 = Series([1, 2, 3], name="bar") + for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]: + ser.name = name + assert ser.name == name + ser2.name = name + assert ser2.name == name diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_size.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_size.py new file mode 100644 index 0000000000000000000000000000000000000000..20a454996fa4488501d6f623ad3afc6fa38e5634 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_size.py @@ -0,0 +1,22 @@ +import pytest + +from pandas import Series + + +@pytest.mark.parametrize( + "data, index, expected", + [ + ([1, 2, 3], None, 3), + ({"a": 1, "b": 2, "c": 3}, None, 3), + ([1, 2, 3], ["x", "y", "z"], 3), + ([1, 2, 3, 4, 5], ["x", "y", "z", "w", "n"], 5), + ([1, 2, 3], None, 3), + ([1, 2, 3], ["x", "y", "z"], 3), + ([1, 2, 3, 4], ["x", "y", "z", "w"], 4), + ], +) +def test_series(data, index, expected): + # GH#52897 + ser = Series(data, index=index) + assert ser.size == expected + assert isinstance(ser.size, int) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_to_frame.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_to_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..0eadf696b34cc034d2be76ce5daba2cff679da74 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_to_frame.py @@ -0,0 +1,63 @@ +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm + + +class TestToFrame: + def test_to_frame_respects_name_none(self): + # GH#44212 if we explicitly pass name=None, then that should be respected, + # not changed to 0 + # GH-45448 this is first deprecated & enforced in 2.0 + ser = Series(range(3)) + result = ser.to_frame(None) + + exp_index = Index([None], dtype=object) + tm.assert_index_equal(result.columns, exp_index) + + result = ser.rename("foo").to_frame(None) + exp_index = Index([None], dtype=object) + tm.assert_index_equal(result.columns, exp_index) + + def test_to_frame(self, datetime_series): + datetime_series.name = None + rs = datetime_series.to_frame() + xp = DataFrame(datetime_series.values, index=datetime_series.index) + tm.assert_frame_equal(rs, xp) + + datetime_series.name = "testname" + rs = datetime_series.to_frame() + xp = DataFrame( + {"testname": datetime_series.values}, index=datetime_series.index + ) + tm.assert_frame_equal(rs, xp) + + rs = datetime_series.to_frame(name="testdifferent") + xp = DataFrame( + {"testdifferent": datetime_series.values}, index=datetime_series.index + ) + tm.assert_frame_equal(rs, xp) + + @pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a 
SingleBlockManager:DeprecationWarning" + ) + def test_to_frame_expanddim(self): + # GH#9762 + + class SubclassedSeries(Series): + @property + def _constructor_expanddim(self): + return SubclassedFrame + + class SubclassedFrame(DataFrame): + pass + + ser = SubclassedSeries([1, 2, 3], name="X") + result = ser.to_frame() + assert isinstance(result, SubclassedFrame) + expected = SubclassedFrame({"X": [1, 2, 3]}) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_unstack.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_unstack.py new file mode 100644 index 0000000000000000000000000000000000000000..3c70e839c8e206c0b0e07a1046132666643329aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_unstack.py @@ -0,0 +1,169 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +def test_unstack_preserves_object(): + mi = MultiIndex.from_product([["bar", "foo"], ["one", "two"]]) + + ser = Series(np.arange(4.0), index=mi, dtype=object) + + res1 = ser.unstack() + assert (res1.dtypes == object).all() + + res2 = ser.unstack(level=0) + assert (res2.dtypes == object).all() + + +def test_unstack(): + index = MultiIndex( + levels=[["bar", "foo"], ["one", "three", "two"]], + codes=[[1, 1, 0, 0], [0, 1, 0, 2]], + ) + + s = Series(np.arange(4.0), index=index) + unstacked = s.unstack() + + expected = DataFrame( + [[2.0, np.nan, 3.0], [0.0, 1.0, np.nan]], + index=["bar", "foo"], + columns=["one", "three", "two"], + ) + + tm.assert_frame_equal(unstacked, expected) + + unstacked = s.unstack(level=0) + tm.assert_frame_equal(unstacked, expected.T) + + index = MultiIndex( + levels=[["bar"], ["one", "two", "three"], [0, 1]], + codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], + ) + s = Series(np.random.default_rng(2).standard_normal(6), index=index) + exp_index = MultiIndex( + levels=[["one", "two", "three"], [0, 1]], + codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], + ) + expected = DataFrame({"bar": s.values}, index=exp_index).sort_index(level=0) + unstacked = s.unstack(0).sort_index() + tm.assert_frame_equal(unstacked, expected) + + # GH5873 + idx = MultiIndex.from_arrays([[101, 102], [3.5, np.nan]]) + ts = Series([1, 2], index=idx) + left = ts.unstack() + right = DataFrame( + [[np.nan, 1], [2, np.nan]], index=[101, 102], columns=[np.nan, 3.5] + ) + tm.assert_frame_equal(left, right) + + idx = MultiIndex.from_arrays( + [ + ["cat", "cat", "cat", "dog", "dog"], + ["a", "a", "b", "a", "b"], + [1, 2, 1, 1, np.nan], + ] + ) + ts = Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx) + right = DataFrame( + [[1.0, 1.3], [1.1, np.nan], [np.nan, 1.4], [1.2, np.nan]], + columns=["cat", "dog"], + ) + tpls = [("a", 1), ("a", 2), ("b", np.nan), ("b", 1)] + right.index = MultiIndex.from_tuples(tpls) + tm.assert_frame_equal(ts.unstack(level=0), right) + + +def test_unstack_tuplename_in_multiindex(): + # GH 19966 + idx = MultiIndex.from_product( + [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")] + ) + ser = Series(1, index=idx) + result = ser.unstack(("A", "a")) + + expected = DataFrame( + [[1, 1, 1], [1, 1, 1], [1, 1, 1]], + columns=MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]), + index=Index([1, 2, 3], name=("B", "b")), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "unstack_idx, 
expected_values, expected_index, expected_columns", + [ + ( + ("A", "a"), + [[1, 1], [1, 1], [1, 1], [1, 1]], + MultiIndex.from_tuples([(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]), + MultiIndex.from_tuples([("a",), ("b",)], names=[("A", "a")]), + ), + ( + (("A", "a"), "B"), + [[1, 1, 1, 1], [1, 1, 1, 1]], + Index([3, 4], name="C"), + MultiIndex.from_tuples( + [("a", 1), ("a", 2), ("b", 1), ("b", 2)], names=[("A", "a"), "B"] + ), + ), + ], +) +def test_unstack_mixed_type_name_in_multiindex( + unstack_idx, expected_values, expected_index, expected_columns +): + # GH 19966 + idx = MultiIndex.from_product( + [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"] + ) + ser = Series(1, index=idx) + result = ser.unstack(unstack_idx) + + expected = DataFrame( + expected_values, columns=expected_columns, index=expected_index + ) + tm.assert_frame_equal(result, expected) + + +def test_unstack_multi_index_categorical_values(): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + mi = df.stack(future_stack=True).index.rename(["major", "minor"]) + ser = Series(["foo"] * len(mi), index=mi, name="category", dtype="category") + + result = ser.unstack() + + dti = ser.index.levels[0] + c = pd.Categorical(["foo"] * len(dti)) + expected = DataFrame( + {"A": c.copy(), "B": c.copy(), "C": c.copy(), "D": c.copy()}, + columns=Index(list("ABCD"), name="minor"), + index=dti.rename("major"), + ) + tm.assert_frame_equal(result, expected) + + +def test_unstack_mixed_level_names(): + # GH#48763 + arrays = [["a", "a"], [1, 2], ["red", "blue"]] + idx = MultiIndex.from_arrays(arrays, names=("x", 0, "y")) + ser = Series([1, 2], index=idx) + result = ser.unstack("x") + expected = DataFrame( + [[1], [2]], + columns=Index(["a"], name="x"), + index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]), + ) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_values.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_values.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1595e68264fbe5f07b014be4975657fa2fa8cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_values.py @@ -0,0 +1,29 @@ +import numpy as np +import pytest + +from pandas import ( + IntervalIndex, + Series, + period_range, +) +import pandas._testing as tm + + +class TestValues: + @pytest.mark.parametrize( + "data", + [ + period_range("2000", periods=4), + IntervalIndex.from_breaks([1, 2, 3, 4]), + ], + ) + def test_values_object_extension_dtypes(self, data): + # https://github.com/pandas-dev/pandas/issues/23995 + result = Series(data).values + expected = np.array(data.astype(object)) + tm.assert_numpy_array_equal(result, expected) + + def test_values(self, datetime_series): + tm.assert_almost_equal( + datetime_series.values, list(datetime_series), check_dtype=False + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_view.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_view.py new file mode 100644 index 0000000000000000000000000000000000000000..7e0ac372cd443cf9a468b469df1c22818f10aad2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/methods/test_view.py @@ -0,0 +1,61 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + Series, + 
array, + date_range, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Series.view is deprecated and will be removed in a future version.:FutureWarning" # noqa: E501 +) + + +class TestView: + def test_view_i8_to_datetimelike(self): + dti = date_range("2000", periods=4, tz="US/Central") + ser = Series(dti.asi8) + + result = ser.view(dti.dtype) + tm.assert_datetime_array_equal(result._values, dti._data._with_freq(None)) + + pi = dti.tz_localize(None).to_period("D") + ser = Series(pi.asi8) + result = ser.view(pi.dtype) + tm.assert_period_array_equal(result._values, pi._data) + + def test_view_tz(self): + # GH#24024 + ser = Series(date_range("2000", periods=4, tz="US/Central")) + result = ser.view("i8") + expected = Series( + [ + 946706400000000000, + 946792800000000000, + 946879200000000000, + 946965600000000000, + ] + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "first", ["m8[ns]", "M8[ns]", "M8[ns, US/Central]", "period[D]"] + ) + @pytest.mark.parametrize( + "second", ["m8[ns]", "M8[ns]", "M8[ns, US/Central]", "period[D]"] + ) + @pytest.mark.parametrize("box", [Series, Index, array]) + def test_view_between_datetimelike(self, first, second, box): + dti = date_range("2016-01-01", periods=3) + + orig = box(dti) + obj = orig.view(first) + assert obj.dtype == first + tm.assert_numpy_array_equal(np.asarray(obj.view("i8")), dti.asi8) + + res = obj.view(second) + assert res.dtype == second + tm.assert_numpy_array_equal(np.asarray(obj.view("i8")), dti.asi8) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_api.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..29d6e2036476ef62ed1a7eea734400915d71f0ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_api.py @@ -0,0 +1,300 @@ +import inspect +import pydoc + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestSeriesMisc: + def test_tab_completion(self): + # GH 9910 + s = Series(list("abcd")) + # Series of str values should have .str but not .dt/.cat in __dir__ + assert "str" in dir(s) + assert "dt" not in dir(s) + assert "cat" not in dir(s) + + def test_tab_completion_dt(self): + # similarly for .dt + s = Series(date_range("1/1/2015", periods=5)) + assert "dt" in dir(s) + assert "str" not in dir(s) + assert "cat" not in dir(s) + + def test_tab_completion_cat(self): + # Similarly for .cat, but with the twist that str and dt should be + # there if the categories are of that type first cat and str. 
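+        # e.g. a string categorical exposes both .cat and .str, and a
+        # datetime categorical exposes both .cat and .dt (see the next test)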
+ s = Series(list("abbcd"), dtype="category") + assert "cat" in dir(s) + assert "str" in dir(s) # as it is a string categorical + assert "dt" not in dir(s) + + def test_tab_completion_cat_str(self): + # similar to cat and str + s = Series(date_range("1/1/2015", periods=5)).astype("category") + assert "cat" in dir(s) + assert "str" not in dir(s) + assert "dt" in dir(s) # as it is a datetime categorical + + def test_tab_completion_with_categorical(self): + # test the tab completion display + ok_for_cat = [ + "categories", + "codes", + "ordered", + "set_categories", + "add_categories", + "remove_categories", + "rename_categories", + "reorder_categories", + "remove_unused_categories", + "as_ordered", + "as_unordered", + ] + + s = Series(list("aabbcde")).astype("category") + results = sorted({r for r in s.cat.__dir__() if not r.startswith("_")}) + tm.assert_almost_equal(results, sorted(set(ok_for_cat))) + + @pytest.mark.parametrize( + "index", + [ + Index(list("ab") * 5, dtype="category"), + Index([str(i) for i in range(10)]), + Index(["foo", "bar", "baz"] * 2), + date_range("2020-01-01", periods=10), + period_range("2020-01-01", periods=10, freq="D"), + timedelta_range("1 day", periods=10), + Index(np.arange(10), dtype=np.uint64), + Index(np.arange(10), dtype=np.int64), + Index(np.arange(10), dtype=np.float64), + Index([True, False]), + Index([f"a{i}" for i in range(101)]), + pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")), + pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")), + ], + ) + def test_index_tab_completion(self, index): + # dir contains string-like values of the Index. + s = Series(index=index, dtype=object) + dir_s = dir(s) + for i, x in enumerate(s.index.unique(level=0)): + if i < 100: + assert not isinstance(x, str) or not x.isidentifier() or x in dir_s + else: + assert x not in dir_s + + @pytest.mark.parametrize("ser", [Series(dtype=object), Series([1])]) + def test_not_hashable(self, ser): + msg = "unhashable type: 'Series'" + with pytest.raises(TypeError, match=msg): + hash(ser) + + def test_contains(self, datetime_series): + tm.assert_contains_all(datetime_series.index, datetime_series) + + def test_axis_alias(self): + s = Series([1, 2, np.nan]) + tm.assert_series_equal(s.dropna(axis="rows"), s.dropna(axis="index")) + assert s.dropna().sum("rows") == 3 + assert s._get_axis_number("rows") == 0 + assert s._get_axis_name("rows") == "index" + + def test_class_axis(self): + # https://github.com/pandas-dev/pandas/issues/18147 + # no exception and no empty docstring + assert pydoc.getdoc(Series.index) + + def test_ndarray_compat(self): + # test numpy compat with Series as sub-class of NDFrame + tsdf = DataFrame( + np.random.default_rng(2).standard_normal((1000, 3)), + columns=["A", "B", "C"], + index=date_range("1/1/2000", periods=1000), + ) + + def f(x): + return x[x.idxmax()] + + result = tsdf.apply(f) + expected = tsdf.max() + tm.assert_series_equal(result, expected) + + def test_ndarray_compat_like_func(self): + # using an ndarray like function + s = Series(np.random.default_rng(2).standard_normal(10)) + result = Series(np.ones_like(s)) + expected = Series(1, index=range(10), dtype="float64") + tm.assert_series_equal(result, expected) + + def test_ndarray_compat_ravel(self): + # ravel + s = Series(np.random.default_rng(2).standard_normal(10)) + with tm.assert_produces_warning(FutureWarning, match="ravel is deprecated"): + result = s.ravel(order="F") + tm.assert_almost_equal(result, s.values.ravel(order="F")) + + def test_empty_method(self): + s_empty = Series(dtype=object) + 
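+        # .empty for a Series just means axis 0 has length 0; an equivalent
+        # spelling of the assertion below would be `len(s_empty) == 0`
+        # (noted here as a sketch, not added to the test).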
assert s_empty.empty + + @pytest.mark.parametrize("dtype", ["int64", object]) + def test_empty_method_full_series(self, dtype): + full_series = Series(index=[1], dtype=dtype) + assert not full_series.empty + + @pytest.mark.parametrize("dtype", [None, "Int64"]) + def test_integer_series_size(self, dtype): + # GH 25580 + s = Series(range(9), dtype=dtype) + assert s.size == 9 + + def test_attrs(self): + s = Series([0, 1], name="abc") + assert s.attrs == {} + s.attrs["version"] = 1 + result = s + 1 + assert result.attrs == {"version": 1} + + def test_inspect_getmembers(self): + # GH38782 + pytest.importorskip("jinja2") + ser = Series(dtype=object) + msg = "Series._data is deprecated" + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + inspect.getmembers(ser) + + def test_unknown_attribute(self): + # GH#9680 + tdi = timedelta_range(start=0, periods=10, freq="1s") + ser = Series(np.random.default_rng(2).normal(size=10), index=tdi) + assert "foo" not in ser.__dict__ + msg = "'Series' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + ser.foo + + @pytest.mark.parametrize("op", ["year", "day", "second", "weekday"]) + def test_datetime_series_no_datelike_attrs(self, op, datetime_series): + # GH#7206 + msg = f"'Series' object has no attribute '{op}'" + with pytest.raises(AttributeError, match=msg): + getattr(datetime_series, op) + + def test_series_datetimelike_attribute_access(self): + # attribute access should still work! + ser = Series({"year": 2000, "month": 1, "day": 10}) + assert ser.year == 2000 + assert ser.month == 1 + assert ser.day == 10 + + def test_series_datetimelike_attribute_access_invalid(self): + ser = Series({"year": 2000, "month": 1, "day": 10}) + msg = "'Series' object has no attribute 'weekday'" + with pytest.raises(AttributeError, match=msg): + ser.weekday + + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") + @pytest.mark.parametrize( + "kernel, has_numeric_only", + [ + ("skew", True), + ("var", True), + ("all", False), + ("prod", True), + ("any", False), + ("idxmin", False), + ("quantile", False), + ("idxmax", False), + ("min", True), + ("sem", True), + ("mean", True), + ("nunique", False), + ("max", True), + ("sum", True), + ("count", False), + ("median", True), + ("std", True), + ("backfill", False), + ("rank", True), + ("pct_change", False), + ("cummax", False), + ("shift", False), + ("diff", False), + ("cumsum", False), + ("cummin", False), + ("cumprod", False), + ("fillna", False), + ("ffill", False), + ("pad", False), + ("bfill", False), + ("sample", False), + ("tail", False), + ("take", False), + ("head", False), + ("cov", False), + ("corr", False), + ], + ) + @pytest.mark.parametrize("dtype", [bool, int, float, object]) + def test_numeric_only(self, kernel, has_numeric_only, dtype): + # GH#47500 + ser = Series([0, 1, 1], dtype=dtype) + if kernel == "corrwith": + args = (ser,) + elif kernel == "corr": + args = (ser,) + elif kernel == "cov": + args = (ser,) + elif kernel == "nth": + args = (0,) + elif kernel == "fillna": + args = (True,) + elif kernel == "fillna": + args = ("ffill",) + elif kernel == "take": + args = ([0],) + elif kernel == "quantile": + args = (0.5,) + else: + args = () + method = getattr(ser, kernel) + if not has_numeric_only: + msg = ( + "(got an unexpected keyword argument 'numeric_only'" + "|too many arguments passed in)" + ) + with pytest.raises(TypeError, match=msg): + method(*args, numeric_only=True) + elif dtype is object: + msg = 
f"Series.{kernel} does not allow numeric_only=True with non-numeric" + with pytest.raises(TypeError, match=msg): + method(*args, numeric_only=True) + else: + result = method(*args, numeric_only=True) + expected = method(*args, numeric_only=False) + if isinstance(expected, Series): + # transformer + tm.assert_series_equal(result, expected) + else: + # reducer + assert result == expected + + +@pytest.mark.parametrize("converter", [int, float, complex]) +def test_float_int_deprecated(converter): + # GH 51101 + with tm.assert_produces_warning(FutureWarning): + assert converter(Series([1])) == converter(1) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_arithmetic.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..b40e2e99dae2eed2d1673797fcb9810bb412de0c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_arithmetic.py @@ -0,0 +1,984 @@ +from datetime import ( + date, + timedelta, + timezone, +) +from decimal import Decimal +import operator + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas._libs.tslibs import IncompatibleFrequency + +import pandas as pd +from pandas import ( + Categorical, + DatetimeTZDtype, + Index, + Series, + Timedelta, + bdate_range, + date_range, + isna, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.computation import expressions as expr +from pandas.core.computation.check import NUMEXPR_INSTALLED + + +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) +def switch_numexpr_min_elements(request, monkeypatch): + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", request.param) + yield + + +def _permute(obj): + return obj.take(np.random.default_rng(2).permutation(len(obj))) + + +class TestSeriesFlexArithmetic: + @pytest.mark.parametrize( + "ts", + [ + (lambda x: x, lambda x: x * 2, False), + (lambda x: x, lambda x: x[::2], False), + (lambda x: x, lambda x: 5, True), + ( + lambda x: Series(range(10), dtype=np.float64), + lambda x: Series(range(10), dtype=np.float64), + True, + ), + ], + ) + @pytest.mark.parametrize( + "opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"] + ) + def test_flex_method_equivalence(self, opname, ts): + # check that Series.{opname} behaves like Series.__{opname}__, + tser = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20), + name="ts", + ) + + series = ts[0](tser) + other = ts[1](tser) + check_reverse = ts[2] + + op = getattr(Series, opname) + alt = getattr(operator, opname) + + result = op(series, other) + expected = alt(series, other) + tm.assert_almost_equal(result, expected) + if check_reverse: + rop = getattr(Series, "r" + opname) + result = rop(series, other) + expected = alt(other, series) + tm.assert_almost_equal(result, expected) + + def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators): + # GH 13208 + class MySeries(Series): + _metadata = ["x"] + + @property + def _constructor(self): + return MySeries + + opname = all_arithmetic_operators + op = getattr(Series, opname) + m = MySeries([1, 2, 3], name="test") + m.x = 42 + result = op(m, 1) + assert result.x == 42 + + def test_flex_add_scalar_fill_value(self): + # GH12723 + ser = Series([0, 1, np.nan, 3, 4, 5]) + + exp = ser.fillna(0).add(2) + res = ser.add(2, fill_value=0) + tm.assert_series_equal(res, exp) + + pairings = [(Series.div, 
operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)] + for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]: + fv = 0 + lop = getattr(Series, op) + lequiv = getattr(operator, op) + rop = getattr(Series, "r" + op) + # bind op at definition time... + requiv = lambda x, y, op=op: getattr(operator, op)(y, x) + pairings.append((lop, lequiv, fv)) + pairings.append((rop, requiv, fv)) + + @pytest.mark.parametrize("op, equiv_op, fv", pairings) + def test_operators_combine(self, op, equiv_op, fv): + def _check_fill(meth, op, a, b, fill_value=0): + exp_index = a.index.union(b.index) + a = a.reindex(exp_index) + b = b.reindex(exp_index) + + amask = isna(a) + bmask = isna(b) + + exp_values = [] + for i in range(len(exp_index)): + with np.errstate(all="ignore"): + if amask[i]: + if bmask[i]: + exp_values.append(np.nan) + continue + exp_values.append(op(fill_value, b[i])) + elif bmask[i]: + if amask[i]: + exp_values.append(np.nan) + continue + exp_values.append(op(a[i], fill_value)) + else: + exp_values.append(op(a[i], b[i])) + + result = meth(a, b, fill_value=fill_value) + expected = Series(exp_values, exp_index) + tm.assert_series_equal(result, expected) + + a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5)) + b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6)) + + result = op(a, b) + exp = equiv_op(a, b) + tm.assert_series_equal(result, exp) + _check_fill(op, equiv_op, a, b, fill_value=fv) + # should accept axis=0 or axis='rows' + op(a, b, axis=0) + + +class TestSeriesArithmetic: + # Some of these may end up in tests/arithmetic, but are not yet sorted + + def test_add_series_with_period_index(self): + rng = pd.period_range("1/1/2000", "1/1/2010", freq="Y") + ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) + + result = ts + ts[::2] + expected = ts + ts + expected.iloc[1::2] = np.nan + tm.assert_series_equal(result, expected) + + result = ts + _permute(ts[::2]) + tm.assert_series_equal(result, expected) + + msg = "Input has different freq=D from Period\\(freq=Y-DEC\\)" + with pytest.raises(IncompatibleFrequency, match=msg): + ts + ts.asfreq("D", how="end") + + @pytest.mark.parametrize( + "target_add,input_value,expected_value", + [ + ("!", ["hello", "world"], ["hello!", "world!"]), + ("m", ["hello", "world"], ["hellom", "worldm"]), + ], + ) + def test_string_addition(self, target_add, input_value, expected_value): + # GH28658 - ensure adding 'm' does not raise an error + a = Series(input_value) + + result = a + target_add + expected = Series(expected_value) + tm.assert_series_equal(result, expected) + + def test_divmod(self): + # GH#25557 + a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"]) + b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"]) + + result = a.divmod(b) + expected = divmod(a, b) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + result = a.rdivmod(b) + expected = divmod(b, a) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + @pytest.mark.parametrize("index", [None, range(9)]) + def test_series_integer_mod(self, index): + # GH#24396 + s1 = Series(range(1, 10)) + s2 = Series("foo", index=index) + + msg = "not all arguments converted during string formatting|mod not" + + with pytest.raises((TypeError, NotImplementedError), match=msg): + s2 % s1 + + def test_add_with_duplicate_index(self): + # GH14227 + s1 = Series([1, 2], index=[1, 1]) + s2 = Series([10, 10], index=[1, 2]) + result = s1 + s2 + expected = 
Series([11, 12, np.nan], index=[1, 1, 2]) + tm.assert_series_equal(result, expected) + + def test_add_na_handling(self): + ser = Series( + [Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)] + ) + + result = ser + ser.shift(1) + result2 = ser.shift(1) + ser + assert isna(result.iloc[0]) + assert isna(result2.iloc[0]) + + def test_add_corner_cases(self, datetime_series): + empty = Series([], index=Index([]), dtype=np.float64) + + result = datetime_series + empty + assert np.isnan(result).all() + + result = empty + empty.copy() + assert len(result) == 0 + + def test_add_float_plus_int(self, datetime_series): + # float + int + int_ts = datetime_series.astype(int)[:-5] + added = datetime_series + int_ts + expected = Series( + datetime_series.values[:-5] + int_ts.values, + index=datetime_series.index[:-5], + name="ts", + ) + tm.assert_series_equal(added[:-5], expected) + + def test_mul_empty_int_corner_case(self): + s1 = Series([], [], dtype=np.int32) + s2 = Series({"x": 0.0}) + tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"])) + + def test_sub_datetimelike_align(self): + # GH#7500 + # datetimelike ops need to align + dt = Series(date_range("2012-1-1", periods=3, freq="D")) + dt.iloc[2] = np.nan + dt2 = dt[::-1] + + expected = Series([timedelta(0), timedelta(0), pd.NaT]) + # name is reset + result = dt2 - dt + tm.assert_series_equal(result, expected) + + expected = Series(expected, name=0) + result = (dt2.to_frame() - dt.to_frame())[0] + tm.assert_series_equal(result, expected) + + def test_alignment_doesnt_change_tz(self): + # GH#33671 + dti = date_range("2016-01-01", periods=10, tz="CET") + dti_utc = dti.tz_convert("UTC") + ser = Series(10, index=dti) + ser_utc = Series(10, index=dti_utc) + + # we don't care about the result, just that original indexes are unchanged + ser * ser_utc + + assert ser.index is dti + assert ser_utc.index is dti_utc + + def test_alignment_categorical(self): + # GH13365 + cat = Categorical(["3z53", "3z53", "LoJG", "LoJG", "LoJG", "N503"]) + ser1 = Series(2, index=cat) + ser2 = Series(2, index=cat[:-1]) + result = ser1 * ser2 + + exp_index = ["3z53"] * 4 + ["LoJG"] * 9 + ["N503"] + exp_index = pd.CategoricalIndex(exp_index, categories=cat.categories) + exp_values = [4.0] * 13 + [np.nan] + expected = Series(exp_values, exp_index) + + tm.assert_series_equal(result, expected) + + def test_arithmetic_with_duplicate_index(self): + # GH#8363 + # integer ops with a non-unique index + index = [2, 2, 3, 3, 4] + ser = Series(np.arange(1, 6, dtype="int64"), index=index) + other = Series(np.arange(5, dtype="int64"), index=index) + result = ser - other + expected = Series(1, index=[2, 2, 3, 3, 4]) + tm.assert_series_equal(result, expected) + + # GH#8363 + # datetime ops with a non-unique index + ser = Series(date_range("20130101 09:00:00", periods=5), index=index) + other = Series(date_range("20130101", periods=5), index=index) + result = ser - other + expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4]) + tm.assert_series_equal(result, expected) + + def test_masked_and_non_masked_propagate_na(self): + # GH#45810 + ser1 = Series([0, np.nan], dtype="float") + ser2 = Series([0, 1], dtype="Int64") + result = ser1 * ser2 + expected = Series([0, pd.NA], dtype="Float64") + tm.assert_series_equal(result, expected) + + def test_mask_div_propagate_na_for_non_na_dtype(self): + # GH#42630 + ser1 = Series([15, pd.NA, 5, 4], dtype="Int64") + ser2 = Series([15, 5, np.nan, 4]) + result = ser1 / ser2 + expected = Series([1.0, pd.NA, pd.NA, 1.0], 
dtype="Float64") + tm.assert_series_equal(result, expected) + + result = ser2 / ser1 + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("val, dtype", [(3, "Int64"), (3.5, "Float64")]) + def test_add_list_to_masked_array(self, val, dtype): + # GH#22962 + ser = Series([1, None, 3], dtype="Int64") + result = ser + [1, None, val] + expected = Series([2, None, 3 + val], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = [1, None, val] + ser + tm.assert_series_equal(result, expected) + + def test_add_list_to_masked_array_boolean(self, request): + # GH#22962 + warning = ( + UserWarning + if request.node.callspec.id == "numexpr" and NUMEXPR_INSTALLED + else None + ) + ser = Series([True, None, False], dtype="boolean") + with tm.assert_produces_warning(warning): + result = ser + [True, None, True] + expected = Series([True, None, True], dtype="boolean") + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(warning): + result = [True, None, True] + ser + tm.assert_series_equal(result, expected) + + +# ------------------------------------------------------------------ +# Comparisons + + +class TestSeriesFlexComparison: + @pytest.mark.parametrize("axis", [0, None, "index"]) + def test_comparison_flex_basic(self, axis, comparison_op): + left = Series(np.random.default_rng(2).standard_normal(10)) + right = Series(np.random.default_rng(2).standard_normal(10)) + result = getattr(left, comparison_op.__name__)(right, axis=axis) + expected = comparison_op(left, right) + tm.assert_series_equal(result, expected) + + def test_comparison_bad_axis(self, comparison_op): + left = Series(np.random.default_rng(2).standard_normal(10)) + right = Series(np.random.default_rng(2).standard_normal(10)) + + msg = "No axis named 1 for object type" + with pytest.raises(ValueError, match=msg): + getattr(left, comparison_op.__name__)(right, axis=1) + + @pytest.mark.parametrize( + "values, op", + [ + ([False, False, True, False], "eq"), + ([True, True, False, True], "ne"), + ([False, False, True, False], "le"), + ([False, False, False, False], "lt"), + ([False, True, True, False], "ge"), + ([False, True, False, False], "gt"), + ], + ) + def test_comparison_flex_alignment(self, values, op): + left = Series([1, 3, 2], index=list("abc")) + right = Series([2, 2, 2], index=list("bcd")) + result = getattr(left, op)(right) + expected = Series(values, index=list("abcd")) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values, op, fill_value", + [ + ([False, False, True, True], "eq", 2), + ([True, True, False, False], "ne", 2), + ([False, False, True, True], "le", 0), + ([False, False, False, True], "lt", 0), + ([True, True, True, False], "ge", 0), + ([True, True, False, False], "gt", 0), + ], + ) + def test_comparison_flex_alignment_fill(self, values, op, fill_value): + left = Series([1, 3, 2], index=list("abc")) + right = Series([2, 2, 2], index=list("bcd")) + result = getattr(left, op)(right, fill_value=fill_value) + expected = Series(values, index=list("abcd")) + tm.assert_series_equal(result, expected) + + +class TestSeriesComparison: + def test_comparison_different_length(self): + a = Series(["a", "b", "c"]) + b = Series(["b", "a"]) + msg = "only compare identically-labeled Series" + with pytest.raises(ValueError, match=msg): + a < b + + a = Series([1, 2]) + b = Series([2, 3, 4]) + with pytest.raises(ValueError, match=msg): + a == b + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def 
test_ser_flex_cmp_return_dtypes(self, opname): + # GH#15115 + ser = Series([1, 3, 2], index=range(3)) + const = 2 + result = getattr(ser, opname)(const).dtypes + expected = np.dtype("bool") + assert result == expected + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_ser_flex_cmp_return_dtypes_empty(self, opname): + # GH#15115 empty Series case + ser = Series([1, 3, 2], index=range(3)) + empty = ser.iloc[:0] + const = 2 + result = getattr(empty, opname)(const).dtypes + expected = np.dtype("bool") + assert result == expected + + @pytest.mark.parametrize( + "names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")] + ) + def test_ser_cmp_result_names(self, names, comparison_op): + # datetime64 dtype + op = comparison_op + dti = date_range("1949-06-07 03:00:00", freq="h", periods=5, name=names[0]) + ser = Series(dti).rename(names[1]) + result = op(ser, dti) + assert result.name == names[2] + + # datetime64tz dtype + dti = dti.tz_localize("US/Central") + dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize + ser = Series(dti).rename(names[1]) + result = op(ser, dti) + assert result.name == names[2] + + # timedelta64 dtype + tdi = dti - dti.shift(1) + ser = Series(tdi).rename(names[1]) + result = op(ser, tdi) + assert result.name == names[2] + + # interval dtype + if op in [operator.eq, operator.ne]: + # interval dtype comparisons not yet implemented + ii = pd.interval_range(start=0, periods=5, name=names[0]) + ser = Series(ii).rename(names[1]) + result = op(ser, ii) + assert result.name == names[2] + + # categorical + if op in [operator.eq, operator.ne]: + # categorical dtype comparisons raise for inequalities + cidx = tdi.astype("category") + ser = Series(cidx).rename(names[1]) + result = op(ser, cidx) + assert result.name == names[2] + + def test_comparisons(self, using_infer_string): + s = Series(["a", "b", "c"]) + s2 = Series([False, True, False]) + + # it works! 
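+        # Sketch of why (not part of the original test): with NumPy-backed
+        # object/str data, == compares elementwise and mismatched types are
+        # simply unequal (e.g. "a" == False -> False), giving all-False;
+        # the pyarrow-backed string path below instead raises because no
+        # string-vs-bool equality kernel exists.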
+ exp = Series([False, False, False]) + if using_infer_string: + import pyarrow as pa + + msg = "has no kernel" + # TODO(3.0) GH56008 + with pytest.raises(pa.lib.ArrowNotImplementedError, match=msg): + s == s2 + with tm.assert_produces_warning( + DeprecationWarning, match="comparison", check_stacklevel=False + ): + with pytest.raises(pa.lib.ArrowNotImplementedError, match=msg): + s2 == s + else: + tm.assert_series_equal(s == s2, exp) + tm.assert_series_equal(s2 == s, exp) + + # ----------------------------------------------------------------- + # Categorical Dtype Comparisons + + def test_categorical_comparisons(self): + # GH#8938 + # allow equality comparisons + a = Series(list("abc"), dtype="category") + b = Series(list("abc"), dtype="object") + c = Series(["a", "b", "cc"], dtype="object") + d = Series(list("acb"), dtype="object") + e = Categorical(list("abc")) + f = Categorical(list("acb")) + + # vs scalar + assert not (a == "a").all() + assert ((a != "a") == ~(a == "a")).all() + + assert not ("a" == a).all() + assert (a == "a")[0] + assert ("a" == a)[0] + assert not ("a" != a)[0] + + # vs list-like + assert (a == a).all() + assert not (a != a).all() + + assert (a == list(a)).all() + assert (a == b).all() + assert (b == a).all() + assert ((~(a == b)) == (a != b)).all() + assert ((~(b == a)) == (b != a)).all() + + assert not (a == c).all() + assert not (c == a).all() + assert not (a == d).all() + assert not (d == a).all() + + # vs a cat-like + assert (a == e).all() + assert (e == a).all() + assert not (a == f).all() + assert not (f == a).all() + + assert (~(a == e) == (a != e)).all() + assert (~(e == a) == (e != a)).all() + assert (~(a == f) == (a != f)).all() + assert (~(f == a) == (f != a)).all() + + # non-equality is not comparable + msg = "can only compare equality or not" + with pytest.raises(TypeError, match=msg): + a < b + with pytest.raises(TypeError, match=msg): + b < a + with pytest.raises(TypeError, match=msg): + a > b + with pytest.raises(TypeError, match=msg): + b > a + + def test_unequal_categorical_comparison_raises_type_error(self): + # unequal comparison should raise for unordered cats + cat = Series(Categorical(list("abc"))) + msg = "can only compare equality or not" + with pytest.raises(TypeError, match=msg): + cat > "b" + + cat = Series(Categorical(list("abc"), ordered=False)) + with pytest.raises(TypeError, match=msg): + cat > "b" + + # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057 + # and following comparisons with scalars not in categories should raise + # for unequal comps, but not for equal/not equal + cat = Series(Categorical(list("abc"), ordered=True)) + + msg = "Invalid comparison between dtype=category and str" + with pytest.raises(TypeError, match=msg): + cat < "d" + with pytest.raises(TypeError, match=msg): + cat > "d" + with pytest.raises(TypeError, match=msg): + "d" < cat + with pytest.raises(TypeError, match=msg): + "d" > cat + + tm.assert_series_equal(cat == "d", Series([False, False, False])) + tm.assert_series_equal(cat != "d", Series([True, True, True])) + + # ----------------------------------------------------------------- + + def test_comparison_tuples(self): + # GH#11339 + # comparisons vs tuple + s = Series([(1, 1), (1, 2)]) + + result = s == (1, 2) + expected = Series([False, True]) + tm.assert_series_equal(result, expected) + + result = s != (1, 2) + expected = Series([True, False]) + tm.assert_series_equal(result, expected) + + result = s == (0, 0) + expected = Series([False, False]) + tm.assert_series_equal(result, 
expected) + + result = s != (0, 0) + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + s = Series([(1, 1), (1, 1)]) + + result = s == (1, 1) + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + result = s != (1, 1) + expected = Series([False, False]) + tm.assert_series_equal(result, expected) + + def test_comparison_frozenset(self): + ser = Series([frozenset([1]), frozenset([1, 2])]) + + result = ser == frozenset([1]) + expected = Series([True, False]) + tm.assert_series_equal(result, expected) + + def test_comparison_operators_with_nas(self, comparison_op): + ser = Series(bdate_range("1/1/2000", periods=10), dtype=object) + ser[::2] = np.nan + + # test that comparisons work + val = ser[5] + + result = comparison_op(ser, val) + expected = comparison_op(ser.dropna(), val).reindex(ser.index) + + msg = "Downcasting object dtype arrays" + with tm.assert_produces_warning(FutureWarning, match=msg): + if comparison_op is operator.ne: + expected = expected.fillna(True).astype(bool) + else: + expected = expected.fillna(False).astype(bool) + + tm.assert_series_equal(result, expected) + + def test_ne(self): + ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) + expected = np.array([True, True, False, True, True]) + tm.assert_numpy_array_equal(ts.index != 5, expected) + tm.assert_numpy_array_equal(~(ts.index == 5), expected) + + @pytest.mark.parametrize( + "left, right", + [ + ( + Series([1, 2, 3], index=list("ABC"), name="x"), + Series([2, 2, 2], index=list("ABD"), name="x"), + ), + ( + Series([1, 2, 3], index=list("ABC"), name="x"), + Series([2, 2, 2, 2], index=list("ABCD"), name="x"), + ), + ], + ) + def test_comp_ops_df_compat(self, left, right, frame_or_series): + # GH 1134 + # GH 50083 to clarify that index and columns must be identically labeled + if frame_or_series is not Series: + msg = ( + rf"Can only compare identically-labeled \(both index and columns\) " + f"{frame_or_series.__name__} objects" + ) + left = left.to_frame() + right = right.to_frame() + else: + msg = ( + f"Can only compare identically-labeled {frame_or_series.__name__} " + f"objects" + ) + + with pytest.raises(ValueError, match=msg): + left == right + with pytest.raises(ValueError, match=msg): + right == left + + with pytest.raises(ValueError, match=msg): + left != right + with pytest.raises(ValueError, match=msg): + right != left + + with pytest.raises(ValueError, match=msg): + left < right + with pytest.raises(ValueError, match=msg): + right < left + + def test_compare_series_interval_keyword(self): + # GH#25338 + ser = Series(["IntervalA", "IntervalB", "IntervalC"]) + result = ser == "IntervalA" + expected = Series([True, False, False]) + tm.assert_series_equal(result, expected) + + +# ------------------------------------------------------------------ +# Unsorted +# These arithmetic tests were previously in other files, eventually +# should be parametrized and put into tests.arithmetic + + +class TestTimeSeriesArithmetic: + def test_series_add_tz_mismatch_converts_to_utc(self): + rng = date_range("1/1/2011", periods=100, freq="h", tz="utc") + + perm = np.random.default_rng(2).permutation(100)[:90] + ser1 = Series( + np.random.default_rng(2).standard_normal(90), + index=rng.take(perm).tz_convert("US/Eastern"), + ) + + perm = np.random.default_rng(2).permutation(100)[:90] + ser2 = Series( + np.random.default_rng(2).standard_normal(90), + index=rng.take(perm).tz_convert("Europe/Berlin"), + ) + + result = ser1 + ser2 + + uts1 = ser1.tz_convert("utc") + uts2 = 
ser2.tz_convert("utc") + expected = uts1 + uts2 + + # sort since input indexes are not equal + expected = expected.sort_index() + + assert result.index.tz is timezone.utc + tm.assert_series_equal(result, expected) + + def test_series_add_aware_naive_raises(self): + rng = date_range("1/1/2011", periods=10, freq="h") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) + + ser_utc = ser.tz_localize("utc") + + msg = "Cannot join tz-naive with tz-aware DatetimeIndex" + with pytest.raises(Exception, match=msg): + ser + ser_utc + + with pytest.raises(Exception, match=msg): + ser_utc + ser + + # TODO: belongs in tests/arithmetic? + def test_datetime_understood(self, unit): + # Ensures it doesn't fail to create the right series + # reported in issue#16726 + series = Series(date_range("2012-01-01", periods=3, unit=unit)) + offset = pd.offsets.DateOffset(days=6) + result = series - offset + exp_dti = pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]).as_unit( + unit + ) + expected = Series(exp_dti) + tm.assert_series_equal(result, expected) + + def test_align_date_objects_with_datetimeindex(self): + rng = date_range("1/1/2000", periods=20) + ts = Series(np.random.default_rng(2).standard_normal(20), index=rng) + + ts_slice = ts[5:] + ts2 = ts_slice.copy() + ts2.index = [x.date() for x in ts2.index] + + result = ts + ts2 + result2 = ts2 + ts + expected = ts + ts[5:] + expected.index = expected.index._with_freq(None) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + +class TestNamePreservation: + @pytest.mark.parametrize("box", [list, tuple, np.array, Index, Series, pd.array]) + @pytest.mark.parametrize("flex", [True, False]) + def test_series_ops_name_retention(self, flex, box, names, all_binary_operators): + # GH#33930 consistent name-retention + op = all_binary_operators + + left = Series(range(10), name=names[0]) + right = Series(range(10), name=names[1]) + + name = op.__name__.strip("_") + is_logical = name in ["and", "rand", "xor", "rxor", "or", "ror"] + + msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + warn = None + if box in [list, tuple] and is_logical: + warn = FutureWarning + + right = box(right) + if flex: + if is_logical: + # Series doesn't have these as flex methods + return + result = getattr(left, name)(right) + else: + # GH#37374 logical ops behaving as set ops deprecated + with tm.assert_produces_warning(warn, match=msg): + result = op(left, right) + + assert isinstance(result, Series) + if box in [Index, Series]: + assert result.name is names[2] or result.name == names[2] + else: + assert result.name is names[0] or result.name == names[0] + + def test_binop_maybe_preserve_name(self, datetime_series): + # names match, preserve + result = datetime_series * datetime_series + assert result.name == datetime_series.name + result = datetime_series.mul(datetime_series) + assert result.name == datetime_series.name + + result = datetime_series * datetime_series[:-2] + assert result.name == datetime_series.name + + # names don't match, don't preserve + cp = datetime_series.copy() + cp.name = "something else" + result = datetime_series + cp + assert result.name is None + result = datetime_series.add(cp) + assert result.name is None + + ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"] + ops = ops + ["r" + op for op in ops] + for op in ops: + # names match, preserve + ser = datetime_series.copy() + result = getattr(ser, op)(ser) + assert result.name 
== datetime_series.name + + # names don't match, don't preserve + cp = datetime_series.copy() + cp.name = "changed" + result = getattr(ser, op)(cp) + assert result.name is None + + def test_scalarop_preserve_name(self, datetime_series): + result = datetime_series * 2 + assert result.name == datetime_series.name + + +class TestInplaceOperations: + @pytest.mark.parametrize( + "dtype1, dtype2, dtype_expected, dtype_mul", + ( + ("Int64", "Int64", "Int64", "Int64"), + ("float", "float", "float", "float"), + ("Int64", "float", "Float64", "Float64"), + ("Int64", "Float64", "Float64", "Float64"), + ), + ) + def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul): + # GH 37910 + + ser1 = Series([1], dtype=dtype1) + ser2 = Series([2], dtype=dtype2) + ser1 += ser2 + expected = Series([3], dtype=dtype_expected) + tm.assert_series_equal(ser1, expected) + + ser1 -= ser2 + expected = Series([1], dtype=dtype_expected) + tm.assert_series_equal(ser1, expected) + + ser1 *= ser2 + expected = Series([2], dtype=dtype_mul) + tm.assert_series_equal(ser1, expected) + + +def test_none_comparison(request, series_with_simple_index): + series = series_with_simple_index + + if len(series) < 1: + request.applymarker( + pytest.mark.xfail(reason="Test doesn't make sense on empty data") + ) + + # bug brought up by #1079 + # changed from TypeError in 0.17.0 + series.iloc[0] = np.nan + + # noinspection PyComparisonWithNone + result = series == None # noqa: E711 + assert not result.iat[0] + assert not result.iat[1] + + # noinspection PyComparisonWithNone + result = series != None # noqa: E711 + assert result.iat[0] + assert result.iat[1] + + result = None == series # noqa: E711 + assert not result.iat[0] + assert not result.iat[1] + + result = None != series # noqa: E711 + assert result.iat[0] + assert result.iat[1] + + if lib.is_np_dtype(series.dtype, "M") or isinstance(series.dtype, DatetimeTZDtype): + # Following DatetimeIndex (and Timestamp) convention, + # inequality comparisons with Series[datetime64] raise + msg = "Invalid comparison" + with pytest.raises(TypeError, match=msg): + None > series + with pytest.raises(TypeError, match=msg): + series > None + else: + result = None > series + assert not result.iat[0] + assert not result.iat[1] + + result = series < None + assert not result.iat[0] + assert not result.iat[1] + + +def test_series_varied_multiindex_alignment(): + # GH 20414 + s1 = Series( + range(8), + index=pd.MultiIndex.from_product( + [list("ab"), list("xy"), [1, 2]], names=["ab", "xy", "num"] + ), + ) + s2 = Series( + [1000 * i for i in range(1, 5)], + index=pd.MultiIndex.from_product([list("xy"), [1, 2]], names=["xy", "num"]), + ) + result = s1.loc[pd.IndexSlice[["a"], :, :]] + s2 + expected = Series( + [1000, 2001, 3002, 4003], + index=pd.MultiIndex.from_tuples( + [("a", "x", 1), ("a", "x", 2), ("a", "y", 1), ("a", "y", 2)], + names=["ab", "xy", "num"], + ), + ) + tm.assert_series_equal(result, expected) + + +def test_rmod_consistent_large_series(): + # GH 29602 + result = Series([2] * 10001).rmod(-1) + expected = Series([1] * 10001) + + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_constructors.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..387be8398e4b281421317c844269fe45a10eb6bc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_constructors.py @@ -0,0 +1,2290 @@ +from 
collections import OrderedDict +from collections.abc import Iterator +from datetime import ( + datetime, + timedelta, +) + +from dateutil.tz import tzoffset +import numpy as np +from numpy import ma +import pytest + +from pandas._libs import ( + iNaT, + lib, +) +from pandas.compat.numpy import np_version_gt2 +from pandas.errors import IntCastingNaNError +import pandas.util._test_decorators as td + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + DatetimeTZDtype, + Index, + Interval, + IntervalIndex, + MultiIndex, + NaT, + Period, + RangeIndex, + Series, + Timestamp, + date_range, + isna, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import ( + IntegerArray, + IntervalArray, + period_array, +) +from pandas.core.internals.blocks import NumpyBlock + + +class TestSeriesConstructors: + def test_from_ints_with_non_nano_dt64_dtype(self, index_or_series): + values = np.arange(10) + + res = index_or_series(values, dtype="M8[s]") + expected = index_or_series(values.astype("M8[s]")) + tm.assert_equal(res, expected) + + res = index_or_series(list(values), dtype="M8[s]") + tm.assert_equal(res, expected) + + def test_from_na_value_and_interval_of_datetime_dtype(self): + # GH#41805 + ser = Series([None], dtype="interval[datetime64[ns]]") + assert ser.isna().all() + assert ser.dtype == "interval[datetime64[ns], right]" + + def test_infer_with_date_and_datetime(self): + # GH#49341 pre-2.0 we inferred datetime-and-date to datetime64, which + # was inconsistent with Index behavior + ts = Timestamp(2016, 1, 1) + vals = [ts.to_pydatetime(), ts.date()] + + ser = Series(vals) + expected = Series(vals, dtype=object) + tm.assert_series_equal(ser, expected) + + idx = Index(vals) + expected = Index(vals, dtype=object) + tm.assert_index_equal(idx, expected) + + def test_unparsable_strings_with_dt64_dtype(self): + # pre-2.0 these would be silently ignored and come back with object dtype + vals = ["aa"] + msg = "^Unknown datetime string format, unable to parse: aa, at position 0$" + with pytest.raises(ValueError, match=msg): + Series(vals, dtype="datetime64[ns]") + + with pytest.raises(ValueError, match=msg): + Series(np.array(vals, dtype=object), dtype="datetime64[ns]") + + @pytest.mark.parametrize( + "constructor", + [ + # NOTE: some overlap with test_constructor_empty but that test does not + # test for None or an empty generator. + # test_constructor_pass_none tests None but only with the index also + # passed. 
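+            # Each constructor below is expected to be equivalent to
+            # Series(index=empty_index): an empty, object-dtype Series
+            # (e.g. Series({}).dtype -> dtype("O")), per the asserts in
+            # the test body.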
+ (lambda idx: Series(index=idx)), + (lambda idx: Series(None, index=idx)), + (lambda idx: Series({}, index=idx)), + (lambda idx: Series((), index=idx)), + (lambda idx: Series([], index=idx)), + (lambda idx: Series((_ for _ in []), index=idx)), + (lambda idx: Series(data=None, index=idx)), + (lambda idx: Series(data={}, index=idx)), + (lambda idx: Series(data=(), index=idx)), + (lambda idx: Series(data=[], index=idx)), + (lambda idx: Series(data=(_ for _ in []), index=idx)), + ], + ) + @pytest.mark.parametrize("empty_index", [None, []]) + def test_empty_constructor(self, constructor, empty_index): + # GH 49573 (addition of empty_index parameter) + expected = Series(index=empty_index) + result = constructor(empty_index) + + assert result.dtype == object + assert len(result.index) == 0 + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_invalid_dtype(self): + # GH15520 + msg = "not understood" + invalid_list = [Timestamp, "Timestamp", list] + for dtype in invalid_list: + with pytest.raises(TypeError, match=msg): + Series([], name="time", dtype=dtype) + + def test_invalid_compound_dtype(self): + # GH#13296 + c_dtype = np.dtype([("a", "i8"), ("b", "f4")]) + cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype) + + with pytest.raises(ValueError, match="Use DataFrame instead"): + Series(cdt_arr, index=["A", "B"]) + + def test_scalar_conversion(self): + # Pass in scalar is disabled + scalar = Series(0.5) + assert not isinstance(scalar, float) + + def test_scalar_extension_dtype(self, ea_scalar_and_dtype): + # GH 28401 + + ea_scalar, ea_dtype = ea_scalar_and_dtype + + ser = Series(ea_scalar, index=range(3)) + expected = Series([ea_scalar] * 3, dtype=ea_dtype) + + assert ser.dtype == ea_dtype + tm.assert_series_equal(ser, expected) + + def test_constructor(self, datetime_series, using_infer_string): + empty_series = Series() + assert datetime_series.index._is_all_dates + + # Pass in Series + derived = Series(datetime_series) + assert derived.index._is_all_dates + + tm.assert_index_equal(derived.index, datetime_series.index) + # Ensure new index is not created + assert id(datetime_series.index) == id(derived.index) + + # Mixed type Series + mixed = Series(["hello", np.nan], index=[0, 1]) + assert mixed.dtype == np.object_ if not using_infer_string else "string" + assert np.isnan(mixed[1]) + + assert not empty_series.index._is_all_dates + assert not Series().index._is_all_dates + + # exception raised is of type ValueError GH35744 + with pytest.raises( + ValueError, + match=r"Data must be 1-dimensional, got ndarray of shape \(3, 3\) instead", + ): + Series(np.random.default_rng(2).standard_normal((3, 3)), index=np.arange(3)) + + mixed.name = "Series" + rs = Series(mixed).name + xp = "Series" + assert rs == xp + + # raise on MultiIndex GH4187 + m = MultiIndex.from_arrays([[1, 2], [3, 4]]) + msg = "initializing a Series from a MultiIndex is not supported" + with pytest.raises(NotImplementedError, match=msg): + Series(m) + + def test_constructor_index_ndim_gt_1_raises(self): + # GH#18579 + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9]) + with pytest.raises(ValueError, match="Index data must be 1-dimensional"): + Series([1, 3, 2], index=df) + + @pytest.mark.parametrize("input_class", [list, dict, OrderedDict]) + def test_constructor_empty(self, input_class, using_infer_string): + empty = Series() + empty2 = Series(input_class()) + + # these are Index() and RangeIndex() which don't compare type equal + # but are just .equals + tm.assert_series_equal(empty, empty2, 
check_index_type=False) + + # With explicit dtype: + empty = Series(dtype="float64") + empty2 = Series(input_class(), dtype="float64") + tm.assert_series_equal(empty, empty2, check_index_type=False) + + # GH 18515 : with dtype=category: + empty = Series(dtype="category") + empty2 = Series(input_class(), dtype="category") + tm.assert_series_equal(empty, empty2, check_index_type=False) + + if input_class is not list: + # With index: + empty = Series(index=range(10)) + empty2 = Series(input_class(), index=range(10)) + tm.assert_series_equal(empty, empty2) + + # With index and dtype float64: + empty = Series(np.nan, index=range(10)) + empty2 = Series(input_class(), index=range(10), dtype="float64") + tm.assert_series_equal(empty, empty2) + + # GH 19853 : with empty string, index and dtype str + empty = Series("", dtype=str, index=range(3)) + if using_infer_string: + empty2 = Series("", index=range(3), dtype=object) + else: + empty2 = Series("", index=range(3)) + tm.assert_series_equal(empty, empty2) + + @pytest.mark.parametrize("input_arg", [np.nan, float("nan")]) + def test_constructor_nan(self, input_arg): + empty = Series(dtype="float64", index=range(10)) + empty2 = Series(input_arg, index=range(10)) + + tm.assert_series_equal(empty, empty2, check_index_type=False) + + @pytest.mark.parametrize( + "dtype", + ["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"], + ) + @pytest.mark.parametrize("index", [None, Index([])]) + def test_constructor_dtype_only(self, dtype, index): + # GH-20865 + result = Series(dtype=dtype, index=index) + assert result.dtype == dtype + assert len(result) == 0 + + def test_constructor_no_data_index_order(self): + result = Series(index=["b", "a", "c"]) + assert result.index.tolist() == ["b", "a", "c"] + + def test_constructor_no_data_string_type(self): + # GH 22477 + result = Series(index=[1], dtype=str) + assert np.isnan(result.iloc[0]) + + @pytest.mark.parametrize("item", ["entry", "ѐ", 13]) + def test_constructor_string_element_string_type(self, item): + # GH 22477 + result = Series(item, index=[1], dtype=str) + assert result.iloc[0] == str(item) + + def test_constructor_dtype_str_na_values(self, string_dtype): + # https://github.com/pandas-dev/pandas/issues/21083 + ser = Series(["x", None], dtype=string_dtype) + result = ser.isna() + expected = Series([False, True]) + tm.assert_series_equal(result, expected) + assert ser.iloc[1] is None + + ser = Series(["x", np.nan], dtype=string_dtype) + assert np.isnan(ser.iloc[1]) + + def test_constructor_series(self): + index1 = ["d", "b", "a", "c"] + index2 = sorted(index1) + s1 = Series([4, 7, -5, 3], index=index1) + s2 = Series(s1, index=index2) + + tm.assert_series_equal(s2, s1.sort_index()) + + def test_constructor_iterable(self): + # GH 21987 + class Iter: + def __iter__(self) -> Iterator: + yield from range(10) + + expected = Series(list(range(10)), dtype="int64") + result = Series(Iter(), dtype="int64") + tm.assert_series_equal(result, expected) + + def test_constructor_sequence(self): + # GH 21987 + expected = Series(list(range(10)), dtype="int64") + result = Series(range(10), dtype="int64") + tm.assert_series_equal(result, expected) + + def test_constructor_single_str(self): + # GH 21987 + expected = Series(["abc"]) + result = Series("abc") + tm.assert_series_equal(result, expected) + + def test_constructor_list_like(self): + # make sure that we are coercing different + # list-likes to standard dtypes and not + # platform specific + expected = Series([1, 2, 3], dtype="int64") + for obj in 
[[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]: + result = Series(obj, index=[0, 1, 2]) + tm.assert_series_equal(result, expected) + + def test_constructor_boolean_index(self): + # GH#18579 + s1 = Series([1, 2, 3], index=[4, 5, 6]) + + index = s1 == 2 + result = Series([1, 3, 2], index=index) + expected = Series([1, 3, 2], index=[False, True, False]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"]) + def test_constructor_index_dtype(self, dtype): + # GH 17088 + + s = Series(Index([0, 2, 4]), dtype=dtype) + assert s.dtype == dtype + + @pytest.mark.parametrize( + "input_vals", + [ + ([1, 2]), + (["1", "2"]), + (list(date_range("1/1/2011", periods=2, freq="h"))), + (list(date_range("1/1/2011", periods=2, freq="h", tz="US/Eastern"))), + ([Interval(left=0, right=5)]), + ], + ) + def test_constructor_list_str(self, input_vals, string_dtype): + # GH 16605 + # Ensure that data elements from a list are converted to strings + # when dtype is str, 'str', or 'U' + result = Series(input_vals, dtype=string_dtype) + expected = Series(input_vals).astype(string_dtype) + tm.assert_series_equal(result, expected) + + def test_constructor_list_str_na(self, string_dtype): + result = Series([1.0, 2.0, np.nan], dtype=string_dtype) + expected = Series(["1.0", "2.0", np.nan], dtype=object) + tm.assert_series_equal(result, expected) + assert np.isnan(result[2]) + + def test_constructor_generator(self): + gen = (i for i in range(10)) + + result = Series(gen) + exp = Series(range(10)) + tm.assert_series_equal(result, exp) + + # same but with non-default index + gen = (i for i in range(10)) + result = Series(gen, index=range(10, 20)) + exp.index = range(10, 20) + tm.assert_series_equal(result, exp) + + def test_constructor_map(self): + # GH8909 + m = (x for x in range(10)) + + result = Series(m) + exp = Series(range(10)) + tm.assert_series_equal(result, exp) + + # same but with non-default index + m = (x for x in range(10)) + result = Series(m, index=range(10, 20)) + exp.index = range(10, 20) + tm.assert_series_equal(result, exp) + + def test_constructor_categorical(self): + cat = Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"]) + res = Series(cat) + tm.assert_categorical_equal(res.values, cat) + + # can cast to a new dtype + result = Series(Categorical([1, 2, 3]), dtype="int64") + expected = Series([1, 2, 3], dtype="int64") + tm.assert_series_equal(result, expected) + + def test_construct_from_categorical_with_dtype(self): + # GH12574 + ser = Series(Categorical([1, 2, 3]), dtype="category") + assert isinstance(ser.dtype, CategoricalDtype) + + def test_construct_intlist_values_category_dtype(self): + ser = Series([1, 2, 3], dtype="category") + assert isinstance(ser.dtype, CategoricalDtype) + + def test_constructor_categorical_with_coercion(self): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]) + # test basic creation / coercion of categoricals + s = Series(factor, name="A") + assert s.dtype == "category" + assert len(s) == len(factor) + + # in a frame + df = DataFrame({"A": factor}) + result = df["A"] + tm.assert_series_equal(result, s) + result = df.iloc[:, 0] + tm.assert_series_equal(result, s) + assert len(df) == len(factor) + + df = DataFrame({"A": s}) + result = df["A"] + tm.assert_series_equal(result, s) + assert len(df) == len(factor) + + # multiples + df = DataFrame({"A": s, "B": s, "C": 1}) + result1 = df["A"] + result2 = df["B"] + tm.assert_series_equal(result1, s) + tm.assert_series_equal(result2, s, 
check_names=False) + assert result2.name == "B" + assert len(df) == len(factor) + + def test_constructor_categorical_with_coercion2(self): + # GH8623 + x = DataFrame( + [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. Doe"]], + columns=["person_id", "person_name"], + ) + x["person_name"] = Categorical(x.person_name) # doing this breaks transform + + expected = x.iloc[0].person_name + result = x.person_name.iloc[0] + assert result == expected + + result = x.person_name[0] + assert result == expected + + result = x.person_name.loc[0] + assert result == expected + + def test_constructor_series_to_categorical(self): + # see GH#16524: test conversion of Series to Categorical + series = Series(["a", "b", "c"]) + + result = Series(series, dtype="category") + expected = Series(["a", "b", "c"], dtype="category") + + tm.assert_series_equal(result, expected) + + def test_constructor_categorical_dtype(self): + result = Series( + ["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True) + ) + assert isinstance(result.dtype, CategoricalDtype) + tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"])) + assert result.cat.ordered + + result = Series(["a", "b"], dtype=CategoricalDtype(["b", "a"])) + assert isinstance(result.dtype, CategoricalDtype) + tm.assert_index_equal(result.cat.categories, Index(["b", "a"])) + assert result.cat.ordered is False + + # GH 19565 - Check broadcasting of scalar with Categorical dtype + result = Series( + "a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True) + ) + expected = Series( + ["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True) + ) + tm.assert_series_equal(result, expected) + + def test_constructor_categorical_string(self): + # GH 26336: the string 'category' maintains existing CategoricalDtype + cdt = CategoricalDtype(categories=list("dabc"), ordered=True) + expected = Series(list("abcabc"), dtype=cdt) + + # Series(Categorical, dtype='category') keeps existing dtype + cat = Categorical(list("abcabc"), dtype=cdt) + result = Series(cat, dtype="category") + tm.assert_series_equal(result, expected) + + # Series(Series[Categorical], dtype='category') keeps existing dtype + result = Series(result, dtype="category") + tm.assert_series_equal(result, expected) + + def test_categorical_sideeffects_free(self): + # Passing a categorical to a Series and then changing values in either + # the series or the categorical should not change the values in the + # other one, IF you specify copy! 
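+        # Condensed sketch of the contract exercised below (mirrors the
+        # asserts, values hypothetical):
+        #
+        #   cat = Categorical(["a", "b"])
+        #   Series(cat, copy=True).values is cat   # -> False, data is copied
+        #   Series(cat, copy=False).values is cat  # -> True, data is shared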
+ cat = Categorical(["a", "b", "c", "a"]) + s = Series(cat, copy=True) + assert s.cat is not cat + s = s.cat.rename_categories([1, 2, 3]) + exp_s = np.array([1, 2, 3, 1], dtype=np.int64) + exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_) + tm.assert_numpy_array_equal(s.__array__(), exp_s) + tm.assert_numpy_array_equal(cat.__array__(), exp_cat) + + # setting + s[0] = 2 + exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64) + tm.assert_numpy_array_equal(s.__array__(), exp_s2) + tm.assert_numpy_array_equal(cat.__array__(), exp_cat) + + # however, copy is False by default + # so this WILL change values + cat = Categorical(["a", "b", "c", "a"]) + s = Series(cat, copy=False) + assert s.values is cat + s = s.cat.rename_categories([1, 2, 3]) + assert s.values is not cat + exp_s = np.array([1, 2, 3, 1], dtype=np.int64) + tm.assert_numpy_array_equal(s.__array__(), exp_s) + + s[0] = 2 + exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64) + tm.assert_numpy_array_equal(s.__array__(), exp_s2) + + def test_unordered_compare_equal(self): + left = Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"])) + right = Series(Categorical(["a", "b", np.nan], categories=["a", "b"])) + tm.assert_series_equal(left, right) + + def test_constructor_maskedarray(self): + data = ma.masked_all((3,), dtype=float) + result = Series(data) + expected = Series([np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + data[0] = 0.0 + data[2] = 2.0 + index = ["a", "b", "c"] + result = Series(data, index=index) + expected = Series([0.0, np.nan, 2.0], index=index) + tm.assert_series_equal(result, expected) + + data[1] = 1.0 + result = Series(data, index=index) + expected = Series([0.0, 1.0, 2.0], index=index) + tm.assert_series_equal(result, expected) + + data = ma.masked_all((3,), dtype=int) + result = Series(data) + expected = Series([np.nan, np.nan, np.nan], dtype=float) + tm.assert_series_equal(result, expected) + + data[0] = 0 + data[2] = 2 + index = ["a", "b", "c"] + result = Series(data, index=index) + expected = Series([0, np.nan, 2], index=index, dtype=float) + tm.assert_series_equal(result, expected) + + data[1] = 1 + result = Series(data, index=index) + expected = Series([0, 1, 2], index=index, dtype=int) + with pytest.raises(AssertionError, match="Series classes are different"): + # TODO should this be raising at all? + # https://github.com/pandas-dev/pandas/issues/56131 + tm.assert_series_equal(result, expected) + + data = ma.masked_all((3,), dtype=bool) + result = Series(data) + expected = Series([np.nan, np.nan, np.nan], dtype=object) + tm.assert_series_equal(result, expected) + + data[0] = True + data[2] = False + index = ["a", "b", "c"] + result = Series(data, index=index) + expected = Series([True, np.nan, False], index=index, dtype=object) + tm.assert_series_equal(result, expected) + + data[1] = True + result = Series(data, index=index) + expected = Series([True, True, False], index=index, dtype=bool) + with pytest.raises(AssertionError, match="Series classes are different"): + # TODO should this be raising at all? 
+ # https://github.com/pandas-dev/pandas/issues/56131 + tm.assert_series_equal(result, expected) + + data = ma.masked_all((3,), dtype="M8[ns]") + result = Series(data) + expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]") + tm.assert_series_equal(result, expected) + + data[0] = datetime(2001, 1, 1) + data[2] = datetime(2001, 1, 3) + index = ["a", "b", "c"] + result = Series(data, index=index) + expected = Series( + [datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)], + index=index, + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + data[1] = datetime(2001, 1, 2) + result = Series(data, index=index) + expected = Series( + [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)], + index=index, + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + def test_constructor_maskedarray_hardened(self): + # Check numpy masked arrays with hard masks -- from GH24574 + data = ma.masked_all((3,), dtype=float).harden_mask() + result = Series(data) + expected = Series([np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + def test_series_ctor_plus_datetimeindex(self, using_copy_on_write): + rng = date_range("20090415", "20090519", freq="B") + data = {k: 1 for k in rng} + + result = Series(data, index=rng) + if using_copy_on_write: + assert result.index.is_(rng) + else: + assert result.index is rng + + def test_constructor_default_index(self): + s = Series([0, 1, 2]) + tm.assert_index_equal(s.index, Index(range(3)), exact=True) + + @pytest.mark.parametrize( + "input", + [ + [1, 2, 3], + (1, 2, 3), + list(range(3)), + Categorical(["a", "b", "a"]), + (i for i in range(3)), + (x for x in range(3)), + ], + ) + def test_constructor_index_mismatch(self, input): + # GH 19342 + # test that construction of a Series with an index of different length + # raises an error + msg = r"Length of values \(3\) does not match length of index \(4\)" + with pytest.raises(ValueError, match=msg): + Series(input, index=np.arange(4)) + + def test_constructor_numpy_scalar(self): + # GH 19342 + # construction with a numpy scalar + # should not raise + result = Series(np.array(100), index=np.arange(4), dtype="int64") + expected = Series(100, index=np.arange(4), dtype="int64") + tm.assert_series_equal(result, expected) + + def test_constructor_broadcast_list(self): + # GH 19342 + # construction with single-element container and index + # should raise + msg = r"Length of values \(1\) does not match length of index \(3\)" + with pytest.raises(ValueError, match=msg): + Series(["foo"], index=["a", "b", "c"]) + + def test_constructor_corner(self): + df = DataFrame(range(5), index=date_range("2020-01-01", periods=5)) + objs = [df, df] + s = Series(objs, index=[0, 1]) + assert isinstance(s, Series) + + def test_constructor_sanitize(self): + s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8") + assert s.dtype == np.dtype("i8") + + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(IntCastingNaNError, match=msg): + Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8") + + def test_constructor_copy(self): + # GH15125 + # test dtype parameter has no side effects on copy=True + for data in [[1.0], np.array([1.0])]: + x = Series(data) + y = Series(x, copy=True, dtype=float) + + # copy=True maintains original data in Series + tm.assert_series_equal(x, y) + + # changes to origin of copy does not affect the copy + x[0] = 2.0 + assert not x.equals(y) + assert x[0] == 2.0 + assert y[0] == 1.0 + + @td.skip_array_manager_invalid_test # TODO(ArrayManager) 
rewrite test + @pytest.mark.parametrize( + "index", + [ + date_range("20170101", periods=3, tz="US/Eastern"), + date_range("20170101", periods=3), + timedelta_range("1 day", periods=3), + period_range("2012Q1", periods=3, freq="Q"), + Index(list("abc")), + Index([1, 2, 3]), + RangeIndex(0, 3), + ], + ids=lambda x: type(x).__name__, + ) + def test_constructor_limit_copies(self, index): + # GH 17449 + # limit copies of input + s = Series(index) + + # we make 1 copy; this is just a smoke test here + assert s._mgr.blocks[0].values is not index + + def test_constructor_shallow_copy(self): + # constructing a Series from Series with copy=False should still + # give a "shallow" copy (share data, not attributes) + # https://github.com/pandas-dev/pandas/issues/49523 + s = Series([1, 2, 3]) + s_orig = s.copy() + s2 = Series(s) + assert s2._mgr is not s._mgr + # Overwriting index of s2 doesn't change s + s2.index = ["a", "b", "c"] + tm.assert_series_equal(s, s_orig) + + def test_constructor_pass_none(self): + s = Series(None, index=range(5)) + assert s.dtype == np.float64 + + s = Series(None, index=range(5), dtype=object) + assert s.dtype == np.object_ + + # GH 7431 + # inference on the index + s = Series(index=np.array([None])) + expected = Series(index=Index([None])) + tm.assert_series_equal(s, expected) + + def test_constructor_pass_nan_nat(self): + # GH 13467 + exp = Series([np.nan, np.nan], dtype=np.float64) + assert exp.dtype == np.float64 + tm.assert_series_equal(Series([np.nan, np.nan]), exp) + tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp) + + exp = Series([NaT, NaT]) + assert exp.dtype == "datetime64[ns]" + tm.assert_series_equal(Series([NaT, NaT]), exp) + tm.assert_series_equal(Series(np.array([NaT, NaT])), exp) + + tm.assert_series_equal(Series([NaT, np.nan]), exp) + tm.assert_series_equal(Series(np.array([NaT, np.nan])), exp) + + tm.assert_series_equal(Series([np.nan, NaT]), exp) + tm.assert_series_equal(Series(np.array([np.nan, NaT])), exp) + + def test_constructor_cast(self): + msg = "could not convert string to float" + with pytest.raises(ValueError, match=msg): + Series(["a", "b", "c"], dtype=float) + + def test_constructor_signed_int_overflow_raises(self): + # GH#41734 disallow silent overflow, enforced in 2.0 + if np_version_gt2: + msg = "The elements provided in the data cannot all be casted to the dtype" + err = OverflowError + else: + msg = "Values are too large to be losslessly converted" + err = ValueError + with pytest.raises(err, match=msg): + Series([1, 200, 923442], dtype="int8") + + with pytest.raises(err, match=msg): + Series([1, 200, 923442], dtype="uint8") + + @pytest.mark.parametrize( + "values", + [ + np.array([1], dtype=np.uint16), + np.array([1], dtype=np.uint32), + np.array([1], dtype=np.uint64), + [np.uint16(1)], + [np.uint32(1)], + [np.uint64(1)], + ], + ) + def test_constructor_numpy_uints(self, values): + # GH#47294 + value = values[0] + result = Series(values) + + assert result[0].dtype == value.dtype + assert result[0] == value + + def test_constructor_unsigned_dtype_overflow(self, any_unsigned_int_numpy_dtype): + # see gh-15832 + if np_version_gt2: + msg = ( + f"The elements provided in the data cannot " + f"all be casted to the dtype {any_unsigned_int_numpy_dtype}" + ) + else: + msg = "Trying to coerce negative values to unsigned integers" + with pytest.raises(OverflowError, match=msg): + Series([-1], dtype=any_unsigned_int_numpy_dtype) + + def test_constructor_floating_data_int_dtype(self, frame_or_series): + # GH#40110 + arr = 
np.random.default_rng(2).standard_normal(2)
+
+        # Long-standing behavior (for Series, new in 2.0 for DataFrame)
+        # has been to ignore the dtype on these;
+        # not clear if this is what we want long-term
+        # expected = frame_or_series(arr)
+
+        # GH#49599 as of 2.0 we raise instead of silently retaining float dtype
+        msg = "Trying to coerce float values to integer"
+        with pytest.raises(ValueError, match=msg):
+            frame_or_series(arr, dtype="i8")
+
+        with pytest.raises(ValueError, match=msg):
+            frame_or_series(list(arr), dtype="i8")
+
+        # pre-2.0, when we had NaNs, we silently ignored the integer dtype
+        arr[0] = np.nan
+        # expected = frame_or_series(arr)
+
+        msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
+        with pytest.raises(IntCastingNaNError, match=msg):
+            frame_or_series(arr, dtype="i8")
+
+        exc = IntCastingNaNError
+        if frame_or_series is Series:
+            # TODO: try to align these
+            exc = ValueError
+            msg = "cannot convert float NaN to integer"
+        with pytest.raises(exc, match=msg):
+            # same behavior if we pass list instead of the ndarray
+            frame_or_series(list(arr), dtype="i8")
+
+        # float array that can be losslessly cast to integers
+        arr = np.array([1.0, 2.0], dtype="float64")
+        expected = frame_or_series(arr.astype("i8"))
+
+        obj = frame_or_series(arr, dtype="i8")
+        tm.assert_equal(obj, expected)
+
+        obj = frame_or_series(list(arr), dtype="i8")
+        tm.assert_equal(obj, expected)
+
+    def test_constructor_coerce_float_fail(self, any_int_numpy_dtype):
+        # see gh-15832
+        # Updated: make sure we treat this list the same as we would treat
+        # the equivalent ndarray
+        # GH#49599 pre-2.0 we silently retained float dtype, in 2.0 we raise
+        vals = [1, 2, 3.5]
+
+        msg = "Trying to coerce float values to integer"
+        with pytest.raises(ValueError, match=msg):
+            Series(vals, dtype=any_int_numpy_dtype)
+        with pytest.raises(ValueError, match=msg):
+            Series(np.array(vals), dtype=any_int_numpy_dtype)
+
+    def test_constructor_coerce_float_valid(self, float_numpy_dtype):
+        s = Series([1, 2, 3.5], dtype=float_numpy_dtype)
+        expected = Series([1, 2, 3.5]).astype(float_numpy_dtype)
+        tm.assert_series_equal(s, expected)
+
+    def test_constructor_invalid_coerce_ints_with_float_nan(self, any_int_numpy_dtype):
+        # GH 22585
+        # Updated: make sure we treat this list the same as we would treat the
+        # equivalent ndarray
+        vals = [1, 2, np.nan]
+        # pre-2.0 this would return with a float dtype, in 2.0 we raise
+
+        msg = "cannot convert float NaN to integer"
+        with pytest.raises(ValueError, match=msg):
+            Series(vals, dtype=any_int_numpy_dtype)
+        msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
+        with pytest.raises(IntCastingNaNError, match=msg):
+            Series(np.array(vals), dtype=any_int_numpy_dtype)
+
+    def test_constructor_dtype_no_cast(self, using_copy_on_write, warn_copy_on_write):
+        # see gh-1572
+        s = Series([1, 2, 3])
+        s2 = Series(s, dtype=np.int64)
+
+        warn = FutureWarning if warn_copy_on_write else None
+        with tm.assert_produces_warning(warn):
+            s2[1] = 5
+        if using_copy_on_write:
+            assert s[1] == 2
+        else:
+            assert s[1] == 5
+
+    def test_constructor_datelike_coercion(self):
+        # GH 9477
+        # incorrectly inferring datetimelike-looking data when object dtype is
+        # specified
+        s = Series([Timestamp("20130101"), "NOV"], dtype=object)
+        assert s.iloc[0] == Timestamp("20130101")
+        assert s.iloc[1] == "NOV"
+        assert s.dtype == object
+
+    def test_constructor_datelike_coercion2(self):
+        # the dtype was being reset on the slicing and re-inferred to datetime
+        # even though the blocks are mixed
+
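+        # The frame below mixes plain-string columns with a datetime column;
+        # selecting a row with .loc must therefore return an object-dtype
+        # Series rather than re-inferring datetime64 for the mixed values.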
belly = "216 3T19".split() + wing1 = "2T15 4H19".split() + wing2 = "416 4T20".split() + mat = pd.to_datetime("2016-01-22 2019-09-07".split()) + df = DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly) + + result = df.loc["3T19"] + assert result.dtype == object + result = df.loc["216"] + assert result.dtype == object + + def test_constructor_mixed_int_and_timestamp(self, frame_or_series): + # specifically Timestamp with nanos, not datetimes + objs = [Timestamp(9), 10, NaT._value] + result = frame_or_series(objs, dtype="M8[ns]") + + expected = frame_or_series([Timestamp(9), Timestamp(10), NaT]) + tm.assert_equal(result, expected) + + def test_constructor_datetimes_with_nulls(self): + # gh-15869 + for arr in [ + np.array([None, None, None, None, datetime.now(), None]), + np.array([None, None, datetime.now(), None]), + ]: + result = Series(arr) + assert result.dtype == "M8[ns]" + + def test_constructor_dtype_datetime64(self): + s = Series(iNaT, dtype="M8[ns]", index=range(5)) + assert isna(s).all() + + # in theory this should be all nulls, but since + # we are not specifying a dtype is ambiguous + s = Series(iNaT, index=range(5)) + assert not isna(s).all() + + s = Series(np.nan, dtype="M8[ns]", index=range(5)) + assert isna(s).all() + + s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]") + assert isna(s[1]) + assert s.dtype == "M8[ns]" + + s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]") + assert isna(s[1]) + assert s.dtype == "M8[ns]" + + def test_constructor_dtype_datetime64_10(self): + # GH3416 + pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)] + dates = [np.datetime64(x) for x in pydates] + + ser = Series(dates) + assert ser.dtype == "M8[ns]" + + ser.iloc[0] = np.nan + assert ser.dtype == "M8[ns]" + + # GH3414 related + expected = Series(pydates, dtype="datetime64[ms]") + + result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]") + tm.assert_series_equal(result, expected) + + result = Series(dates, dtype="datetime64[ms]") + tm.assert_series_equal(result, expected) + + expected = Series( + [NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]" + ) + result = Series([np.nan] + dates[1:], dtype="datetime64[ns]") + tm.assert_series_equal(result, expected) + + def test_constructor_dtype_datetime64_11(self): + pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)] + dates = [np.datetime64(x) for x in pydates] + + dts = Series(dates, dtype="datetime64[ns]") + + # valid astype + dts.astype("int64") + + # invalid casting + msg = r"Converting from datetime64\[ns\] to int32 is not supported" + with pytest.raises(TypeError, match=msg): + dts.astype("int32") + + # ints are ok + # we test with np.int64 to get similar results on + # windows / 32-bit platforms + result = Series(dts, dtype=np.int64) + expected = Series(dts.astype(np.int64)) + tm.assert_series_equal(result, expected) + + def test_constructor_dtype_datetime64_9(self): + # invalid dates can be help as object + result = Series([datetime(2, 1, 1)]) + assert result[0] == datetime(2, 1, 1, 0, 0) + + result = Series([datetime(3000, 1, 1)]) + assert result[0] == datetime(3000, 1, 1, 0, 0) + + def test_constructor_dtype_datetime64_8(self): + # don't mix types + result = Series([Timestamp("20130101"), 1], index=["a", "b"]) + assert result["a"] == Timestamp("20130101") + assert result["b"] == 1 + + def test_constructor_dtype_datetime64_7(self): + # GH6529 + # coerce datetime64 non-ns properly + dates = 
date_range("01-Jan-2015", "01-Dec-2015", freq="ME") + values2 = dates.view(np.ndarray).astype("datetime64[ns]") + expected = Series(values2, index=dates) + + for unit in ["s", "D", "ms", "us", "ns"]: + dtype = np.dtype(f"M8[{unit}]") + values1 = dates.view(np.ndarray).astype(dtype) + result = Series(values1, dates) + if unit == "D": + # for unit="D" we cast to nearest-supported reso, i.e. "s" + dtype = np.dtype("M8[s]") + assert result.dtype == dtype + tm.assert_series_equal(result, expected.astype(dtype)) + + # GH 13876 + # coerce to non-ns to object properly + expected = Series(values2, index=dates, dtype=object) + for dtype in ["s", "D", "ms", "us", "ns"]: + values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]") + result = Series(values1, index=dates, dtype=object) + tm.assert_series_equal(result, expected) + + # leave datetime.date alone + dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object) + series1 = Series(dates2, dates) + tm.assert_numpy_array_equal(series1.values, dates2) + assert series1.dtype == object + + def test_constructor_dtype_datetime64_6(self): + # as of 2.0, these no longer infer datetime64 based on the strings, + # matching the Index behavior + + ser = Series([None, NaT, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == object + + ser = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == object + + ser = Series([NaT, None, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == object + + ser = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == object + + def test_constructor_dtype_datetime64_5(self): + # tz-aware (UTC and other tz's) + # GH 8411 + dr = date_range("20130101", periods=3) + assert Series(dr).iloc[0].tz is None + dr = date_range("20130101", periods=3, tz="UTC") + assert str(Series(dr).iloc[0].tz) == "UTC" + dr = date_range("20130101", periods=3, tz="US/Eastern") + assert str(Series(dr).iloc[0].tz) == "US/Eastern" + + def test_constructor_dtype_datetime64_4(self): + # non-convertible + ser = Series([1479596223000, -1479590, NaT]) + assert ser.dtype == "object" + assert ser[2] is NaT + assert "NaT" in str(ser) + + def test_constructor_dtype_datetime64_3(self): + # if we passed a NaT it remains + ser = Series([datetime(2010, 1, 1), datetime(2, 1, 1), NaT]) + assert ser.dtype == "object" + assert ser[2] is NaT + assert "NaT" in str(ser) + + def test_constructor_dtype_datetime64_2(self): + # if we passed a nan it remains + ser = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan]) + assert ser.dtype == "object" + assert ser[2] is np.nan + assert "NaN" in str(ser) + + def test_constructor_with_datetime_tz(self): + # 8260 + # support datetime64 with tz + + dr = date_range("20130101", periods=3, tz="US/Eastern") + s = Series(dr) + assert s.dtype.name == "datetime64[ns, US/Eastern]" + assert s.dtype == "datetime64[ns, US/Eastern]" + assert isinstance(s.dtype, DatetimeTZDtype) + assert "datetime64[ns, US/Eastern]" in str(s) + + # export + result = s.values + assert isinstance(result, np.ndarray) + assert result.dtype == "datetime64[ns]" + + exp = DatetimeIndex(result) + exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz) + tm.assert_index_equal(dr, exp) + + # indexing + result = s.iloc[0] + assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern") + result = s[0] + assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern") + + result = s[Series([True, True, False], index=s.index)] + tm.assert_series_equal(result, s[0:2]) + + result = s.iloc[0:1] + 
tm.assert_series_equal(result, Series(dr[0:1])) + + # concat + result = pd.concat([s.iloc[0:1], s.iloc[1:]]) + tm.assert_series_equal(result, s) + + # short str + assert "datetime64[ns, US/Eastern]" in str(s) + + # formatting with NaT + result = s.shift() + assert "datetime64[ns, US/Eastern]" in str(result) + assert "NaT" in str(result) + + result = DatetimeIndex(s, freq="infer") + tm.assert_index_equal(result, dr) + + def test_constructor_with_datetime_tz5(self): + # long str + ser = Series(date_range("20130101", periods=1000, tz="US/Eastern")) + assert "datetime64[ns, US/Eastern]" in str(ser) + + def test_constructor_with_datetime_tz4(self): + # inference + ser = Series( + [ + Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), + ] + ) + assert ser.dtype == "datetime64[ns, US/Pacific]" + assert lib.infer_dtype(ser, skipna=True) == "datetime64" + + def test_constructor_with_datetime_tz3(self): + ser = Series( + [ + Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"), + ] + ) + assert ser.dtype == "object" + assert lib.infer_dtype(ser, skipna=True) == "datetime" + + def test_constructor_with_datetime_tz2(self): + # with all NaT + ser = Series(NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") + dti = DatetimeIndex(["NaT", "NaT"], tz="US/Eastern").as_unit("ns") + expected = Series(dti) + tm.assert_series_equal(ser, expected) + + def test_constructor_no_partial_datetime_casting(self): + # GH#40111 + vals = [ + "nan", + Timestamp("1990-01-01"), + "2015-03-14T16:15:14.123-08:00", + "2019-03-04T21:56:32.620-07:00", + None, + ] + ser = Series(vals) + assert all(ser[i] is vals[i] for i in range(len(vals))) + + @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) + @pytest.mark.parametrize("kind", ["M", "m"]) + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) + def test_construction_to_datetimelike_unit(self, arr_dtype, kind, unit): + # tests all units + # gh-19223 + # TODO: GH#19223 was about .astype, doesn't belong here + dtype = f"{kind}8[{unit}]" + arr = np.array([1, 2, 3], dtype=arr_dtype) + ser = Series(arr) + result = ser.astype(dtype) + + expected = Series(arr.astype(dtype)) + + if unit in ["ns", "us", "ms", "s"]: + assert result.dtype == dtype + assert expected.dtype == dtype + else: + # Otherwise we cast to nearest-supported unit, i.e. 
seconds + assert result.dtype == f"{kind}8[s]" + assert expected.dtype == f"{kind}8[s]" + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", NaT, np.nan, None]) + def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg): + # GH 17415: With naive string + result = Series([arg], dtype="datetime64[ns, CET]") + expected = Series(Timestamp(arg)).dt.tz_localize("CET") + tm.assert_series_equal(result, expected) + + def test_constructor_datetime64_bigendian(self): + # GH#30976 + ms = np.datetime64(1, "ms") + arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]") + + result = Series(arr) + expected = Series([Timestamp(ms)]).astype("M8[ms]") + assert expected.dtype == "M8[ms]" + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray]) + def test_construction_interval(self, interval_constructor): + # construction from interval & array of intervals + intervals = interval_constructor.from_breaks(np.arange(3), closed="right") + result = Series(intervals) + assert result.dtype == "interval[int64, right]" + tm.assert_index_equal(Index(result.values), Index(intervals)) + + @pytest.mark.parametrize( + "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] + ) + def test_constructor_infer_interval(self, data_constructor): + # GH 23563: consistent closed results in interval dtype + data = [Interval(0, 1), Interval(0, 2), None] + result = Series(data_constructor(data)) + expected = Series(IntervalArray(data)) + assert result.dtype == "interval[float64, right]" + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] + ) + def test_constructor_interval_mixed_closed(self, data_constructor): + # GH 23563: mixed closed results in object dtype (not interval dtype) + data = [Interval(0, 1, closed="both"), Interval(0, 2, closed="neither")] + result = Series(data_constructor(data)) + assert result.dtype == object + assert result.tolist() == data + + def test_construction_consistency(self): + # make sure that we are not re-localizing upon construction + # GH 14928 + ser = Series(date_range("20130101", periods=3, tz="US/Eastern")) + + result = Series(ser, dtype=ser.dtype) + tm.assert_series_equal(result, ser) + + result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype) + tm.assert_series_equal(result, ser) + + # Pre-2.0 dt64 values were treated as utc, which was inconsistent + # with DatetimeIndex, which treats them as wall times, see GH#33401 + result = Series(ser.values, dtype=ser.dtype) + expected = Series(ser.values).dt.tz_localize(ser.dtype.tz) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(None): + # one suggested alternative to the deprecated (changed in 2.0) usage + middle = Series(ser.values).dt.tz_localize("UTC") + result = middle.dt.tz_convert(ser.dtype.tz) + tm.assert_series_equal(result, ser) + + with tm.assert_produces_warning(None): + # the other suggested alternative to the deprecated usage + result = Series(ser.values.view("int64"), dtype=ser.dtype) + tm.assert_series_equal(result, ser) + + @pytest.mark.parametrize( + "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] + ) + def test_constructor_infer_period(self, data_constructor): + data = [Period("2000", "D"), Period("2001", "D"), None] + result = Series(data_constructor(data)) + expected = Series(period_array(data)) + tm.assert_series_equal(result, expected) + 
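+        # period_array infers freq "D" from the Period scalars and converts
+        # the trailing None to NaT, giving the Period[D] dtype asserted below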
assert result.dtype == "Period[D]" + + @pytest.mark.xfail(reason="PeriodDtype Series not supported yet") + def test_construct_from_ints_including_iNaT_scalar_period_dtype(self): + series = Series([0, 1000, 2000, pd._libs.iNaT], dtype="period[D]") + + val = series[3] + assert isna(val) + + series[2] = val + assert isna(series[2]) + + def test_constructor_period_incompatible_frequency(self): + data = [Period("2000", "D"), Period("2001", "Y")] + result = Series(data) + assert result.dtype == object + assert result.tolist() == data + + def test_constructor_periodindex(self): + # GH7932 + # converting a PeriodIndex when put in a Series + + pi = period_range("20130101", periods=5, freq="D") + s = Series(pi) + assert s.dtype == "Period[D]" + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + expected = Series(pi.astype(object)) + tm.assert_series_equal(s, expected) + + def test_constructor_dict(self): + d = {"a": 0.0, "b": 1.0, "c": 2.0} + + result = Series(d) + expected = Series(d, index=sorted(d.keys())) + tm.assert_series_equal(result, expected) + + result = Series(d, index=["b", "c", "d", "a"]) + expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"]) + tm.assert_series_equal(result, expected) + + pidx = period_range("2020-01-01", periods=10, freq="D") + d = {pidx[0]: 0, pidx[1]: 1} + result = Series(d, index=pidx) + expected = Series(np.nan, pidx, dtype=np.float64) + expected.iloc[0] = 0 + expected.iloc[1] = 1 + tm.assert_series_equal(result, expected) + + def test_constructor_dict_list_value_explicit_dtype(self): + # GH 18625 + d = {"a": [[2], [3], [4]]} + result = Series(d, index=["a"], dtype="object") + expected = Series(d, index=["a"]) + tm.assert_series_equal(result, expected) + + def test_constructor_dict_order(self): + # GH19018 + # initialization ordering: by insertion order + d = {"b": 1, "a": 0, "c": 2} + result = Series(d) + expected = Series([1, 0, 2], index=list("bac")) + tm.assert_series_equal(result, expected) + + def test_constructor_dict_extension(self, ea_scalar_and_dtype, request): + ea_scalar, ea_dtype = ea_scalar_and_dtype + if isinstance(ea_scalar, Timestamp): + mark = pytest.mark.xfail( + reason="Construction from dict goes through " + "maybe_convert_objects which casts to nano" + ) + request.applymarker(mark) + d = {"a": ea_scalar} + result = Series(d, index=["a"]) + expected = Series(ea_scalar, index=["a"], dtype=ea_dtype) + + assert result.dtype == ea_dtype + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("value", [2, np.nan, None, float("nan")]) + def test_constructor_dict_nan_key(self, value): + # GH 18480 + d = {1: "a", value: "b", float("nan"): "c", 4: "d"} + result = Series(d).sort_values() + expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4]) + tm.assert_series_equal(result, expected) + + # MultiIndex: + d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"} + result = Series(d).sort_values() + expected = Series( + ["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)]) + ) + tm.assert_series_equal(result, expected) + + def test_constructor_dict_datetime64_index(self): + # GH 9456 + + dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"] + values = [42544017.198965244, 1234565, 40512335.181958228, -1] + + def create_data(constructor): + return dict(zip((constructor(x) for x in dates_as_str), values)) + + data_datetime64 = create_data(np.datetime64) + data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d")) + data_Timestamp = create_data(Timestamp) 
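+        # np.datetime64, datetime and Timestamp keys all resolve to the same
+        # Timestamp-backed index, so each dict should build an identical Series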
+ + expected = Series(values, (Timestamp(x) for x in dates_as_str)) + + result_datetime64 = Series(data_datetime64) + result_datetime = Series(data_datetime) + result_Timestamp = Series(data_Timestamp) + + tm.assert_series_equal(result_datetime64, expected) + tm.assert_series_equal(result_datetime, expected) + tm.assert_series_equal(result_Timestamp, expected) + + def test_constructor_dict_tuple_indexer(self): + # GH 12948 + data = {(1, 1, None): -1.0} + result = Series(data) + expected = Series( + -1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]]) + ) + tm.assert_series_equal(result, expected) + + def test_constructor_mapping(self, non_dict_mapping_subclass): + # GH 29788 + ndm = non_dict_mapping_subclass({3: "three"}) + result = Series(ndm) + expected = Series(["three"], index=[3]) + + tm.assert_series_equal(result, expected) + + def test_constructor_list_of_tuples(self): + data = [(1, 1), (2, 2), (2, 3)] + s = Series(data) + assert list(s) == data + + def test_constructor_tuple_of_tuples(self): + data = ((1, 1), (2, 2), (2, 3)) + s = Series(data) + assert tuple(s) == data + + def test_constructor_dict_of_tuples(self): + data = {(1, 2): 3, (None, 5): 6} + result = Series(data).sort_values() + expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)])) + tm.assert_series_equal(result, expected) + + # https://github.com/pandas-dev/pandas/issues/22698 + @pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning") + def test_fromDict(self, using_infer_string): + data = {"a": 0, "b": 1, "c": 2, "d": 3} + + series = Series(data) + tm.assert_is_sorted(series.index) + + data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()} + series = Series(data) + assert series.dtype == np.object_ + + data = {"a": 0, "b": "1", "c": "2", "d": "3"} + series = Series(data) + assert series.dtype == np.object_ if not using_infer_string else "string" + + data = {"a": "0", "b": "1"} + series = Series(data, dtype=float) + assert series.dtype == np.float64 + + def test_fromValue(self, datetime_series, using_infer_string): + nans = Series(np.nan, index=datetime_series.index, dtype=np.float64) + assert nans.dtype == np.float64 + assert len(nans) == len(datetime_series) + + strings = Series("foo", index=datetime_series.index) + assert strings.dtype == np.object_ if not using_infer_string else "string" + assert len(strings) == len(datetime_series) + + d = datetime.now() + dates = Series(d, index=datetime_series.index) + assert dates.dtype == "M8[us]" + assert len(dates) == len(datetime_series) + + # GH12336 + # Test construction of categorical series from value + categorical = Series(0, index=datetime_series.index, dtype="category") + expected = Series(0, index=datetime_series.index).astype("category") + assert categorical.dtype == "category" + assert len(categorical) == len(datetime_series) + tm.assert_series_equal(categorical, expected) + + def test_constructor_dtype_timedelta64(self): + # basic + td = Series([timedelta(days=i) for i in range(3)]) + assert td.dtype == "timedelta64[ns]" + + td = Series([timedelta(days=1)]) + assert td.dtype == "timedelta64[ns]" + + td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")]) + + assert td.dtype == "timedelta64[ns]" + + # mixed with NaT + td = Series([timedelta(days=1), NaT], dtype="m8[ns]") + assert td.dtype == "timedelta64[ns]" + + td = Series([timedelta(days=1), np.nan], dtype="m8[ns]") + assert td.dtype == "timedelta64[ns]" + + td = Series([np.timedelta64(300000000), NaT], dtype="m8[ns]") + assert 
td.dtype == "timedelta64[ns]" + + # improved inference + # GH5689 + td = Series([np.timedelta64(300000000), NaT]) + assert td.dtype == "timedelta64[ns]" + + # because iNaT is int, not coerced to timedelta + td = Series([np.timedelta64(300000000), iNaT]) + assert td.dtype == "object" + + td = Series([np.timedelta64(300000000), np.nan]) + assert td.dtype == "timedelta64[ns]" + + td = Series([NaT, np.timedelta64(300000000)]) + assert td.dtype == "timedelta64[ns]" + + td = Series([np.timedelta64(1, "s")]) + assert td.dtype == "timedelta64[ns]" + + # valid astype + td.astype("int64") + + # invalid casting + msg = r"Converting from timedelta64\[ns\] to int32 is not supported" + with pytest.raises(TypeError, match=msg): + td.astype("int32") + + # this is an invalid casting + msg = "|".join( + [ + "Could not convert object to NumPy timedelta", + "Could not convert 'foo' to NumPy timedelta", + ] + ) + with pytest.raises(ValueError, match=msg): + Series([timedelta(days=1), "foo"], dtype="m8[ns]") + + # leave as object here + td = Series([timedelta(days=i) for i in range(3)] + ["foo"]) + assert td.dtype == "object" + + # as of 2.0, these no longer infer timedelta64 based on the strings, + # matching Index behavior + ser = Series([None, NaT, "1 Day"]) + assert ser.dtype == object + + ser = Series([np.nan, NaT, "1 Day"]) + assert ser.dtype == object + + ser = Series([NaT, None, "1 Day"]) + assert ser.dtype == object + + ser = Series([NaT, np.nan, "1 Day"]) + assert ser.dtype == object + + # GH 16406 + def test_constructor_mixed_tz(self): + s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")]) + expected = Series( + [Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")], + dtype="object", + ) + tm.assert_series_equal(s, expected) + + def test_NaT_scalar(self): + series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]") + + val = series[3] + assert isna(val) + + series[2] = val + assert isna(series[2]) + + def test_NaT_cast(self): + # GH10747 + result = Series([np.nan]).astype("M8[ns]") + expected = Series([NaT], dtype="M8[ns]") + tm.assert_series_equal(result, expected) + + def test_constructor_name_hashable(self): + for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]: + for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]: + s = Series(data, name=n) + assert s.name == n + + def test_constructor_name_unhashable(self): + msg = r"Series\.name must be a hashable type" + for n in [["name_list"], np.ones(2), {1: 2}]: + for data in [["name_list"], np.ones(2), {1: 2}]: + with pytest.raises(TypeError, match=msg): + Series(data, name=n) + + def test_auto_conversion(self): + series = Series(list(date_range("1/1/2000", periods=10))) + assert series.dtype == "M8[ns]" + + def test_convert_non_ns(self): + # convert from a numpy array of non-ns timedelta64 + arr = np.array([1, 2, 3], dtype="timedelta64[s]") + ser = Series(arr) + assert ser.dtype == arr.dtype + + tdi = timedelta_range("00:00:01", periods=3, freq="s").as_unit("s") + expected = Series(tdi) + assert expected.dtype == arr.dtype + tm.assert_series_equal(ser, expected) + + # convert from a numpy array of non-ns datetime64 + arr = np.array( + ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]" + ) + ser = Series(arr) + expected = Series(date_range("20130101", periods=3, freq="D"), dtype="M8[s]") + assert expected.dtype == "M8[s]" + tm.assert_series_equal(ser, expected) + + arr = np.array( + ["2013-01-01 00:00:01", "2013-01-01 00:00:02", "2013-01-01 00:00:03"], + dtype="datetime64[s]", + ) + ser = 
Series(arr) + expected = Series( + date_range("20130101 00:00:01", periods=3, freq="s"), dtype="M8[s]" + ) + assert expected.dtype == "M8[s]" + tm.assert_series_equal(ser, expected) + + @pytest.mark.parametrize( + "index", + [ + date_range("1/1/2000", periods=10), + timedelta_range("1 day", periods=10), + period_range("2000-Q1", periods=10, freq="Q"), + ], + ids=lambda x: type(x).__name__, + ) + def test_constructor_cant_cast_datetimelike(self, index): + # floats are not ok + # strip Index to convert PeriodIndex -> Period + # We don't care whether the error message says + # PeriodIndex or PeriodArray + msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to " + + with pytest.raises(TypeError, match=msg): + Series(index, dtype=float) + + # ints are ok + # we test with np.int64 to get similar results on + # windows / 32-bit platforms + result = Series(index, dtype=np.int64) + expected = Series(index.astype(np.int64)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "index", + [ + date_range("1/1/2000", periods=10), + timedelta_range("1 day", periods=10), + period_range("2000-Q1", periods=10, freq="Q"), + ], + ids=lambda x: type(x).__name__, + ) + def test_constructor_cast_object(self, index): + s = Series(index, dtype=object) + exp = Series(index).astype(object) + tm.assert_series_equal(s, exp) + + s = Series(Index(index, dtype=object), dtype=object) + exp = Series(index).astype(object) + tm.assert_series_equal(s, exp) + + s = Series(index.astype(object), dtype=object) + exp = Series(index).astype(object) + tm.assert_series_equal(s, exp) + + @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64]) + def test_constructor_generic_timestamp_no_frequency(self, dtype, request): + # see gh-15524, gh-15987 + msg = "dtype has no unit. Please pass in" + + if np.dtype(dtype).name not in ["timedelta64", "datetime64"]: + mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit") + request.applymarker(mark) + + with pytest.raises(ValueError, match=msg): + Series([], dtype=dtype) + + @pytest.mark.parametrize("unit", ["ps", "as", "fs", "Y", "M", "W", "D", "h", "m"]) + @pytest.mark.parametrize("kind", ["m", "M"]) + def test_constructor_generic_timestamp_bad_frequency(self, kind, unit): + # see gh-15524, gh-15987 + # as of 2.0 we raise on any non-supported unit rather than silently + # cast to nanos; previously we only raised for frequencies higher + # than ns + dtype = f"{kind}8[{unit}]" + + msg = "dtype=.* is not supported. 
Supported resolutions are" + with pytest.raises(TypeError, match=msg): + Series([], dtype=dtype) + + with pytest.raises(TypeError, match=msg): + # pre-2.0 the DataFrame cast raised but the Series case did not + DataFrame([[0]], dtype=dtype) + + @pytest.mark.parametrize("dtype", [None, "uint8", "category"]) + def test_constructor_range_dtype(self, dtype): + # GH 16804 + expected = Series([0, 1, 2, 3, 4], dtype=dtype or "int64") + result = Series(range(5), dtype=dtype) + tm.assert_series_equal(result, expected) + + def test_constructor_range_overflows(self): + # GH#30173 range objects that overflow int64 + rng = range(2**63, 2**63 + 4) + ser = Series(rng) + expected = Series(list(rng)) + tm.assert_series_equal(ser, expected) + assert list(ser) == list(rng) + assert ser.dtype == np.uint64 + + rng2 = range(2**63 + 4, 2**63, -1) + ser2 = Series(rng2) + expected2 = Series(list(rng2)) + tm.assert_series_equal(ser2, expected2) + assert list(ser2) == list(rng2) + assert ser2.dtype == np.uint64 + + rng3 = range(-(2**63), -(2**63) - 4, -1) + ser3 = Series(rng3) + expected3 = Series(list(rng3)) + tm.assert_series_equal(ser3, expected3) + assert list(ser3) == list(rng3) + assert ser3.dtype == object + + rng4 = range(2**73, 2**73 + 4) + ser4 = Series(rng4) + expected4 = Series(list(rng4)) + tm.assert_series_equal(ser4, expected4) + assert list(ser4) == list(rng4) + assert ser4.dtype == object + + def test_constructor_tz_mixed_data(self): + # GH 13051 + dt_list = [ + Timestamp("2016-05-01 02:03:37"), + Timestamp("2016-04-30 19:03:37-0700", tz="US/Pacific"), + ] + result = Series(dt_list) + expected = Series(dt_list, dtype=object) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("pydt", [True, False]) + def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt): + # GH#25843, GH#41555, GH#33401 + tz = tz_aware_fixture + ts = Timestamp("2019", tz=tz) + if pydt: + ts = ts.to_pydatetime() + + msg = ( + "Cannot convert timezone-aware data to timezone-naive dtype. " + r"Use pd.Series\(values\).dt.tz_localize\(None\) instead." 
+ ) + with pytest.raises(ValueError, match=msg): + Series([ts], dtype="datetime64[ns]") + + with pytest.raises(ValueError, match=msg): + Series(np.array([ts], dtype=object), dtype="datetime64[ns]") + + with pytest.raises(ValueError, match=msg): + Series({0: ts}, dtype="datetime64[ns]") + + msg = "Cannot unbox tzaware Timestamp to tznaive dtype" + with pytest.raises(TypeError, match=msg): + Series(ts, index=[0], dtype="datetime64[ns]") + + def test_constructor_datetime64(self): + rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") + dates = np.asarray(rng) + + series = Series(dates) + assert np.issubdtype(series.dtype, np.dtype("M8[ns]")) + + def test_constructor_datetimelike_scalar_to_string_dtype( + self, nullable_string_dtype + ): + # https://github.com/pandas-dev/pandas/pull/33846 + result = Series("M", index=[1, 2, 3], dtype=nullable_string_dtype) + expected = Series(["M", "M", "M"], index=[1, 2, 3], dtype=nullable_string_dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", + [ + [np.datetime64("2012-01-01"), np.datetime64("2013-01-01")], + ["2012-01-01", "2013-01-01"], + ], + ) + def test_constructor_sparse_datetime64(self, values): + # https://github.com/pandas-dev/pandas/issues/35762 + dtype = pd.SparseDtype("datetime64[ns]") + result = Series(values, dtype=dtype) + arr = pd.arrays.SparseArray(values, dtype=dtype) + expected = Series(arr) + tm.assert_series_equal(result, expected) + + def test_construction_from_ordered_collection(self): + # https://github.com/pandas-dev/pandas/issues/36044 + result = Series({"a": 1, "b": 2}.keys()) + expected = Series(["a", "b"]) + tm.assert_series_equal(result, expected) + + result = Series({"a": 1, "b": 2}.values()) + expected = Series([1, 2]) + tm.assert_series_equal(result, expected) + + def test_construction_from_large_int_scalar_no_overflow(self): + # https://github.com/pandas-dev/pandas/issues/36291 + n = 1_000_000_000_000_000_000_000 + result = Series(n, index=[0]) + expected = Series(n) + tm.assert_series_equal(result, expected) + + def test_constructor_list_of_periods_infers_period_dtype(self): + series = Series(list(period_range("2000-01-01", periods=10, freq="D"))) + assert series.dtype == "Period[D]" + + series = Series( + [Period("2011-01-01", freq="D"), Period("2011-02-01", freq="D")] + ) + assert series.dtype == "Period[D]" + + def test_constructor_subclass_dict(self, dict_subclass): + data = dict_subclass((x, 10.0 * x) for x in range(10)) + series = Series(data) + expected = Series(dict(data.items())) + tm.assert_series_equal(series, expected) + + def test_constructor_ordereddict(self): + # GH3283 + data = OrderedDict( + (f"col{i}", np.random.default_rng(2).random()) for i in range(12) + ) + + series = Series(data) + expected = Series(list(data.values()), list(data.keys())) + tm.assert_series_equal(series, expected) + + # Test with subclass + class A(OrderedDict): + pass + + series = Series(A(data)) + tm.assert_series_equal(series, expected) + + def test_constructor_dict_multiindex(self): + d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0} + _d = sorted(d.items()) + result = Series(d) + expected = Series( + [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d]) + ) + tm.assert_series_equal(result, expected) + + d["z"] = 111.0 + _d.insert(0, ("z", d["z"])) + result = Series(d) + expected = Series( + [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False) + ) + result = result.reindex(index=expected.index) + tm.assert_series_equal(result, 
expected)
+
+    def test_constructor_dict_multiindex_reindex_flat(self):
+        # construction involves reindexing with a MultiIndex corner case
+        data = {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2, "j": np.nan}
+        expected = Series(data)
+
+        result = Series(expected[:-1].to_dict(), index=expected.index)
+        tm.assert_series_equal(result, expected)
+
+    def test_constructor_dict_timedelta_index(self):
+        # GH #12169 : Resample category data with timedelta index
+        # construct Series from dict as data and TimedeltaIndex as index
+        # will result in NaN in the resulting Series data
+        expected = Series(
+            data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
+        )
+
+        result = Series(
+            data={
+                pd.to_timedelta(0, unit="s"): "A",
+                pd.to_timedelta(10, unit="s"): "B",
+                pd.to_timedelta(20, unit="s"): "C",
+            },
+            index=pd.to_timedelta([0, 10, 20], unit="s"),
+        )
+        tm.assert_series_equal(result, expected)
+
+    def test_constructor_infer_index_tz(self):
+        values = [188.5, 328.25]
+        tzinfo = tzoffset(None, 7200)
+        index = [
+            datetime(2012, 5, 11, 11, tzinfo=tzinfo),
+            datetime(2012, 5, 11, 12, tzinfo=tzinfo),
+        ]
+        series = Series(data=values, index=index)
+
+        assert series.index.tz == tzinfo
+
+        # it works! GH#2443
+        repr(series.index[0])
+
+    def test_constructor_with_pandas_dtype(self):
+        # going through 2D->1D path
+        vals = [(1,), (2,), (3,)]
+        ser = Series(vals)
+        dtype = ser.array.dtype  # NumpyEADtype
+        ser2 = Series(vals, dtype=dtype)
+        tm.assert_series_equal(ser, ser2)
+
+    def test_constructor_int_dtype_missing_values(self):
+        # GH#43017
+        result = Series(index=[0], dtype="int64")
+        expected = Series(np.nan, index=[0], dtype="float64")
+        tm.assert_series_equal(result, expected)
+
+    def test_constructor_bool_dtype_missing_values(self):
+        # GH#43018
+        result = Series(index=[0], dtype="bool")
+        expected = Series(True, index=[0], dtype="bool")
+        tm.assert_series_equal(result, expected)
+
+    def test_constructor_int64_dtype(self, any_int_dtype):
+        # GH#44923
+        result = Series(["0", "1", "2"], dtype=any_int_dtype)
+        expected = Series([0, 1, 2], dtype=any_int_dtype)
+        tm.assert_series_equal(result, expected)
+
+    def test_constructor_raise_on_lossy_conversion_of_strings(self):
+        # GH#44923
+        if not np_version_gt2:
+            raises = pytest.raises(
+                ValueError, match="string values cannot be losslessly cast to int8"
+            )
+        else:
+            raises = pytest.raises(
+                OverflowError, match="The elements provided in the data"
+            )
+        with raises:
+            Series(["128"], dtype="int8")
+
+    def test_constructor_dtype_timedelta_alternative_construct(self):
+        # GH#35465
+        result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]")
+        expected = Series(pd.to_timedelta([1000000, 200000, 3000000], unit="ns"))
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.xfail(
+        reason="Not clear what the correct expected behavior should be with "
+        "integers now that we support non-nano. ATM (2022-10-08) we treat ints "
+        "as nanoseconds, then cast to the requested dtype. xref #48312"
+    )
+    def test_constructor_dtype_timedelta_ns_s(self):
+        # GH#35465
+        result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]")
+        expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]")
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.xfail(
+        reason="Not clear what the correct expected behavior should be with "
+        "integers now that we support non-nano. ATM (2022-10-08) we treat ints "
+        "as nanoseconds, then cast to the requested dtype. 
xref #48312" + ) + def test_constructor_dtype_timedelta_ns_s_astype_int64(self): + # GH#35465 + result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]").astype( + "int64" + ) + expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]").astype( + "int64" + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:elementwise comparison failed:DeprecationWarning" + ) + @pytest.mark.parametrize("func", [Series, DataFrame, Index, pd.array]) + def test_constructor_mismatched_null_nullable_dtype( + self, func, any_numeric_ea_dtype + ): + # GH#44514 + msg = "|".join( + [ + "cannot safely cast non-equivalent object", + r"int\(\) argument must be a string, a bytes-like object " + "or a (real )?number", + r"Cannot cast array data from dtype\('O'\) to dtype\('float64'\) " + "according to the rule 'safe'", + "object cannot be converted to a FloatingDtype", + "'values' contains non-numeric NA", + ] + ) + + for null in tm.NP_NAT_OBJECTS + [NaT]: + with pytest.raises(TypeError, match=msg): + func([null, 1.0, 3.0], dtype=any_numeric_ea_dtype) + + def test_series_constructor_ea_int_from_bool(self): + # GH#42137 + result = Series([True, False, True, pd.NA], dtype="Int64") + expected = Series([1, 0, 1, pd.NA], dtype="Int64") + tm.assert_series_equal(result, expected) + + result = Series([True, False, True], dtype="Int64") + expected = Series([1, 0, 1], dtype="Int64") + tm.assert_series_equal(result, expected) + + def test_series_constructor_ea_int_from_string_bool(self): + # GH#42137 + with pytest.raises(ValueError, match="invalid literal"): + Series(["True", "False", "True", pd.NA], dtype="Int64") + + @pytest.mark.parametrize("val", [1, 1.0]) + def test_series_constructor_overflow_uint_ea(self, val): + # GH#38798 + max_val = np.iinfo(np.uint64).max - 1 + result = Series([max_val, val], dtype="UInt64") + expected = Series(np.array([max_val, 1], dtype="uint64"), dtype="UInt64") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("val", [1, 1.0]) + def test_series_constructor_overflow_uint_ea_with_na(self, val): + # GH#38798 + max_val = np.iinfo(np.uint64).max - 1 + result = Series([max_val, val, pd.NA], dtype="UInt64") + expected = Series( + IntegerArray( + np.array([max_val, 1, 0], dtype="uint64"), + np.array([0, 0, 1], dtype=np.bool_), + ) + ) + tm.assert_series_equal(result, expected) + + def test_series_constructor_overflow_uint_with_nan(self): + # GH#38798 + max_val = np.iinfo(np.uint64).max - 1 + result = Series([max_val, np.nan], dtype="UInt64") + expected = Series( + IntegerArray( + np.array([max_val, 1], dtype="uint64"), + np.array([0, 1], dtype=np.bool_), + ) + ) + tm.assert_series_equal(result, expected) + + def test_series_constructor_ea_all_na(self): + # GH#38798 + result = Series([np.nan, np.nan], dtype="UInt64") + expected = Series( + IntegerArray( + np.array([1, 1], dtype="uint64"), + np.array([1, 1], dtype=np.bool_), + ) + ) + tm.assert_series_equal(result, expected) + + def test_series_from_index_dtype_equal_does_not_copy(self): + # GH#52008 + idx = Index([1, 2, 3]) + expected = idx.copy(deep=True) + ser = Series(idx, dtype="int64") + ser.iloc[0] = 100 + tm.assert_index_equal(idx, expected) + + def test_series_string_inference(self): + # GH#54430 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + expected = Series(["a", "b"], dtype=dtype) + with pd.option_context("future.infer_string", True): + ser = Series(["a", "b"]) + tm.assert_series_equal(ser, expected) + + expected = Series(["a", 1], 
dtype="object") + with pd.option_context("future.infer_string", True): + ser = Series(["a", 1]) + tm.assert_series_equal(ser, expected) + + @pytest.mark.parametrize("na_value", [None, np.nan, pd.NA]) + def test_series_string_with_na_inference(self, na_value): + # GH#54430 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + expected = Series(["a", na_value], dtype=dtype) + with pd.option_context("future.infer_string", True): + ser = Series(["a", na_value]) + tm.assert_series_equal(ser, expected) + + def test_series_string_inference_scalar(self): + # GH#54430 + pytest.importorskip("pyarrow") + expected = Series("a", index=[1], dtype="string[pyarrow_numpy]") + with pd.option_context("future.infer_string", True): + ser = Series("a", index=[1]) + tm.assert_series_equal(ser, expected) + + def test_series_string_inference_array_string_dtype(self): + # GH#54496 + pytest.importorskip("pyarrow") + expected = Series(["a", "b"], dtype="string[pyarrow_numpy]") + with pd.option_context("future.infer_string", True): + ser = Series(np.array(["a", "b"])) + tm.assert_series_equal(ser, expected) + + def test_series_string_inference_storage_definition(self): + # GH#54793 + pytest.importorskip("pyarrow") + expected = Series(["a", "b"], dtype="string[pyarrow_numpy]") + with pd.option_context("future.infer_string", True): + result = Series(["a", "b"], dtype="string") + tm.assert_series_equal(result, expected) + + def test_series_constructor_infer_string_scalar(self): + # GH#55537 + with pd.option_context("future.infer_string", True): + ser = Series("a", index=[1, 2], dtype="string[python]") + expected = Series(["a", "a"], index=[1, 2], dtype="string[python]") + tm.assert_series_equal(ser, expected) + assert ser.dtype.storage == "python" + + def test_series_string_inference_na_first(self): + # GH#55655 + pytest.importorskip("pyarrow") + expected = Series([pd.NA, "b"], dtype="string[pyarrow_numpy]") + with pd.option_context("future.infer_string", True): + result = Series([pd.NA, "b"]) + tm.assert_series_equal(result, expected) + + def test_inference_on_pandas_objects(self): + # GH#56012 + ser = Series([Timestamp("2019-12-31")], dtype=object) + with tm.assert_produces_warning(None): + # This doesn't do inference + result = Series(ser) + assert result.dtype == np.object_ + + idx = Index([Timestamp("2019-12-31")], dtype=object) + + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + result = Series(idx) + assert result.dtype != np.object_ + + +class TestSeriesConstructorIndexCoercion: + def test_series_constructor_datetimelike_index_coercion(self): + idx = date_range("2020-01-01", periods=5) + ser = Series( + np.random.default_rng(2).standard_normal(len(idx)), idx.astype(object) + ) + # as of 2.0, we no longer silently cast the object-dtype index + # to DatetimeIndex GH#39307, GH#23598 + assert not isinstance(ser.index, DatetimeIndex) + + @pytest.mark.parametrize("container", [None, np.array, Series, Index]) + @pytest.mark.parametrize("data", [1.0, range(4)]) + def test_series_constructor_infer_multiindex(self, container, data): + indexes = [["a", "a", "b", "b"], ["x", "y", "x", "y"]] + if container is not None: + indexes = [container(ind) for ind in indexes] + + multi = Series(data, index=indexes) + assert isinstance(multi.index, MultiIndex) + + # TODO: make this not cast to object in pandas 3.0 + @pytest.mark.skipif( + not np_version_gt2, reason="StringDType only available in numpy 2 and above" + ) + @pytest.mark.parametrize( + "data", + [ + ["a", "b", "c"], + ["a", "b", 
np.nan], + ], + ) + def test_np_string_array_object_cast(self, data): + from numpy.dtypes import StringDType + + arr = np.array(data, dtype=StringDType()) + res = Series(arr) + assert res.dtype == np.object_ + assert (res == data).all() + + +class TestSeriesConstructorInternals: + def test_constructor_no_pandas_array(self, using_array_manager): + ser = Series([1, 2, 3]) + result = Series(ser.array) + tm.assert_series_equal(ser, result) + if not using_array_manager: + assert isinstance(result._mgr.blocks[0], NumpyBlock) + assert result._mgr.blocks[0].is_numeric + + @td.skip_array_manager_invalid_test + def test_from_array(self): + result = Series(pd.array(["1h", "2h"], dtype="timedelta64[ns]")) + assert result._mgr.blocks[0].is_extension is False + + result = Series(pd.array(["2015"], dtype="datetime64[ns]")) + assert result._mgr.blocks[0].is_extension is False + + @td.skip_array_manager_invalid_test + def test_from_list_dtype(self): + result = Series(["1h", "2h"], dtype="timedelta64[ns]") + assert result._mgr.blocks[0].is_extension is False + + result = Series(["2015"], dtype="datetime64[ns]") + assert result._mgr.blocks[0].is_extension is False + + +def test_constructor(rand_series_with_duplicate_datetimeindex): + dups = rand_series_with_duplicate_datetimeindex + assert isinstance(dups, Series) + assert isinstance(dups.index, DatetimeIndex) + + +@pytest.mark.parametrize( + "input_dict,expected", + [ + ({0: 0}, np.array([[0]], dtype=np.int64)), + ({"a": "a"}, np.array([["a"]], dtype=object)), + ({1: 1}, np.array([[1]], dtype=np.int64)), + ], +) +def test_numpy_array(input_dict, expected): + result = np.array([Series(input_dict)]) + tm.assert_numpy_array_equal(result, expected) + + +def test_index_ordered_dict_keys(): + # GH 22077 + + param_index = OrderedDict( + [ + ((("a", "b"), ("c", "d")), 1), + ((("a", None), ("c", "d")), 2), + ] + ) + series = Series([1, 2], index=param_index.keys()) + expected = Series( + [1, 2], + index=MultiIndex.from_tuples( + [(("a", "b"), ("c", "d")), (("a", None), ("c", "d"))] + ), + ) + tm.assert_series_equal(series, expected) + + +@pytest.mark.parametrize( + "input_list", + [ + [1, complex("nan"), 2], + [1 + 1j, complex("nan"), 2 + 2j], + ], +) +def test_series_with_complex_nan(input_list): + # GH#53627 + ser = Series(input_list) + result = Series(ser.array) + assert ser.dtype == "complex128" + tm.assert_series_equal(ser, result) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_cumulative.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_cumulative.py new file mode 100644 index 0000000000000000000000000000000000000000..e6f7b2a5e69e0a97e2f898c6a665372ba3ec2a6b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_cumulative.py @@ -0,0 +1,157 @@ +""" +Tests for Series cumulative operations. 
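+Covers cumsum, cumprod, cummin and cummax over numeric, datetimelike,
+bool and object data.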
+ +See also +-------- +tests.frame.test_cumulative +""" + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +methods = { + "cumsum": np.cumsum, + "cumprod": np.cumprod, + "cummin": np.minimum.accumulate, + "cummax": np.maximum.accumulate, +} + + +class TestSeriesCumulativeOps: + @pytest.mark.parametrize("func", [np.cumsum, np.cumprod]) + def test_datetime_series(self, datetime_series, func): + tm.assert_numpy_array_equal( + func(datetime_series).values, + func(np.array(datetime_series)), + check_dtype=True, + ) + + # with missing values + ts = datetime_series.copy() + ts[::2] = np.nan + + result = func(ts)[1::2] + expected = func(np.array(ts.dropna())) + + tm.assert_numpy_array_equal(result.values, expected, check_dtype=False) + + @pytest.mark.parametrize("method", ["cummin", "cummax"]) + def test_cummin_cummax(self, datetime_series, method): + ufunc = methods[method] + + result = getattr(datetime_series, method)().values + expected = ufunc(np.array(datetime_series)) + + tm.assert_numpy_array_equal(result, expected) + ts = datetime_series.copy() + ts[::2] = np.nan + result = getattr(ts, method)()[1::2] + expected = ufunc(ts.dropna()) + + result.index = result.index._with_freq(None) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "ts", + [ + pd.Timedelta(0), + pd.Timestamp("1999-12-31"), + pd.Timestamp("1999-12-31").tz_localize("US/Pacific"), + ], + ) + @pytest.mark.parametrize( + "method, skipna, exp_tdi", + [ + ["cummax", True, ["NaT", "2 days", "NaT", "2 days", "NaT", "3 days"]], + ["cummin", True, ["NaT", "2 days", "NaT", "1 days", "NaT", "1 days"]], + [ + "cummax", + False, + ["NaT", "NaT", "NaT", "NaT", "NaT", "NaT"], + ], + [ + "cummin", + False, + ["NaT", "NaT", "NaT", "NaT", "NaT", "NaT"], + ], + ], + ) + def test_cummin_cummax_datetimelike(self, ts, method, skipna, exp_tdi): + # with ts==pd.Timedelta(0), we are testing td64; with naive Timestamp + # we are testing datetime64[ns]; with Timestamp[US/Pacific] + # we are testing dt64tz + tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "3 days"]) + ser = pd.Series(tdi + ts) + + exp_tdi = pd.to_timedelta(exp_tdi) + expected = pd.Series(exp_tdi + ts) + result = getattr(ser, method)(skipna=skipna) + tm.assert_series_equal(expected, result) + + @pytest.mark.parametrize( + "func, exp", + [ + ("cummin", pd.Period("2012-1-1", freq="D")), + ("cummax", pd.Period("2012-1-2", freq="D")), + ], + ) + def test_cummin_cummax_period(self, func, exp): + # GH#28385 + ser = pd.Series( + [pd.Period("2012-1-1", freq="D"), pd.NaT, pd.Period("2012-1-2", freq="D")] + ) + result = getattr(ser, func)(skipna=False) + expected = pd.Series([pd.Period("2012-1-1", freq="D"), pd.NaT, pd.NaT]) + tm.assert_series_equal(result, expected) + + result = getattr(ser, func)(skipna=True) + expected = pd.Series([pd.Period("2012-1-1", freq="D"), pd.NaT, exp]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "arg", + [ + [False, False, False, True, True, False, False], + [False, False, False, False, False, False, False], + ], + ) + @pytest.mark.parametrize( + "func", [lambda x: x, lambda x: ~x], ids=["identity", "inverse"] + ) + @pytest.mark.parametrize("method", methods.keys()) + def test_cummethods_bool(self, arg, func, method): + # GH#6270 + # checking Series method vs the ufunc applied to the values + + ser = func(pd.Series(arg)) + ufunc = methods[method] + + exp_vals = ufunc(ser.values) + expected = pd.Series(exp_vals) + + result = getattr(ser, method)() + + 
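+        # e.g. for the identity case with arg=[F, F, F, T, T, F, F],
+        # cummax yields [F, F, F, T, T, T, T] while cummin stays all-False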
tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "method, expected", + [ + ["cumsum", pd.Series([0, 1, np.nan, 1], dtype=object)], + ["cumprod", pd.Series([False, 0, np.nan, 0])], + ["cummin", pd.Series([False, False, np.nan, False])], + ["cummax", pd.Series([False, True, np.nan, True])], + ], + ) + def test_cummethods_bool_in_object_dtype(self, method, expected): + ser = pd.Series([False, True, np.nan, False]) + result = getattr(ser, method)() + tm.assert_series_equal(result, expected) + + def test_cumprod_timedelta(self): + # GH#48111 + ser = pd.Series([pd.Timedelta(days=1), pd.Timedelta(days=3)]) + with pytest.raises(TypeError, match="cumprod not supported for Timedelta"): + ser.cumprod() diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_formats.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..a1c5018ea7961c5979b4f407f7096e81e2dda0c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_formats.py @@ -0,0 +1,577 @@ +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Index, + Series, + date_range, + option_context, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestSeriesRepr: + def test_multilevel_name_print_0(self): + # GH#55415 None does not get printed, but 0 does + # (matching DataFrame and flat index behavior) + mi = pd.MultiIndex.from_product([range(2, 3), range(3, 4)], names=[0, None]) + ser = Series(1.5, index=mi) + + res = repr(ser) + expected = "0 \n2 3 1.5\ndtype: float64" + assert res == expected + + def test_multilevel_name_print(self, lexsorted_two_level_string_multiindex): + index = lexsorted_two_level_string_multiindex + ser = Series(range(len(index)), index=index, name="sth") + expected = [ + "first second", + "foo one 0", + " two 1", + " three 2", + "bar one 3", + " two 4", + "baz two 5", + " three 6", + "qux one 7", + " two 8", + " three 9", + "Name: sth, dtype: int64", + ] + expected = "\n".join(expected) + assert repr(ser) == expected + + def test_small_name_printing(self): + # Test small Series. + s = Series([0, 1, 2]) + + s.name = "test" + assert "Name: test" in repr(s) + + s.name = None + assert "Name:" not in repr(s) + + def test_big_name_printing(self): + # Test big Series (diff code path). 
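+        # 1000 rows is well past the default display.max_rows (60), so this
+        # exercises the truncated-repr code path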
+ s = Series(range(1000)) + + s.name = "test" + assert "Name: test" in repr(s) + + s.name = None + assert "Name:" not in repr(s) + + def test_empty_name_printing(self): + s = Series(index=date_range("20010101", "20020101"), name="test", dtype=object) + assert "Name: test" in repr(s) + + @pytest.mark.parametrize("args", [(), (0, -1)]) + def test_float_range(self, args): + str( + Series( + np.random.default_rng(2).standard_normal(1000), + index=np.arange(1000, *args), + ) + ) + + def test_empty_object(self): + # empty + str(Series(dtype=object)) + + def test_string(self, string_series): + str(string_series) + str(string_series.astype(int)) + + # with NaNs + string_series[5:7] = np.nan + str(string_series) + + def test_object(self, object_series): + str(object_series) + + def test_datetime(self, datetime_series): + str(datetime_series) + # with Nones + ots = datetime_series.astype("O") + ots[::2] = None + repr(ots) + + @pytest.mark.parametrize( + "name", + [ + "", + 1, + 1.2, + "foo", + "\u03B1\u03B2\u03B3", + "loooooooooooooooooooooooooooooooooooooooooooooooooooong", + ("foo", "bar", "baz"), + (1, 2), + ("foo", 1, 2.3), + ("\u03B1", "\u03B2", "\u03B3"), + ("\u03B1", "bar"), + ], + ) + def test_various_names(self, name, string_series): + # various names + string_series.name = name + repr(string_series) + + def test_tuple_name(self): + biggie = Series( + np.random.default_rng(2).standard_normal(1000), + index=np.arange(1000), + name=("foo", "bar", "baz"), + ) + repr(biggie) + + @pytest.mark.parametrize("arg", [100, 1001]) + def test_tidy_repr_name_0(self, arg): + # tidy repr + ser = Series(np.random.default_rng(2).standard_normal(arg), name=0) + rep_str = repr(ser) + assert "Name: 0" in rep_str + + @pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="TODO: investigate why this is failing" + ) + def test_newline(self): + ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"]) + assert "\t" not in repr(ser) + assert "\r" not in repr(ser) + assert "a\n" not in repr(ser) + + @pytest.mark.parametrize( + "name, expected", + [ + ["foo", "Series([], Name: foo, dtype: int64)"], + [None, "Series([], dtype: int64)"], + ], + ) + def test_empty_int64(self, name, expected): + # with empty series (#4651) + s = Series([], dtype=np.int64, name=name) + assert repr(s) == expected + + def test_repr_bool_fails(self, capsys): + s = Series( + [ + DataFrame(np.random.default_rng(2).standard_normal((2, 2))) + for i in range(5) + ] + ) + + # It works (with no Cython exception barf)! + repr(s) + + captured = capsys.readouterr() + assert captured.err == "" + + def test_repr_name_iterable_indexable(self): + s = Series([1, 2, 3], name=np.int64(3)) + + # it works! 
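+        # Editor's note (illustrative): any hashable works as a name and is
+        # simply stringified by repr, e.g. Series([1], name=np.int64(3)).name
+        # evaluates to 3 and renders in the footer as "Name: 3".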
+ repr(s) + + s.name = ("\u05d0",) * 2 + repr(s) + + def test_repr_max_rows(self): + # GH 6863 + with option_context("display.max_rows", None): + str(Series(range(1001))) # should not raise exception + + def test_unicode_string_with_unicode(self): + df = Series(["\u05d0"], name="\u05d1") + str(df) + + ser = Series(["\u03c3"] * 10) + repr(ser) + + ser2 = Series(["\u05d0"] * 1000) + ser2.name = "title1" + repr(ser2) + + def test_str_to_bytes_raises(self): + # GH 26447 + df = Series(["abc"], name="abc") + msg = "^'str' object cannot be interpreted as an integer$" + with pytest.raises(TypeError, match=msg): + bytes(df) + + def test_timeseries_repr_object_dtype(self): + index = Index( + [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)], dtype=object + ) + ts = Series(np.random.default_rng(2).standard_normal(len(index)), index) + repr(ts) + + ts = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + assert repr(ts).splitlines()[-1].startswith("Freq:") + + ts2 = ts.iloc[np.random.default_rng(2).integers(0, len(ts) - 1, 400)] + repr(ts2).splitlines()[-1] + + def test_latex_repr(self): + pytest.importorskip("jinja2") # uses Styler implementation + result = r"""\begin{tabular}{ll} +\toprule + & 0 \\ +\midrule +0 & $\alpha$ \\ +1 & b \\ +2 & c \\ +\bottomrule +\end{tabular} +""" + with option_context( + "styler.format.escape", None, "styler.render.repr", "latex" + ): + s = Series([r"$\alpha$", "b", "c"]) + assert result == s._repr_latex_() + + assert s._repr_latex_() is None + + def test_index_repr_in_frame_with_nan(self): + # see gh-25061 + i = Index([1, np.nan]) + s = Series([1, 2], index=i) + exp = """1.0 1\nNaN 2\ndtype: int64""" + + assert repr(s) == exp + + def test_format_pre_1900_dates(self): + rng = date_range("1/1/1850", "1/1/1950", freq="YE-DEC") + msg = "DatetimeIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + rng.format() + ts = Series(1, index=rng) + repr(ts) + + def test_series_repr_nat(self): + series = Series([0, 1000, 2000, pd.NaT._value], dtype="M8[ns]") + + result = repr(series) + expected = ( + "0 1970-01-01 00:00:00.000000\n" + "1 1970-01-01 00:00:00.000001\n" + "2 1970-01-01 00:00:00.000002\n" + "3 NaT\n" + "dtype: datetime64[ns]" + ) + assert result == expected + + def test_float_repr(self): + # GH#35603 + # check float format when cast to object + ser = Series([1.0]).astype(object) + expected = "0 1.0\ndtype: object" + assert repr(ser) == expected + + def test_different_null_objects(self): + # GH#45263 + ser = Series([1, 2, 3, 4], [True, None, np.nan, pd.NaT]) + result = repr(ser) + expected = "True 1\nNone 2\nNaN 3\nNaT 4\ndtype: int64" + assert result == expected + + +class TestCategoricalRepr: + def test_categorical_repr_unicode(self): + # see gh-21002 + + class County: + name = "San Sebastián" + state = "PR" + + def __repr__(self) -> str: + return self.name + ", " + self.state + + cat = Categorical([County() for _ in range(61)]) + idx = Index(cat) + ser = idx.to_series() + + repr(ser) + str(ser) + + def test_categorical_repr(self, using_infer_string): + a = Series(Categorical([1, 2, 3, 4])) + exp = ( + "0 1\n1 2\n2 3\n3 4\n" + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]" + ) + + assert exp == a.__str__() + + a = Series(Categorical(["a", "b"] * 25)) + if using_infer_string: + exp = ( + "0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" + "Length: 50, dtype: category\nCategories (2, string): [a, b]" + ) + else: + exp = ( + "0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" + "Length: 50, dtype: 
category\nCategories (2, object): ['a', 'b']" + ) + with option_context("display.max_rows", 5): + assert exp == repr(a) + + levs = list("abcdefghijklmnopqrstuvwxyz") + a = Series(Categorical(["a", "b"], categories=levs, ordered=True)) + if using_infer_string: + exp = ( + "0 a\n1 b\n" + "dtype: category\n" + "Categories (26, string): [a < b < c < d ... w < x < y < z]" + ) + else: + exp = ( + "0 a\n1 b\n" + "dtype: category\n" + "Categories (26, object): ['a' < 'b' < 'c' < 'd' ... " + "'w' < 'x' < 'y' < 'z']" + ) + assert exp == a.__str__() + + def test_categorical_series_repr(self): + s = Series(Categorical([1, 2, 3])) + exp = """0 1 +1 2 +2 3 +dtype: category +Categories (3, int64): [1, 2, 3]""" + + assert repr(s) == exp + + s = Series(Categorical(np.arange(10))) + exp = f"""0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +dtype: category +Categories (10, {np.dtype(int)}): [0, 1, 2, 3, ..., 6, 7, 8, 9]""" + + assert repr(s) == exp + + def test_categorical_series_repr_ordered(self): + s = Series(Categorical([1, 2, 3], ordered=True)) + exp = """0 1 +1 2 +2 3 +dtype: category +Categories (3, int64): [1 < 2 < 3]""" + + assert repr(s) == exp + + s = Series(Categorical(np.arange(10), ordered=True)) + exp = f"""0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +dtype: category +Categories (10, {np.dtype(int)}): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]""" + + assert repr(s) == exp + + def test_categorical_series_repr_datetime(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + s = Series(Categorical(idx)) + exp = """0 2011-01-01 09:00:00 +1 2011-01-01 10:00:00 +2 2011-01-01 11:00:00 +3 2011-01-01 12:00:00 +4 2011-01-01 13:00:00 +dtype: category +Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, + 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(s) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + s = Series(Categorical(idx)) + exp = """0 2011-01-01 09:00:00-05:00 +1 2011-01-01 10:00:00-05:00 +2 2011-01-01 11:00:00-05:00 +3 2011-01-01 12:00:00-05:00 +4 2011-01-01 13:00:00-05:00 +dtype: category +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, + 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(s) == exp + + def test_categorical_series_repr_datetime_ordered(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + s = Series(Categorical(idx, ordered=True)) + exp = """0 2011-01-01 09:00:00 +1 2011-01-01 10:00:00 +2 2011-01-01 11:00:00 +3 2011-01-01 12:00:00 +4 2011-01-01 13:00:00 +dtype: category +Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(s) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + s = Series(Categorical(idx, ordered=True)) + exp = """0 2011-01-01 09:00:00-05:00 +1 2011-01-01 10:00:00-05:00 +2 2011-01-01 11:00:00-05:00 +3 2011-01-01 12:00:00-05:00 +4 2011-01-01 13:00:00-05:00 +dtype: category +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < + 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(s) == exp + + def test_categorical_series_repr_period(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + s = Series(Categorical(idx)) + exp = """0 
2011-01-01 09:00 +1 2011-01-01 10:00 +2 2011-01-01 11:00 +3 2011-01-01 12:00 +4 2011-01-01 13:00 +dtype: category +Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(s) == exp + + idx = period_range("2011-01", freq="M", periods=5) + s = Series(Categorical(idx)) + exp = """0 2011-01 +1 2011-02 +2 2011-03 +3 2011-04 +4 2011-05 +dtype: category +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" + + assert repr(s) == exp + + def test_categorical_series_repr_period_ordered(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + s = Series(Categorical(idx, ordered=True)) + exp = """0 2011-01-01 09:00 +1 2011-01-01 10:00 +2 2011-01-01 11:00 +3 2011-01-01 12:00 +4 2011-01-01 13:00 +dtype: category +Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(s) == exp + + idx = period_range("2011-01", freq="M", periods=5) + s = Series(Categorical(idx, ordered=True)) + exp = """0 2011-01 +1 2011-02 +2 2011-03 +3 2011-04 +4 2011-05 +dtype: category +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" + + assert repr(s) == exp + + def test_categorical_series_repr_timedelta(self): + idx = timedelta_range("1 days", periods=5) + s = Series(Categorical(idx)) + exp = """0 1 days +1 2 days +2 3 days +3 4 days +4 5 days +dtype: category +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" + + assert repr(s) == exp + + idx = timedelta_range("1 hours", periods=10) + s = Series(Categorical(idx)) + exp = """0 0 days 01:00:00 +1 1 days 01:00:00 +2 2 days 01:00:00 +3 3 days 01:00:00 +4 4 days 01:00:00 +5 5 days 01:00:00 +6 6 days 01:00:00 +7 7 days 01:00:00 +8 8 days 01:00:00 +9 9 days 01:00:00 +dtype: category +Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, + 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, + 8 days 01:00:00, 9 days 01:00:00]""" # noqa: E501 + + assert repr(s) == exp + + def test_categorical_series_repr_timedelta_ordered(self): + idx = timedelta_range("1 days", periods=5) + s = Series(Categorical(idx, ordered=True)) + exp = """0 1 days +1 2 days +2 3 days +3 4 days +4 5 days +dtype: category +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" + + assert repr(s) == exp + + idx = timedelta_range("1 hours", periods=10) + s = Series(Categorical(idx, ordered=True)) + exp = """0 0 days 01:00:00 +1 1 days 01:00:00 +2 2 days 01:00:00 +3 3 days 01:00:00 +4 4 days 01:00:00 +5 5 days 01:00:00 +6 6 days 01:00:00 +7 7 days 01:00:00 +8 8 days 01:00:00 +9 9 days 01:00:00 +dtype: category +Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < + 3 days 01:00:00 ... 
6 days 01:00:00 < 7 days 01:00:00 < + 8 days 01:00:00 < 9 days 01:00:00]""" # noqa: E501 + + assert repr(s) == exp diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_iteration.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_iteration.py new file mode 100644 index 0000000000000000000000000000000000000000..edc82455234bba0203d817417e7bf122c876bfff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_iteration.py @@ -0,0 +1,35 @@ +class TestIteration: + def test_keys(self, datetime_series): + assert datetime_series.keys() is datetime_series.index + + def test_iter_datetimes(self, datetime_series): + for i, val in enumerate(datetime_series): + # pylint: disable-next=unnecessary-list-index-lookup + assert val == datetime_series.iloc[i] + + def test_iter_strings(self, string_series): + for i, val in enumerate(string_series): + # pylint: disable-next=unnecessary-list-index-lookup + assert val == string_series.iloc[i] + + def test_iteritems_datetimes(self, datetime_series): + for idx, val in datetime_series.items(): + assert val == datetime_series[idx] + + def test_iteritems_strings(self, string_series): + for idx, val in string_series.items(): + assert val == string_series[idx] + + # assert is lazy (generators don't define reverse, lists do) + assert not hasattr(string_series.items(), "reverse") + + def test_items_datetimes(self, datetime_series): + for idx, val in datetime_series.items(): + assert val == datetime_series[idx] + + def test_items_strings(self, string_series): + for idx, val in string_series.items(): + assert val == string_series[idx] + + # assert is lazy (generators don't define reverse, lists do) + assert not hasattr(string_series.items(), "reverse") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_logical_ops.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_logical_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d9c94e871bd4bf39e0a380ca1bc3806dc33154c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_logical_ops.py @@ -0,0 +1,548 @@ +from datetime import datetime +import operator + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + bdate_range, +) +import pandas._testing as tm +from pandas.core import ops + + +class TestSeriesLogicalOps: + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") + @pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor]) + def test_bool_operators_with_nas(self, bool_op): + # boolean &, |, ^ should work with object arrays and propagate NAs + ser = Series(bdate_range("1/1/2000", periods=10), dtype=object) + ser[::2] = np.nan + + mask = ser.isna() + filled = ser.fillna(ser[0]) + + result = bool_op(ser < ser[9], ser > ser[3]) + + expected = bool_op(filled < filled[9], filled > filled[3]) + expected[mask] = False + tm.assert_series_equal(result, expected) + + def test_logical_operators_bool_dtype_with_empty(self): + # GH#9016: support bitwise op for integer types + index = list("bca") + + s_tft = Series([True, False, True], index=index) + s_fff = Series([False, False, False], index=index) + s_empty = Series([], dtype=object) + + res = s_tft & s_empty + expected = s_fff.sort_index() + tm.assert_series_equal(res, expected) + + res = s_tft | s_empty + expected = s_tft.sort_index() + tm.assert_series_equal(res, expected) + + def 
test_logical_operators_int_dtype_with_int_dtype(self): + # GH#9016: support bitwise op for integer types + + s_0123 = Series(range(4), dtype="int64") + s_3333 = Series([3] * 4) + s_4444 = Series([4] * 4) + + res = s_0123 & s_3333 + expected = Series(range(4), dtype="int64") + tm.assert_series_equal(res, expected) + + res = s_0123 | s_4444 + expected = Series(range(4, 8), dtype="int64") + tm.assert_series_equal(res, expected) + + s_1111 = Series([1] * 4, dtype="int8") + res = s_0123 & s_1111 + expected = Series([0, 1, 0, 1], dtype="int64") + tm.assert_series_equal(res, expected) + + res = s_0123.astype(np.int16) | s_1111.astype(np.int32) + expected = Series([1, 1, 3, 3], dtype="int32") + tm.assert_series_equal(res, expected) + + def test_logical_operators_int_dtype_with_int_scalar(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") + + res = s_0123 & 0 + expected = Series([0] * 4) + tm.assert_series_equal(res, expected) + + res = s_0123 & 1 + expected = Series([0, 1, 0, 1]) + tm.assert_series_equal(res, expected) + + def test_logical_operators_int_dtype_with_float(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") + + warn_msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + + msg = "Cannot perform.+with a dtyped.+array and scalar of type" + with pytest.raises(TypeError, match=msg): + s_0123 & np.nan + with pytest.raises(TypeError, match=msg): + s_0123 & 3.14 + msg = "unsupported operand type.+for &:" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s_0123 & [0.1, 4, 3.14, 2] + with pytest.raises(TypeError, match=msg): + s_0123 & np.array([0.1, 4, 3.14, 2]) + with pytest.raises(TypeError, match=msg): + s_0123 & Series([0.1, 4, -3.14, 2]) + + def test_logical_operators_int_dtype_with_str(self): + s_1111 = Series([1] * 4, dtype="int8") + + warn_msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + + msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type" + with pytest.raises(TypeError, match=msg): + s_1111 & "a" + with pytest.raises(TypeError, match="unsupported operand.+for &"): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s_1111 & ["a", "b", "c", "d"] + + def test_logical_operators_int_dtype_with_bool(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") + + expected = Series([False] * 4) + + result = s_0123 & False + tm.assert_series_equal(result, expected) + + warn_msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = s_0123 & [False] + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = s_0123 & (False,) + tm.assert_series_equal(result, expected) + + result = s_0123 ^ False + expected = Series([False, True, True, True]) + tm.assert_series_equal(result, expected) + + def test_logical_operators_int_dtype_with_object(self, using_infer_string): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") + + result = s_0123 & Series([False, np.nan, False, False]) + expected = Series([False] * 4) + tm.assert_series_equal(result, expected) + + s_abNd = Series(["a", "b", np.nan, "d"]) + if using_infer_string: + import pyarrow as pa + + with 
pytest.raises(pa.lib.ArrowNotImplementedError, match="has no kernel"): + s_0123 & s_abNd + else: + with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"): + s_0123 & s_abNd + + def test_logical_operators_bool_dtype_with_int(self): + index = list("bca") + + s_tft = Series([True, False, True], index=index) + s_fff = Series([False, False, False], index=index) + + res = s_tft & 0 + expected = s_fff + tm.assert_series_equal(res, expected) + + res = s_tft & 1 + expected = s_tft + tm.assert_series_equal(res, expected) + + def test_logical_ops_bool_dtype_with_ndarray(self): + # make sure we operate on ndarray the same as Series + left = Series([True, True, True, False, True]) + right = [True, False, None, True, np.nan] + + msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + + expected = Series([True, False, False, False, False]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = left & right + tm.assert_series_equal(result, expected) + result = left & np.array(right) + tm.assert_series_equal(result, expected) + result = left & Index(right) + tm.assert_series_equal(result, expected) + result = left & Series(right) + tm.assert_series_equal(result, expected) + + expected = Series([True, True, True, True, True]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = left | right + tm.assert_series_equal(result, expected) + result = left | np.array(right) + tm.assert_series_equal(result, expected) + result = left | Index(right) + tm.assert_series_equal(result, expected) + result = left | Series(right) + tm.assert_series_equal(result, expected) + + expected = Series([False, True, True, True, True]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = left ^ right + tm.assert_series_equal(result, expected) + result = left ^ np.array(right) + tm.assert_series_equal(result, expected) + result = left ^ Index(right) + tm.assert_series_equal(result, expected) + result = left ^ Series(right) + tm.assert_series_equal(result, expected) + + def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self): + # GH#9016: support bitwise op for integer types + + index = list("bca") + + s_tft = Series([True, False, True], index=index) + s_tft = Series([True, False, True], index=index) + s_tff = Series([True, False, False], index=index) + + s_0123 = Series(range(4), dtype="int64") + + # s_0123 will be all false now because of reindexing like s_tft + expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"]) + with tm.assert_produces_warning(FutureWarning): + result = s_tft & s_0123 + tm.assert_series_equal(result, expected) + + # GH 52538: Deprecate casting to object type when reindex is needed; + # matches DataFrame behavior + expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"]) + with tm.assert_produces_warning(FutureWarning): + result = s_0123 & s_tft + tm.assert_series_equal(result, expected) + + s_a0b1c0 = Series([1], list("b")) + + with tm.assert_produces_warning(FutureWarning): + res = s_tft & s_a0b1c0 + expected = s_tff.reindex(list("abc")) + tm.assert_series_equal(res, expected) + + with tm.assert_produces_warning(FutureWarning): + res = s_tft | s_a0b1c0 + expected = s_tft.reindex(list("abc")) + tm.assert_series_equal(res, expected) + + def test_scalar_na_logical_ops_corners(self): + s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10]) + + msg = "Cannot perform.+with a dtyped.+array and scalar of type" + with pytest.raises(TypeError, match=msg): + s & datetime(2005, 1, 1) + + s 
= Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)]) + s[::2] = np.nan + + expected = Series(True, index=s.index) + expected[::2] = False + + msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s & list(s) + tm.assert_series_equal(result, expected) + + def test_scalar_na_logical_ops_corners_aligns(self): + s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)]) + s[::2] = np.nan + d = DataFrame({"A": s}) + + expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9))) + + result = s & d + tm.assert_frame_equal(result, expected) + + result = d & s + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor]) + def test_logical_ops_with_index(self, op): + # GH#22092, GH#19792 + ser = Series([True, True, False, False]) + idx1 = Index([True, False, True, False]) + idx2 = Index([1, 0, 1, 0]) + + expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))]) + + result = op(ser, idx1) + tm.assert_series_equal(result, expected) + + expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool) + + result = op(ser, idx2) + tm.assert_series_equal(result, expected) + + def test_reversed_xor_with_index_returns_series(self): + # GH#22092, GH#19792 pre-2.0 these were aliased to setops + ser = Series([True, True, False, False]) + idx1 = Index([True, False, True, False], dtype=bool) + idx2 = Index([1, 0, 1, 0]) + + expected = Series([False, True, True, False]) + result = idx1 ^ ser + tm.assert_series_equal(result, expected) + + result = idx2 ^ ser + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "op", + [ + ops.rand_, + ops.ror_, + ], + ) + def test_reversed_logical_op_with_index_returns_series(self, op): + # GH#22092, GH#19792 + ser = Series([True, True, False, False]) + idx1 = Index([True, False, True, False]) + idx2 = Index([1, 0, 1, 0]) + + expected = Series(op(idx1.values, ser.values)) + result = op(ser, idx1) + tm.assert_series_equal(result, expected) + + expected = op(ser, Series(idx2)) + result = op(ser, idx2) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "op, expected", + [ + (ops.rand_, Series([False, False])), + (ops.ror_, Series([True, True])), + (ops.rxor, Series([True, True])), + ], + ) + def test_reverse_ops_with_index(self, op, expected): + # https://github.com/pandas-dev/pandas/pull/23628 + # multi-set Index ops are buggy, so let's avoid duplicates... 
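+        # Editor's note (illustrative): ops.rand_/ops.ror_/ops.rxor are the
+        # reflected variants, i.e. ops.rand_(a, b) computes b & a; with
+        # ser=[True, False] and idx=[False, True] that yields the all-False /
+        # all-True expectations parametrized above.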
+ # GH#49503 + ser = Series([True, False]) + idx = Index([False, True]) + + result = op(ser, idx) + tm.assert_series_equal(result, expected) + + def test_logical_ops_label_based(self, using_infer_string): + # GH#4947 + # logical ops should be label based + + a = Series([True, False, True], list("bca")) + b = Series([False, True, False], list("abc")) + + expected = Series([False, True, False], list("abc")) + result = a & b + tm.assert_series_equal(result, expected) + + expected = Series([True, True, False], list("abc")) + result = a | b + tm.assert_series_equal(result, expected) + + expected = Series([True, False, False], list("abc")) + result = a ^ b + tm.assert_series_equal(result, expected) + + # rhs is bigger + a = Series([True, False, True], list("bca")) + b = Series([False, True, False, True], list("abcd")) + + expected = Series([False, True, False, False], list("abcd")) + result = a & b + tm.assert_series_equal(result, expected) + + expected = Series([True, True, False, False], list("abcd")) + result = a | b + tm.assert_series_equal(result, expected) + + # filling + + # vs empty + empty = Series([], dtype=object) + + result = a & empty.copy() + expected = Series([False, False, False], list("abc")) + tm.assert_series_equal(result, expected) + + result = a | empty.copy() + expected = Series([True, True, False], list("abc")) + tm.assert_series_equal(result, expected) + + # vs non-matching + with tm.assert_produces_warning(FutureWarning): + result = a & Series([1], ["z"]) + expected = Series([False, False, False, False], list("abcz")) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning): + result = a | Series([1], ["z"]) + expected = Series([True, True, False, False], list("abcz")) + tm.assert_series_equal(result, expected) + + # identity + # we would like s[s|e] == s to hold for any e, whether empty or not + with tm.assert_produces_warning(FutureWarning): + for e in [ + empty.copy(), + Series([1], ["z"]), + Series(np.nan, b.index), + Series(np.nan, a.index), + ]: + result = a[a | e] + tm.assert_series_equal(result, a[a]) + + for e in [Series(["z"])]: + warn = FutureWarning if using_infer_string else None + if using_infer_string: + import pyarrow as pa + + with tm.assert_produces_warning(warn, match="Operation between non"): + with pytest.raises( + pa.lib.ArrowNotImplementedError, match="has no kernel" + ): + result = a[a | e] + else: + result = a[a | e] + tm.assert_series_equal(result, a[a]) + + # vs scalars + index = list("bca") + t = Series([True, False, True]) + + for v in [True, 1, 2]: + result = Series([True, False, True], index=index) | v + expected = Series([True, True, True], index=index) + tm.assert_series_equal(result, expected) + + msg = "Cannot perform.+with a dtyped.+array and scalar of type" + for v in [np.nan, "foo"]: + with pytest.raises(TypeError, match=msg): + t | v + + for v in [False, 0]: + result = Series([True, False, True], index=index) | v + expected = Series([True, False, True], index=index) + tm.assert_series_equal(result, expected) + + for v in [True, 1]: + result = Series([True, False, True], index=index) & v + expected = Series([True, False, True], index=index) + tm.assert_series_equal(result, expected) + + for v in [False, 0]: + result = Series([True, False, True], index=index) & v + expected = Series([False, False, False], index=index) + tm.assert_series_equal(result, expected) + msg = "Cannot perform.+with a dtyped.+array and scalar of type" + for v in [np.nan]: + with pytest.raises(TypeError, match=msg): + t & v + 
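+    # Editor's sketch (illustrative, not an original test): logical ops align
+    # on index labels rather than positions, e.g.:
+    #   >>> a = Series([True, False], index=["x", "y"])
+    #   >>> b = Series([False, True], index=["y", "x"])
+    #   >>> (a & b).tolist()
+    #   [True, False]
+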
+ def test_logical_ops_df_compat(self): + # GH#1134 + s1 = Series([True, False, True], index=list("ABC"), name="x") + s2 = Series([True, True, False], index=list("ABD"), name="x") + + exp = Series([True, False, False, False], index=list("ABCD"), name="x") + tm.assert_series_equal(s1 & s2, exp) + tm.assert_series_equal(s2 & s1, exp) + + # True | np.nan => True + exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x") + tm.assert_series_equal(s1 | s2, exp_or1) + # np.nan | True => np.nan, filled with False + exp_or = Series([True, True, False, False], index=list("ABCD"), name="x") + tm.assert_series_equal(s2 | s1, exp_or) + + # DataFrame doesn't fill nan with False + tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp.to_frame()) + tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp.to_frame()) + + exp = DataFrame({"x": [True, True, np.nan, np.nan]}, index=list("ABCD")) + tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp_or1.to_frame()) + tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp_or.to_frame()) + + # different length + s3 = Series([True, False, True], index=list("ABC"), name="x") + s4 = Series([True, True, True, True], index=list("ABCD"), name="x") + + exp = Series([True, False, True, False], index=list("ABCD"), name="x") + tm.assert_series_equal(s3 & s4, exp) + tm.assert_series_equal(s4 & s3, exp) + + # np.nan | True => np.nan, filled with False + exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x") + tm.assert_series_equal(s3 | s4, exp_or1) + # True | np.nan => True + exp_or = Series([True, True, True, True], index=list("ABCD"), name="x") + tm.assert_series_equal(s4 | s3, exp_or) + + tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp.to_frame()) + tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp.to_frame()) + + tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp_or1.to_frame()) + tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp_or.to_frame()) + + @pytest.mark.xfail(reason="Will pass once #52839 deprecation is enforced") + def test_int_dtype_different_index_not_bool(self): + # GH 52500 + ser1 = Series([1, 2, 3], index=[10, 11, 23], name="a") + ser2 = Series([10, 20, 30], index=[11, 10, 23], name="a") + result = np.bitwise_xor(ser1, ser2) + expected = Series([21, 8, 29], index=[10, 11, 23], name="a") + tm.assert_series_equal(result, expected) + + result = ser1 ^ ser2 + tm.assert_series_equal(result, expected) + + def test_pyarrow_numpy_string_invalid(self): + # GH#56008 + pytest.importorskip("pyarrow") + ser = Series([False, True]) + ser2 = Series(["a", "b"], dtype="string[pyarrow_numpy]") + result = ser == ser2 + expected = Series(False, index=ser.index) + tm.assert_series_equal(result, expected) + + result = ser != ser2 + expected = Series(True, index=ser.index) + tm.assert_series_equal(result, expected) + + with pytest.raises(TypeError, match="Invalid comparison"): + ser > ser2 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_missing.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_missing.py new file mode 100644 index 0000000000000000000000000000000000000000..cafc69c4d0f20f42dc184366db0ac8933a512f54 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_missing.py @@ -0,0 +1,105 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas._libs import iNaT + +import pandas as pd +from pandas import ( + Categorical, + Index, + NaT, + Series, + isna, +) +import pandas._testing as tm + + 
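+# Editor's sketch (illustrative, not an original test): pandas encodes missing
+# values differently per dtype -- np.nan for floats, NaT for datetime-likes,
+# and the sentinel code -1 inside a Categorical:
+#   >>> Categorical(["a", None, "a"]).codes
+#   array([ 0, -1,  0], dtype=int8)
+
+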
+class TestSeriesMissingData: + def test_categorical_nan_handling(self): + # NaNs are represented as -1 in labels + s = Series(Categorical(["a", "b", np.nan, "a"])) + tm.assert_index_equal(s.cat.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal( + s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8) + ) + + def test_isna_for_inf(self): + s = Series(["a", np.inf, np.nan, pd.NA, 1.0]) + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + r = s.isna() + dr = s.dropna() + e = Series([False, True, True, True, False]) + de = Series(["a", 1.0], index=[0, 4]) + tm.assert_series_equal(r, e) + tm.assert_series_equal(dr, de) + + def test_timedelta64_nan(self): + td = Series([timedelta(days=i) for i in range(10)]) + + # nan ops on timedeltas + td1 = td.copy() + td1[0] = np.nan + assert isna(td1[0]) + assert td1[0]._value == iNaT + td1[0] = td[0] + assert not isna(td1[0]) + + # GH#16674 iNaT is treated as an integer when given by the user + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + td1[1] = iNaT + assert not isna(td1[1]) + assert td1.dtype == np.object_ + assert td1[1] == iNaT + td1[1] = td[1] + assert not isna(td1[1]) + + td1[2] = NaT + assert isna(td1[2]) + assert td1[2]._value == iNaT + td1[2] = td[2] + assert not isna(td1[2]) + + # boolean setting + # GH#2899 boolean setting + td3 = np.timedelta64(timedelta(days=3)) + td7 = np.timedelta64(timedelta(days=7)) + td[(td > td3) & (td < td7)] = np.nan + assert isna(td).sum() == 3 + + @pytest.mark.xfail( + reason="Chained inequality raises when trying to define 'selector'" + ) + def test_logical_range_select(self, datetime_series): + # NumPy limitation =( + # https://github.com/pandas-dev/pandas/commit/9030dc021f07c76809848925cb34828f6c8484f3 + + selector = -0.5 <= datetime_series <= 0.5 + expected = (datetime_series >= -0.5) & (datetime_series <= 0.5) + tm.assert_series_equal(selector, expected) + + def test_valid(self, datetime_series): + ts = datetime_series.copy() + ts.index = ts.index._with_freq(None) + ts[::2] = np.nan + + result = ts.dropna() + assert len(result) == ts.count() + tm.assert_series_equal(result, ts[1::2]) + tm.assert_series_equal(result, ts[pd.notna(ts)]) + + +def test_hasnans_uncached_for_series(): + # GH#19700 + # set float64 dtype to avoid upcast when setting nan + idx = Index([0, 1], dtype="float64") + assert idx.hasnans is False + assert "hasnans" in idx._cache + ser = idx.to_series() + assert ser.hasnans is False + assert not hasattr(ser, "_cache") + ser.iloc[-1] = np.nan + assert ser.hasnans is True diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_npfuncs.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_npfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..11a51c4700d5c1733e7136909d86d2ea7294dd8e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_npfuncs.py @@ -0,0 +1,46 @@ +""" +Tests for np.foo applied to Series, not necessarily ufuncs. +""" + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import Series +import pandas._testing as tm + + +class TestPtp: + def test_ptp(self): + # GH#21614 + N = 1000 + arr = np.random.default_rng(2).standard_normal(N) + ser = Series(arr) + assert np.ptp(ser) == np.ptp(arr) + + +def test_numpy_unique(datetime_series): + # it works! 
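+    # Editor's note (illustrative): np.unique is not a ufunc, so it coerces
+    # the Series to an ndarray and returns a plain sorted array of uniques,
+    # e.g. np.unique(Series([2, 1, 2])) -> array([1, 2]).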
+ np.unique(datetime_series) + + +@pytest.mark.parametrize("index", [["a", "b", "c", "d", "e"], None]) +def test_numpy_argwhere(index): + # GH#35331 + + s = Series(range(5), index=index, dtype=np.int64) + + result = np.argwhere(s > 2).astype(np.int64) + expected = np.array([[3], [4]], dtype=np.int64) + + tm.assert_numpy_array_equal(result, expected) + + +@td.skip_if_no("pyarrow") +def test_log_arrow_backed_missing_value(): + # GH#56285 + ser = Series([1, 2, None], dtype="float64[pyarrow]") + result = np.log(ser) + expected = np.log(Series([1, 2, None], dtype="float64")) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_reductions.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..76353ab25fca6e5208f099b030aa308aeb8837c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_reductions.py @@ -0,0 +1,218 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import Series +import pandas._testing as tm + + +@pytest.mark.parametrize("operation, expected", [("min", "a"), ("max", "b")]) +def test_reductions_series_strings(operation, expected): + # GH#31746 + ser = Series(["a", "b"], dtype="string") + res_operation_serie = getattr(ser, operation)() + assert res_operation_serie == expected + + +@pytest.mark.parametrize("as_period", [True, False]) +def test_mode_extension_dtype(as_period): + # GH#41927 preserve dt64tz dtype + ser = Series([pd.Timestamp(1979, 4, n) for n in range(1, 5)]) + + if as_period: + ser = ser.dt.to_period("D") + else: + ser = ser.dt.tz_localize("US/Central") + + res = ser.mode() + assert res.dtype == ser.dtype + tm.assert_series_equal(res, ser) + + +def test_mode_nullable_dtype(any_numeric_ea_dtype): + # GH#55340 + ser = Series([1, 3, 2, pd.NA, 3, 2, pd.NA], dtype=any_numeric_ea_dtype) + result = ser.mode(dropna=False) + expected = Series([2, 3, pd.NA], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + result = ser.mode(dropna=True) + expected = Series([2, 3], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + ser[-1] = pd.NA + + result = ser.mode(dropna=True) + expected = Series([2, 3], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + result = ser.mode(dropna=False) + expected = Series([pd.NA], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + +def test_mode_infer_string(): + # GH#56183 + pytest.importorskip("pyarrow") + ser = Series(["a", "b"], dtype=object) + with pd.option_context("future.infer_string", True): + result = ser.mode() + expected = Series(["a", "b"], dtype=object) + tm.assert_series_equal(result, expected) + + +def test_reductions_td64_with_nat(): + # GH#8617 + ser = Series([0, pd.NaT], dtype="m8[ns]") + exp = ser[0] + assert ser.median() == exp + assert ser.min() == exp + assert ser.max() == exp + + +@pytest.mark.parametrize("skipna", [True, False]) +def test_td64_sum_empty(skipna): + # GH#37151 + ser = Series([], dtype="timedelta64[ns]") + + result = ser.sum(skipna=skipna) + assert isinstance(result, pd.Timedelta) + assert result == pd.Timedelta(0) + + +def test_td64_summation_overflow(): + # GH#9442 + ser = Series(pd.date_range("20130101", periods=100000, freq="h")) + ser[0] += pd.Timedelta("1s 1ms") + + # mean + result = (ser - ser.min()).mean() + expected = pd.Timedelta((pd.TimedeltaIndex(ser - ser.min()).asi8 / len(ser)).sum()) + + # 
the computation is converted to float so + # might be some loss of precision + assert np.allclose(result._value / 1000, expected._value / 1000) + + # sum + msg = "overflow in timedelta operation" + with pytest.raises(ValueError, match=msg): + (ser - ser.min()).sum() + + s1 = ser[0:10000] + with pytest.raises(ValueError, match=msg): + (s1 - s1.min()).sum() + s2 = ser[0:1000] + (s2 - s2.min()).sum() + + +def test_prod_numpy16_bug(): + ser = Series([1.0, 1.0, 1.0], index=range(3)) + result = ser.prod() + + assert not isinstance(result, Series) + + +@pytest.mark.parametrize("func", [np.any, np.all]) +@pytest.mark.parametrize("kwargs", [{"keepdims": True}, {"out": object()}]) +def test_validate_any_all_out_keepdims_raises(kwargs, func): + ser = Series([1, 2]) + param = next(iter(kwargs)) + name = func.__name__ + + msg = ( + f"the '{param}' parameter is not " + "supported in the pandas " + rf"implementation of {name}\(\)" + ) + with pytest.raises(ValueError, match=msg): + func(ser, **kwargs) + + +def test_validate_sum_initial(): + ser = Series([1, 2]) + msg = ( + r"the 'initial' parameter is not " + r"supported in the pandas " + r"implementation of sum\(\)" + ) + with pytest.raises(ValueError, match=msg): + np.sum(ser, initial=10) + + +def test_validate_median_initial(): + ser = Series([1, 2]) + msg = ( + r"the 'overwrite_input' parameter is not " + r"supported in the pandas " + r"implementation of median\(\)" + ) + with pytest.raises(ValueError, match=msg): + # It seems like np.median doesn't dispatch, so we use the + # method instead of the ufunc. + ser.median(overwrite_input=True) + + +def test_validate_stat_keepdims(): + ser = Series([1, 2]) + msg = ( + r"the 'keepdims' parameter is not " + r"supported in the pandas " + r"implementation of sum\(\)" + ) + with pytest.raises(ValueError, match=msg): + np.sum(ser, keepdims=True) + + +def test_mean_with_convertible_string_raises(using_array_manager, using_infer_string): + # GH#44008 + ser = Series(["1", "2"]) + if using_infer_string: + msg = "does not support" + with pytest.raises(TypeError, match=msg): + ser.sum() + else: + assert ser.sum() == "12" + msg = "Could not convert string '12' to numeric|does not support" + with pytest.raises(TypeError, match=msg): + ser.mean() + + df = ser.to_frame() + if not using_array_manager: + msg = r"Could not convert \['12'\] to numeric|does not support" + with pytest.raises(TypeError, match=msg): + df.mean() + + +def test_mean_dont_convert_j_to_complex(using_array_manager): + # GH#36703 + df = pd.DataFrame([{"db": "J", "numeric": 123}]) + if using_array_manager: + msg = "Could not convert string 'J' to numeric" + else: + msg = r"Could not convert \['J'\] to numeric|does not support" + with pytest.raises(TypeError, match=msg): + df.mean() + + with pytest.raises(TypeError, match=msg): + df.agg("mean") + + msg = "Could not convert string 'J' to numeric|does not support" + with pytest.raises(TypeError, match=msg): + df["db"].mean() + msg = "Could not convert string 'J' to numeric|ufunc 'divide'" + with pytest.raises(TypeError, match=msg): + np.mean(df["db"].astype("string").array) + + +def test_median_with_convertible_string_raises(using_array_manager): + # GH#34671 this _could_ return a string "2", but definitely not float 2.0 + msg = r"Cannot convert \['1' '2' '3'\] to numeric|does not support" + ser = Series(["1", "2", "3"]) + with pytest.raises(TypeError, match=msg): + ser.median() + + if not using_array_manager: + msg = r"Cannot convert \[\['1' '2' '3'\]\] to numeric|does not support" + df = ser.to_frame() + 
with pytest.raises(TypeError, match=msg): + df.median() diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_subclass.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_subclass.py new file mode 100644 index 0000000000000000000000000000000000000000..c2d5afcf884b12b3007905061b7c503359e71a5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_subclass.py @@ -0,0 +1,82 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) + + +class TestSeriesSubclassing: + @pytest.mark.parametrize( + "idx_method, indexer, exp_data, exp_idx", + [ + ["loc", ["a", "b"], [1, 2], "ab"], + ["iloc", [2, 3], [3, 4], "cd"], + ], + ) + def test_indexing_sliced(self, idx_method, indexer, exp_data, exp_idx): + s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd")) + res = getattr(s, idx_method)[indexer] + exp = tm.SubclassedSeries(exp_data, index=list(exp_idx)) + tm.assert_series_equal(res, exp) + + def test_to_frame(self): + s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"), name="xxx") + res = s.to_frame() + exp = tm.SubclassedDataFrame({"xxx": [1, 2, 3, 4]}, index=list("abcd")) + tm.assert_frame_equal(res, exp) + + def test_subclass_unstack(self): + # GH 15564 + s = tm.SubclassedSeries([1, 2, 3, 4], index=[list("aabb"), list("xyxy")]) + + res = s.unstack() + exp = tm.SubclassedDataFrame({"x": [1, 3], "y": [2, 4]}, index=["a", "b"]) + + tm.assert_frame_equal(res, exp) + + def test_subclass_empty_repr(self): + sub_series = tm.SubclassedSeries() + assert "SubclassedSeries" in repr(sub_series) + + def test_asof(self): + N = 3 + rng = pd.date_range("1/1/1990", periods=N, freq="53s") + s = tm.SubclassedSeries({"A": [np.nan, np.nan, np.nan]}, index=rng) + + result = s.asof(rng[-2:]) + assert isinstance(result, tm.SubclassedSeries) + + def test_explode(self): + s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]]) + result = s.explode() + assert isinstance(result, tm.SubclassedSeries) + + def test_equals(self): + # https://github.com/pandas-dev/pandas/pull/34402 + # allow subclass in both directions + s1 = pd.Series([1, 2, 3]) + s2 = tm.SubclassedSeries([1, 2, 3]) + assert s1.equals(s2) + assert s2.equals(s1) + + +class SubclassedSeries(pd.Series): + @property + def _constructor(self): + def _new(*args, **kwargs): + # some constructor logic that accesses the Series' name + if self.name == "test": + return pd.Series(*args, **kwargs) + return SubclassedSeries(*args, **kwargs) + + return _new + + +def test_constructor_from_dict(): + # https://github.com/pandas-dev/pandas/issues/52445 + result = SubclassedSeries({"a": 1, "b": 2, "c": 3}) + assert isinstance(result, SubclassedSeries) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_ufunc.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_ufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..9d13ebf740eabb562fea10336a39edc36e431f5e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_ufunc.py @@ -0,0 +1,460 @@ +from collections import deque +import re +import string + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import SparseArray + + +@pytest.fixture(params=[np.add, np.logaddexp]) +def ufunc(request): + # dunder op 
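+    # Editor's note (illustrative): np.add maps onto a Series dunder
+    # (__add__), while np.logaddexp has no dunder equivalent, so this pair
+    # exercises both the dunder-dispatch path and the generic
+    # __array_ufunc__ path; np.add(ser, 1) should match ser + 1.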
+ return request.param + + +@pytest.fixture(params=[True, False], ids=["sparse", "dense"]) +def sparse(request): + return request.param + + +@pytest.fixture +def arrays_for_binary_ufunc(): + """ + A pair of random, length-100 integer-dtype arrays, that are mostly 0. + """ + a1 = np.random.default_rng(2).integers(0, 10, 100, dtype="int64") + a2 = np.random.default_rng(2).integers(0, 10, 100, dtype="int64") + a1[::3] = 0 + a2[::4] = 0 + return a1, a2 + + +@pytest.mark.parametrize("ufunc", [np.positive, np.floor, np.exp]) +def test_unary_ufunc(ufunc, sparse): + # Test that ufunc(pd.Series) == pd.Series(ufunc) + arr = np.random.default_rng(2).integers(0, 10, 10, dtype="int64") + arr[::2] = 0 + if sparse: + arr = SparseArray(arr, dtype=pd.SparseDtype("int64", 0)) + + index = list(string.ascii_letters[:10]) + name = "name" + series = pd.Series(arr, index=index, name=name) + + result = ufunc(series) + expected = pd.Series(ufunc(arr), index=index, name=name) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"]) +def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc): + # Test that ufunc(pd.Series(a), array) == pd.Series(ufunc(a, b)) + a1, a2 = arrays_for_binary_ufunc + if sparse: + a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0)) + a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0)) + + name = "name" # op(pd.Series, array) preserves the name. + series = pd.Series(a1, name=name) + other = a2 + + array_args = (a1, a2) + series_args = (series, other) # ufunc(series, array) + + if flip: + array_args = reversed(array_args) + series_args = reversed(series_args) # ufunc(array, series) + + expected = pd.Series(ufunc(*array_args), name=name) + result = ufunc(*series_args) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"]) +def test_binary_ufunc_with_index(flip, sparse, ufunc, arrays_for_binary_ufunc): + # Test that + # * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b)) + # * ufunc(Index, pd.Series) dispatches to pd.Series (returns a pd.Series) + a1, a2 = arrays_for_binary_ufunc + if sparse: + a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0)) + a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0)) + + name = "name" # op(pd.Series, array) preserves the name. + series = pd.Series(a1, name=name) + + other = pd.Index(a2, name=name).astype("int64") + + array_args = (a1, a2) + series_args = (series, other) # ufunc(series, array) + + if flip: + array_args = reversed(array_args) + series_args = reversed(series_args) # ufunc(array, series) + + expected = pd.Series(ufunc(*array_args), name=name) + result = ufunc(*series_args) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("shuffle", [True, False], ids=["unaligned", "aligned"]) +@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"]) +def test_binary_ufunc_with_series( + flip, shuffle, sparse, ufunc, arrays_for_binary_ufunc +): + # Test that + # * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b)) + # with alignment between the indices + a1, a2 = arrays_for_binary_ufunc + if sparse: + a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0)) + a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0)) + + name = "name" # op(pd.Series, array) preserves the name. 
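+    # Editor's sketch (illustrative): binary ufuncs on two Series align on
+    # the index first, like the arithmetic dunders do, e.g.:
+    #   >>> s = pd.Series([1, 2], index=["a", "b"])
+    #   >>> np.add(s, s.take([1, 0])).tolist()
+    #   [2, 4]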
+ series = pd.Series(a1, name=name) + other = pd.Series(a2, name=name) + + idx = np.random.default_rng(2).permutation(len(a1)) + + if shuffle: + other = other.take(idx) + if flip: + index = other.align(series)[0].index + else: + index = series.align(other)[0].index + else: + index = series.index + + array_args = (a1, a2) + series_args = (series, other) # ufunc(series, array) + + if flip: + array_args = tuple(reversed(array_args)) + series_args = tuple(reversed(series_args)) # ufunc(array, series) + + expected = pd.Series(ufunc(*array_args), index=index, name=name) + result = ufunc(*series_args) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("flip", [True, False]) +def test_binary_ufunc_scalar(ufunc, sparse, flip, arrays_for_binary_ufunc): + # Test that + # * ufunc(pd.Series, scalar) == pd.Series(ufunc(array, scalar)) + # * ufunc(pd.Series, scalar) == ufunc(scalar, pd.Series) + arr, _ = arrays_for_binary_ufunc + if sparse: + arr = SparseArray(arr) + other = 2 + series = pd.Series(arr, name="name") + + series_args = (series, other) + array_args = (arr, other) + + if flip: + series_args = tuple(reversed(series_args)) + array_args = tuple(reversed(array_args)) + + expected = pd.Series(ufunc(*array_args), name="name") + result = ufunc(*series_args) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.divmod]) # TODO: np.modf, np.frexp +@pytest.mark.parametrize("shuffle", [True, False]) +@pytest.mark.filterwarnings("ignore:divide by zero:RuntimeWarning") +def test_multiple_output_binary_ufuncs(ufunc, sparse, shuffle, arrays_for_binary_ufunc): + # Test that + # the same conditions from binary_ufunc_scalar apply to + # ufuncs with multiple outputs. + + a1, a2 = arrays_for_binary_ufunc + # work around https://github.com/pandas-dev/pandas/issues/26987 + a1[a1 == 0] = 1 + a2[a2 == 0] = 1 + + if sparse: + a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0)) + a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0)) + + s1 = pd.Series(a1) + s2 = pd.Series(a2) + + if shuffle: + # ensure we align before applying the ufunc + s2 = s2.sample(frac=1) + + expected = ufunc(a1, a2) + assert isinstance(expected, tuple) + + result = ufunc(s1, s2) + assert isinstance(result, tuple) + tm.assert_series_equal(result[0], pd.Series(expected[0])) + tm.assert_series_equal(result[1], pd.Series(expected[1])) + + +def test_multiple_output_ufunc(sparse, arrays_for_binary_ufunc): + # Test that the same conditions from unary input apply to multi-output + # ufuncs + arr, _ = arrays_for_binary_ufunc + + if sparse: + arr = SparseArray(arr) + + series = pd.Series(arr, name="name") + result = np.modf(series) + expected = np.modf(arr) + + assert isinstance(result, tuple) + assert isinstance(expected, tuple) + + tm.assert_series_equal(result[0], pd.Series(expected[0], name="name")) + tm.assert_series_equal(result[1], pd.Series(expected[1], name="name")) + + +def test_binary_ufunc_drops_series_name(ufunc, sparse, arrays_for_binary_ufunc): + # Drop the names when they differ. 
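+    # Editor's note (illustrative): name propagation follows the usual binary
+    # op rule -- the name is kept only when both inputs share it, so e.g.
+    # np.add(pd.Series([1], name="a"), pd.Series([1], name="b")).name is None.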
+ a1, a2 = arrays_for_binary_ufunc + s1 = pd.Series(a1, name="a") + s2 = pd.Series(a2, name="b") + + result = ufunc(s1, s2) + assert result.name is None + + +def test_object_series_ok(): + class Dummy: + def __init__(self, value) -> None: + self.value = value + + def __add__(self, other): + return self.value + other.value + + arr = np.array([Dummy(0), Dummy(1)]) + ser = pd.Series(arr) + tm.assert_series_equal(np.add(ser, ser), pd.Series(np.add(ser, arr))) + tm.assert_series_equal(np.add(ser, Dummy(1)), pd.Series(np.add(ser, Dummy(1)))) + + +@pytest.fixture( + params=[ + pd.array([1, 3, 2], dtype=np.int64), + pd.array([1, 3, 2], dtype="Int64"), + pd.array([1, 3, 2], dtype="Float32"), + pd.array([1, 10, 2], dtype="Sparse[int]"), + pd.to_datetime(["2000", "2010", "2001"]), + pd.to_datetime(["2000", "2010", "2001"]).tz_localize("CET"), + pd.to_datetime(["2000", "2010", "2001"]).to_period(freq="D"), + pd.to_timedelta(["1 Day", "3 Days", "2 Days"]), + pd.IntervalIndex([pd.Interval(0, 1), pd.Interval(2, 3), pd.Interval(1, 2)]), + ], + ids=lambda x: str(x.dtype), +) +def values_for_np_reduce(request): + # min/max tests assume that these are monotonic increasing + return request.param + + +class TestNumpyReductions: + # TODO: cases with NAs, axis kwarg for DataFrame + + def test_multiply(self, values_for_np_reduce, box_with_array, request): + box = box_with_array + values = values_for_np_reduce + + with tm.assert_produces_warning(None): + obj = box(values) + + if isinstance(values, pd.core.arrays.SparseArray): + mark = pytest.mark.xfail(reason="SparseArray has no 'prod'") + request.applymarker(mark) + + if values.dtype.kind in "iuf": + result = np.multiply.reduce(obj) + if box is pd.DataFrame: + expected = obj.prod(numeric_only=False) + tm.assert_series_equal(result, expected) + elif box is pd.Index: + # Index has no 'prod' + expected = obj._values.prod() + assert result == expected + else: + expected = obj.prod() + assert result == expected + else: + msg = "|".join( + [ + "does not support reduction", + "unsupported operand type", + "ufunc 'multiply' cannot use operands", + ] + ) + with pytest.raises(TypeError, match=msg): + np.multiply.reduce(obj) + + def test_add(self, values_for_np_reduce, box_with_array): + box = box_with_array + values = values_for_np_reduce + + with tm.assert_produces_warning(None): + obj = box(values) + + if values.dtype.kind in "miuf": + result = np.add.reduce(obj) + if box is pd.DataFrame: + expected = obj.sum(numeric_only=False) + tm.assert_series_equal(result, expected) + elif box is pd.Index: + # Index has no 'sum' + expected = obj._values.sum() + assert result == expected + else: + expected = obj.sum() + assert result == expected + else: + msg = "|".join( + [ + "does not support reduction", + "unsupported operand type", + "ufunc 'add' cannot use operands", + ] + ) + with pytest.raises(TypeError, match=msg): + np.add.reduce(obj) + + def test_max(self, values_for_np_reduce, box_with_array): + box = box_with_array + values = values_for_np_reduce + + same_type = True + if box is pd.Index and values.dtype.kind in ["i", "f"]: + # ATM Index casts to object, so we get python ints/floats + same_type = False + + with tm.assert_produces_warning(None): + obj = box(values) + + result = np.maximum.reduce(obj) + if box is pd.DataFrame: + # TODO: cases with axis kwarg + expected = obj.max(numeric_only=False) + tm.assert_series_equal(result, expected) + else: + expected = values[1] + assert result == expected + if same_type: + # check we have e.g. 
Timestamp instead of dt64
+            assert type(result) == type(expected)
+
+    def test_min(self, values_for_np_reduce, box_with_array):
+        box = box_with_array
+        values = values_for_np_reduce
+
+        same_type = True
+        if box is pd.Index and values.dtype.kind in ["i", "f"]:
+            # ATM Index casts to object, so we get python ints/floats
+            same_type = False
+
+        with tm.assert_produces_warning(None):
+            obj = box(values)
+
+        result = np.minimum.reduce(obj)
+        if box is pd.DataFrame:
+            expected = obj.min(numeric_only=False)
+            tm.assert_series_equal(result, expected)
+        else:
+            expected = values[0]
+            assert result == expected
+            if same_type:
+                # check we have e.g. Timestamp instead of dt64
+                assert type(result) == type(expected)
+
+
+@pytest.mark.parametrize("type_", [list, deque, tuple])
+def test_binary_ufunc_other_types(type_):
+    a = pd.Series([1, 2, 3], name="name")
+    b = type_([3, 4, 5])
+
+    result = np.add(a, b)
+    expected = pd.Series(np.add(a.to_numpy(), b), name="name")
+    tm.assert_series_equal(result, expected)
+
+
+def test_object_dtype_ok():
+    class Thing:
+        def __init__(self, value) -> None:
+            self.value = value
+
+        def __add__(self, other):
+            other = getattr(other, "value", other)
+            return type(self)(self.value + other)
+
+        def __eq__(self, other) -> bool:
+            return type(other) is Thing and self.value == other.value
+
+        def __repr__(self) -> str:
+            return f"Thing({self.value})"
+
+    s = pd.Series([Thing(1), Thing(2)])
+    result = np.add(s, Thing(1))
+    expected = pd.Series([Thing(2), Thing(3)])
+    tm.assert_series_equal(result, expected)
+
+
+def test_outer():
+    # https://github.com/pandas-dev/pandas/issues/27186
+    ser = pd.Series([1, 2, 3])
+    obj = np.array([1, 2, 3])
+
+    with pytest.raises(NotImplementedError, match=""):
+        np.subtract.outer(ser, obj)
+
+
+def test_np_matmul():
+    # GH26650
+    df1 = pd.DataFrame(data=[[-1, 1, 10]])
+    df2 = pd.DataFrame(data=[-1, 1, 10])
+    expected = pd.DataFrame(data=[102])
+
+    result = np.matmul(df1, df2)
+    tm.assert_frame_equal(expected, result)
+
+
+def test_array_ufuncs_for_many_arguments():
+    # GH39853
+    def add3(x, y, z):
+        return x + y + z
+
+    ufunc = np.frompyfunc(add3, 3, 1)
+    ser = pd.Series([1, 2])
+
+    result = ufunc(ser, ser, 1)
+    expected = pd.Series([3, 5], dtype=object)
+    tm.assert_series_equal(result, expected)
+
+    df = pd.DataFrame([[1, 2]])
+
+    msg = (
+        "Cannot apply ufunc <ufunc 'add3 (vectorized)'> "
+        "to mixed DataFrame and Series inputs."
+ ) + with pytest.raises(NotImplementedError, match=re.escape(msg)): + ufunc(ser, ser, df) + + +# TODO(CoW) see https://github.com/pandas-dev/pandas/pull/51082 +@td.skip_copy_on_write_not_yet_implemented +def test_np_fix(): + # np.fix is not a ufunc but is composed of several ufunc calls under the hood + # with `out` and `where` keywords + ser = pd.Series([-1.5, -0.5, 0.5, 1.5]) + result = np.fix(ser) + expected = pd.Series([-1.0, -0.0, 0.0, 1.0]) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_unary.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..8f153788e413c5e9198dc35867bc628823555dbf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_unary.py @@ -0,0 +1,50 @@ +import pytest + +from pandas import Series +import pandas._testing as tm + + +class TestSeriesUnaryOps: + # __neg__, __pos__, __invert__ + + def test_neg(self): + ser = Series(range(5), dtype="float64", name="series") + tm.assert_series_equal(-ser, -1 * ser) + + def test_invert(self): + ser = Series(range(5), dtype="float64", name="series") + tm.assert_series_equal(-(ser < 0), ~(ser < 0)) + + @pytest.mark.parametrize( + "source, neg_target, abs_target", + [ + ([1, 2, 3], [-1, -2, -3], [1, 2, 3]), + ([1, 2, None], [-1, -2, None], [1, 2, None]), + ], + ) + def test_all_numeric_unary_operators( + self, any_numeric_ea_dtype, source, neg_target, abs_target + ): + # GH38794 + dtype = any_numeric_ea_dtype + ser = Series(source, dtype=dtype) + neg_result, pos_result, abs_result = -ser, +ser, abs(ser) + if dtype.startswith("U"): + neg_target = -Series(source, dtype=dtype) + else: + neg_target = Series(neg_target, dtype=dtype) + + abs_target = Series(abs_target, dtype=dtype) + + tm.assert_series_equal(neg_result, neg_target) + tm.assert_series_equal(pos_result, ser) + tm.assert_series_equal(abs_result, abs_target) + + @pytest.mark.parametrize("op", ["__neg__", "__abs__"]) + def test_unary_float_op_mask(self, float_ea_dtype, op): + dtype = float_ea_dtype + ser = Series([1.1, 2.2, 3.3], dtype=dtype) + result = getattr(ser, op)() + target = result.copy(deep=True) + ser[0] = None + tm.assert_series_equal(result, target) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_validate.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_validate.py new file mode 100644 index 0000000000000000000000000000000000000000..3c867f7582b7d3250bf5e009ffbf7545da404712 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/series/test_validate.py @@ -0,0 +1,26 @@ +import pytest + + +@pytest.mark.parametrize( + "func", + [ + "reset_index", + "_set_name", + "sort_values", + "sort_index", + "rename", + "dropna", + "drop_duplicates", + ], +) +@pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) +def test_validate_bool_args(string_series, func, inplace): + """Tests for error handling related to data types of method arguments.""" + msg = 'For argument "inplace" expected type bool' + kwargs = {"inplace": inplace} + + if func == "_set_name": + kwargs["name"] = "hello" + + with pytest.raises(ValueError, match=msg): + getattr(string_series, func)(**kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_aggregation.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_aggregation.py new file mode 100644 index 
0000000000000000000000000000000000000000..7695c953712ed9925e4e804d0db1e8cf606a97eb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_aggregation.py
@@ -0,0 +1,93 @@
+import numpy as np
+import pytest
+
+from pandas.core.apply import (
+    _make_unique_kwarg_list,
+    maybe_mangle_lambdas,
+)
+
+
+def test_maybe_mangle_lambdas_passthrough():
+    assert maybe_mangle_lambdas("mean") == "mean"
+    assert maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"
+    # don't mangle a single lambda.
+    assert maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"
+
+
+def test_maybe_mangle_lambdas_listlike():
+    aggfuncs = [lambda x: 1, lambda x: 2]
+    result = maybe_mangle_lambdas(aggfuncs)
+    assert result[0].__name__ == "<lambda_0>"
+    assert result[1].__name__ == "<lambda_1>"
+    assert aggfuncs[0](None) == result[0](None)
+    assert aggfuncs[1](None) == result[1](None)
+
+
+def test_maybe_mangle_lambdas():
+    func = {"A": [lambda x: 0, lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+
+def test_maybe_mangle_lambdas_args():
+    func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+    assert func["A"][0](0, 1) == (0, 1, 1)
+    assert func["A"][0](0, 1, 2) == (0, 1, 2)
+    assert func["A"][0](0, 2, b=3) == (0, 2, 3)
+
+
+def test_maybe_mangle_lambdas_named():
+    func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}
+    result = maybe_mangle_lambdas(func)
+    assert result == func
+
+
+@pytest.mark.parametrize(
+    "order, expected_reorder",
+    [
+        (
+            [
+                ("height", "<lambda>"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>"),
+                ("weight", "<lambda>"),
+            ],
+            [
+                ("height", "<lambda>_0"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>_1"),
+                ("weight", "<lambda>"),
+            ],
+        ),
+        (
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+            ],
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>_0"),
+                ("col1", "<lambda>_1"),
+                ("col1", "<lambda>_2"),
+            ],
+        ),
+        (
+            [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],
+            [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],
+        ),
+    ],
+)
+def test_make_unique(order, expected_reorder):
+    # GH 27519, test that make_unique reorders duplicated kwargs correctly
+    result = _make_unique_kwarg_list(order)
+
+    assert result == expected_reorder
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_algos.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_algos.py
new file mode 100644
index 0000000000000000000000000000000000000000..718d1b3ee2e834507919cd1e46b2e2bead191589
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_algos.py
@@ -0,0 +1,2041 @@
+from datetime import datetime
+import struct
+
+import numpy as np
+import pytest
+
+from pandas._libs import (
+    algos as libalgos,
+    hashtable as ht,
+)
+
+from pandas.core.dtypes.common import (
+    is_bool_dtype,
+    is_complex_dtype,
+    is_float_dtype,
+    is_integer_dtype,
+    is_object_dtype,
+)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+import pandas as pd
+from pandas import (
+    Categorical,
+    CategoricalIndex,
+    DataFrame,
+    DatetimeIndex,
+    Index,
+    IntervalIndex,
+    MultiIndex,
+    NaT,
+    Period,
+    PeriodIndex,
+    Series,
+    Timedelta,
+    Timestamp,
+    cut,
+    date_range,
+    timedelta_range,
+    to_datetime,
+    to_timedelta,
+)
+import pandas._testing as tm
+import pandas.core.algorithms as algos
+from pandas.core.arrays import (
+    DatetimeArray,
+    TimedeltaArray,
+)
+import pandas.core.common as com
+
+
+class TestFactorize:
+    def 
test_factorize_complex(self): + # GH#17927 + array = [1, 2, 2 + 1j] + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + labels, uniques = algos.factorize(array) + + expected_labels = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(labels, expected_labels) + + # Should return a complex dtype in the future + expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize(self, index_or_series_obj, sort): + obj = index_or_series_obj + result_codes, result_uniques = obj.factorize(sort=sort) + + constructor = Index + if isinstance(obj, MultiIndex): + constructor = MultiIndex.from_tuples + expected_arr = obj.unique() + if expected_arr.dtype == np.float16: + expected_arr = expected_arr.astype(np.float32) + expected_uniques = constructor(expected_arr) + if ( + isinstance(obj, Index) + and expected_uniques.dtype == bool + and obj.dtype == object + ): + expected_uniques = expected_uniques.astype(object) + + if sort: + expected_uniques = expected_uniques.sort_values() + + # construct an integer ndarray so that + # `expected_uniques.take(expected_codes)` is equal to `obj` + expected_uniques_list = list(expected_uniques) + expected_codes = [expected_uniques_list.index(val) for val in obj] + expected_codes = np.asarray(expected_codes, dtype=np.intp) + + tm.assert_numpy_array_equal(result_codes, expected_codes) + tm.assert_index_equal(result_uniques, expected_uniques, exact=True) + + def test_series_factorize_use_na_sentinel_false(self): + # GH#35667 + values = np.array([1, 2, 1, np.nan]) + ser = Series(values) + codes, uniques = ser.factorize(use_na_sentinel=False) + + expected_codes = np.array([0, 1, 0, 2], dtype=np.intp) + expected_uniques = Index([1.0, 2.0, np.nan]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) + + def test_basic(self): + items = np.array(["a", "b", "b", "a", "a", "c", "c", "c"], dtype=object) + codes, uniques = algos.factorize(items) + tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object)) + + codes, uniques = algos.factorize(items, sort=True) + exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(uniques, exp) + + arr = np.arange(5, dtype=np.intp)[::-1] + + codes, uniques = algos.factorize(arr) + exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + codes, uniques = algos.factorize(arr, sort=True) + exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + arr = np.arange(5.0)[::-1] + + codes, uniques = algos.factorize(arr) + exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + codes, uniques = algos.factorize(arr, sort=True) + exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype) + tm.assert_numpy_array_equal(uniques, exp) + + def test_mixed(self): + # doc 
example reshaping.rst + x = Series(["A", "A", np.nan, "B", 3.14, np.inf]) + codes, uniques = algos.factorize(x) + + exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = Index(["A", "B", 3.14, np.inf]) + tm.assert_index_equal(uniques, exp) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = Index([3.14, np.inf, "A", "B"]) + tm.assert_index_equal(uniques, exp) + + def test_factorize_datetime64(self): + # M8 + v1 = Timestamp("20130101 09:00:00.00004") + v2 = Timestamp("20130101") + x = Series([v1, v1, v1, v2, v2, v1]) + codes, uniques = algos.factorize(x) + + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = DatetimeIndex([v1, v2]) + tm.assert_index_equal(uniques, exp) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + exp = DatetimeIndex([v2, v1]) + tm.assert_index_equal(uniques, exp) + + def test_factorize_period(self): + # period + v1 = Period("201302", freq="M") + v2 = Period("201303", freq="M") + x = Series([v1, v1, v1, v2, v2, v1]) + + # periods are not 'sorted' as they are converted back into an index + codes, uniques = algos.factorize(x) + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, PeriodIndex([v1, v2])) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, PeriodIndex([v1, v2])) + + def test_factorize_timedelta(self): + # GH 5986 + v1 = to_timedelta("1 day 1 min") + v2 = to_timedelta("1 day") + x = Series([v1, v2, v1, v1, v2, v2, v1]) + codes, uniques = algos.factorize(x) + exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, to_timedelta([v1, v2])) + + codes, uniques = algos.factorize(x, sort=True) + exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, exp) + tm.assert_index_equal(uniques, to_timedelta([v2, v1])) + + def test_factorize_nan(self): + # nan should map to na_sentinel, not reverse_indexer[na_sentinel] + # rizer.factorize should not raise an exception if na_sentinel indexes + # outside of reverse_indexer + key = np.array([1, 2, 1, np.nan], dtype="O") + rizer = ht.ObjectFactorizer(len(key)) + for na_sentinel in (-1, 20): + ids = rizer.factorize(key, na_sentinel=na_sentinel) + expected = np.array([0, 1, 0, na_sentinel], dtype=np.intp) + assert len(set(key)) == len(set(expected)) + tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel) + tm.assert_numpy_array_equal(ids, expected) + + def test_factorizer_with_mask(self): + # GH#49549 + data = np.array([1, 2, 3, 1, 1, 0], dtype="int64") + mask = np.array([False, False, False, False, False, True]) + rizer = ht.Int64Factorizer(len(data)) + result = rizer.factorize(data, mask=mask) + expected = np.array([0, 1, 2, 0, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + expected_uniques = np.array([1, 2, 3], dtype="int64") + tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques) + + def test_factorizer_object_with_nan(self): + # GH#49549 + data = np.array([1, 2, 3, 1, np.nan]) + rizer = ht.ObjectFactorizer(len(data)) + result = rizer.factorize(data.astype(object)) + expected = 
np.array([0, 1, 2, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + expected_uniques = np.array([1, 2, 3], dtype=object) + tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + [(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), "nonsense"], + ), + ( + [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), (1, 2, 3)], + ), + ([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]), + ], + ) + def test_factorize_tuple_list(self, data, expected_codes, expected_uniques): + # GH9454 + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + codes, uniques = pd.factorize(data) + + tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp)) + + expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques_array) + + def test_complex_sorting(self): + # gh 12666 - check no segfault + x17 = np.array([complex(i) for i in range(17)], dtype=object) + + msg = "'[<>]' not supported between instances of .*" + with pytest.raises(TypeError, match=msg): + algos.factorize(x17[::-1], sort=True) + + def test_numeric_dtype_factorize(self, any_real_numpy_dtype): + # GH41132 + dtype = any_real_numpy_dtype + data = np.array([1, 2, 2, 1], dtype=dtype) + expected_codes = np.array([0, 1, 1, 0], dtype=np.intp) + expected_uniques = np.array([1, 2], dtype=dtype) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_float64_factorize(self, writable): + data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp) + expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_uint64_factorize(self, writable): + data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0], dtype=np.intp) + expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_int64_factorize(self, writable): + data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0], dtype=np.intp) + expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_string_factorize(self, writable): + data = np.array(["a", "c", "a", "b", "c"], dtype=object) + data.setflags(write=writable) + expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp) + expected_uniques = np.array(["a", "c", "b"], dtype=object) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_object_factorize(self, writable): + data = np.array(["a", "c", None, np.nan, "a", "b", 
NaT, "c"], dtype=object) + data.setflags(write=writable) + expected_codes = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp) + expected_uniques = np.array(["a", "c", "b"], dtype=object) + + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + def test_datetime64_factorize(self, writable): + # GH35650 Verify whether read-only datetime64 array can be factorized + data = np.array([np.datetime64("2020-01-01T00:00:00.000")], dtype="M8[ns]") + data.setflags(write=writable) + expected_codes = np.array([0], dtype=np.intp) + expected_uniques = np.array( + ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]" + ) + + codes, uniques = pd.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize_rangeindex(self, sort): + # increasing -> sort doesn't matter + ri = pd.RangeIndex.from_range(range(10)) + expected = np.arange(10, dtype=np.intp), ri + + result = algos.factorize(ri, sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + result = ri.factorize(sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize_rangeindex_decreasing(self, sort): + # decreasing -> sort matters + ri = pd.RangeIndex.from_range(range(10)) + expected = np.arange(10, dtype=np.intp), ri + + ri2 = ri[::-1] + expected = expected[0], ri2 + if sort: + expected = expected[0][::-1], expected[1][::-1] + + result = algos.factorize(ri2, sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + result = ri2.factorize(sort=sort) + tm.assert_numpy_array_equal(result[0], expected[0]) + tm.assert_index_equal(result[1], expected[1], exact=True) + + def test_deprecate_order(self): + # gh 19727 - check warning is raised for deprecated keyword, order. + # Test not valid once order keyword is removed. + data = np.array([2**63, 1, 2**63], dtype=np.uint64) + with pytest.raises(TypeError, match="got an unexpected keyword"): + algos.factorize(data, order=True) + with tm.assert_produces_warning(False): + algos.factorize(data) + + @pytest.mark.parametrize( + "data", + [ + np.array([0, 1, 0], dtype="u8"), + np.array([-(2**63), 1, -(2**63)], dtype="i8"), + np.array(["__nan__", "foo", "__nan__"], dtype="object"), + ], + ) + def test_parametrized_factorize_na_value_default(self, data): + # arrays that include the NA default for that type, but isn't used. 
+ codes, uniques = algos.factorize(data) + expected_uniques = data[[0, 1]] + expected_codes = np.array([0, 1, 0], dtype=np.intp) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize( + "data, na_value", + [ + (np.array([0, 1, 0, 2], dtype="u8"), 0), + (np.array([1, 0, 1, 2], dtype="u8"), 1), + (np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)), + (np.array([1, -(2**63), 1, 0], dtype="i8"), 1), + (np.array(["a", "", "a", "b"], dtype=object), "a"), + (np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()), + (np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)), + ], + ) + def test_parametrized_factorize_na_value(self, data, na_value): + codes, uniques = algos.factorize_array(data, na_value=na_value) + expected_uniques = data[[1, 3]] + expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize( + "data, uniques", + [ + ( + np.array(["b", "a", None, "b"], dtype=object), + np.array(["b", "a"], dtype=object), + ), + ( + pd.array([2, 1, np.nan, 2], dtype="Int64"), + pd.array([2, 1], dtype="Int64"), + ), + ], + ids=["numpy_array", "extension_array"], + ) + def test_factorize_use_na_sentinel(self, sort, data, uniques): + codes, uniques = algos.factorize(data, sort=sort, use_na_sentinel=True) + if sort: + expected_codes = np.array([1, 0, -1, 1], dtype=np.intp) + expected_uniques = algos.safe_sort(uniques) + else: + expected_codes = np.array([0, 1, -1, 0], dtype=np.intp) + expected_uniques = uniques + tm.assert_numpy_array_equal(codes, expected_codes) + if isinstance(data, np.ndarray): + tm.assert_numpy_array_equal(uniques, expected_uniques) + else: + tm.assert_extension_array_equal(uniques, expected_uniques) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + ["a", None, "b", "a"], + np.array([0, 1, 2, 0], dtype=np.dtype("intp")), + np.array(["a", np.nan, "b"], dtype=object), + ), + ( + ["a", np.nan, "b", "a"], + np.array([0, 1, 2, 0], dtype=np.dtype("intp")), + np.array(["a", np.nan, "b"], dtype=object), + ), + ], + ) + def test_object_factorize_use_na_sentinel_false( + self, data, expected_codes, expected_uniques + ): + codes, uniques = algos.factorize( + np.array(data, dtype=object), use_na_sentinel=False + ) + + tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True) + tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + [1, None, 1, 2], + np.array([0, 1, 0, 2], dtype=np.dtype("intp")), + np.array([1, np.nan, 2], dtype="O"), + ), + ( + [1, np.nan, 1, 2], + np.array([0, 1, 0, 2], dtype=np.dtype("intp")), + np.array([1, np.nan, 2], dtype=np.float64), + ), + ], + ) + def test_int_factorize_use_na_sentinel_false( + self, data, expected_codes, expected_uniques + ): + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + codes, uniques = algos.factorize(data, use_na_sentinel=False) + + tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True) + tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True) + + @pytest.mark.parametrize( + "data, expected_codes, expected_uniques", + [ + ( + Index(Categorical(["a", "a", "b"])), + np.array([0, 0, 1], dtype=np.intp), + 
CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"), + ), + ( + Series(Categorical(["a", "a", "b"])), + np.array([0, 0, 1], dtype=np.intp), + CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"), + ), + ( + Series(DatetimeIndex(["2017", "2017"], tz="US/Eastern")), + np.array([0, 0], dtype=np.intp), + DatetimeIndex(["2017"], tz="US/Eastern"), + ), + ], + ) + def test_factorize_mixed_values(self, data, expected_codes, expected_uniques): + # GH 19721 + codes, uniques = algos.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) + + def test_factorize_interval_non_nano(self, unit): + # GH#56099 + left = DatetimeIndex(["2016-01-01", np.nan, "2015-10-11"]).as_unit(unit) + right = DatetimeIndex(["2016-01-02", np.nan, "2015-10-15"]).as_unit(unit) + idx = IntervalIndex.from_arrays(left, right) + codes, cats = idx.factorize() + assert cats.dtype == f"interval[datetime64[{unit}], right]" + + ts = Timestamp(0).as_unit(unit) + idx2 = IntervalIndex.from_arrays(left - ts, right - ts) + codes2, cats2 = idx2.factorize() + assert cats2.dtype == f"interval[timedelta64[{unit}], right]" + + idx3 = IntervalIndex.from_arrays( + left.tz_localize("US/Pacific"), right.tz_localize("US/Pacific") + ) + codes3, cats3 = idx3.factorize() + assert cats3.dtype == f"interval[datetime64[{unit}, US/Pacific], right]" + + +class TestUnique: + def test_ints(self): + arr = np.random.default_rng(2).integers(0, 100, size=50) + + result = algos.unique(arr) + assert isinstance(result, np.ndarray) + + def test_objects(self): + arr = np.random.default_rng(2).integers(0, 100, size=50).astype("O") + + result = algos.unique(arr) + assert isinstance(result, np.ndarray) + + def test_object_refcount_bug(self): + lst = np.array(["A", "B", "C", "D", "E"], dtype=object) + for i in range(1000): + len(algos.unique(lst)) + + def test_on_index_object(self): + mindex = MultiIndex.from_arrays( + [np.arange(5).repeat(5), np.tile(np.arange(5), 5)] + ) + expected = mindex.values + expected.sort() + + mindex = mindex.repeat(2) + + result = pd.unique(mindex) + result.sort() + + tm.assert_almost_equal(result, expected) + + def test_dtype_preservation(self, any_numpy_dtype): + # GH 15442 + if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES): + data = [1, 2, 2] + uniques = [1, 2] + elif is_integer_dtype(any_numpy_dtype): + data = [1, 2, 2] + uniques = [1, 2] + elif is_float_dtype(any_numpy_dtype): + data = [1, 2, 2] + uniques = [1.0, 2.0] + elif is_complex_dtype(any_numpy_dtype): + data = [complex(1, 0), complex(2, 0), complex(2, 0)] + uniques = [complex(1, 0), complex(2, 0)] + elif is_bool_dtype(any_numpy_dtype): + data = [True, True, False] + uniques = [True, False] + elif is_object_dtype(any_numpy_dtype): + data = ["A", "B", "B"] + uniques = ["A", "B"] + else: + # datetime64[ns]/M8[ns]/timedelta64[ns]/m8[ns] tested elsewhere + data = [1, 2, 2] + uniques = [1, 2] + + result = Series(data, dtype=any_numpy_dtype).unique() + expected = np.array(uniques, dtype=any_numpy_dtype) + + if any_numpy_dtype in tm.STRING_DTYPES: + expected = expected.astype(object) + + if expected.dtype.kind in ["m", "M"]: + # We get TimedeltaArray/DatetimeArray + assert isinstance(result, (DatetimeArray, TimedeltaArray)) + result = np.array(result) + tm.assert_numpy_array_equal(result, expected) + + def test_datetime64_dtype_array_returned(self): + # GH 9431 + expected = np.array( + [ + "2015-01-03T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + ], + dtype="M8[ns]", + 
) + + dt_index = to_datetime( + [ + "2015-01-03T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + "2015-01-01T00:00:00.000000000", + ] + ) + result = algos.unique(dt_index) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + s = Series(dt_index) + result = algos.unique(s) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + arr = s.values + result = algos.unique(arr) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + def test_datetime_non_ns(self): + a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]") + result = pd.unique(a) + expected = np.array(["2000", "2001"], dtype="datetime64[s]") + tm.assert_numpy_array_equal(result, expected) + + def test_timedelta_non_ns(self): + a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]") + result = pd.unique(a) + expected = np.array([2000, 2001], dtype="timedelta64[s]") + tm.assert_numpy_array_equal(result, expected) + + def test_timedelta64_dtype_array_returned(self): + # GH 9431 + expected = np.array([31200, 45678, 10000], dtype="m8[ns]") + + td_index = to_timedelta([31200, 45678, 31200, 10000, 45678]) + result = algos.unique(td_index) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + s = Series(td_index) + result = algos.unique(s) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + arr = s.values + result = algos.unique(arr) + tm.assert_numpy_array_equal(result, expected) + assert result.dtype == expected.dtype + + def test_uint64_overflow(self): + s = Series([1, 2, 2**63, 2**63], dtype=np.uint64) + exp = np.array([1, 2, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.unique(s), exp) + + def test_nan_in_object_array(self): + duplicated_items = ["a", np.nan, "c", "c"] + result = pd.unique(np.array(duplicated_items, dtype=object)) + expected = np.array(["a", np.nan, "c"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_categorical(self): + # we are expecting to return in the order + # of appearance + expected = Categorical(list("bac")) + + # we are expecting to return in the order + # of the categories + expected_o = Categorical(list("bac"), categories=list("abc"), ordered=True) + + # GH 15939 + c = Categorical(list("baabc")) + result = c.unique() + tm.assert_categorical_equal(result, expected) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected) + + c = Categorical(list("baabc"), ordered=True) + result = c.unique() + tm.assert_categorical_equal(result, expected_o) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected_o) + + # Series of categorical dtype + s = Series(Categorical(list("baabc")), name="foo") + result = s.unique() + tm.assert_categorical_equal(result, expected) + + result = pd.unique(s) + tm.assert_categorical_equal(result, expected) + + # CI -> return CI + ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc"))) + expected = CategoricalIndex(expected) + result = ci.unique() + tm.assert_index_equal(result, expected) + + result = pd.unique(ci) + tm.assert_index_equal(result, expected) + + def test_datetime64tz_aware(self, unit): + # GH 15939 + + dti = Index( + [ + Timestamp("20160101", tz="US/Eastern"), + Timestamp("20160101", tz="US/Eastern"), + ] + ).as_unit(unit) + ser = Series(dti) + + result = ser.unique() + expected = dti[:1]._data + tm.assert_extension_array_equal(result, expected) + + result = 
dti.unique() + expected = dti[:1] + tm.assert_index_equal(result, expected) + + result = pd.unique(ser) + expected = dti[:1]._data + tm.assert_extension_array_equal(result, expected) + + result = pd.unique(dti) + expected = dti[:1] + tm.assert_index_equal(result, expected) + + def test_order_of_appearance(self): + # 9346 + # light testing of guarantee of order of appearance + # these also are the doc-examples + result = pd.unique(Series([2, 1, 3, 3])) + tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype="int64")) + + result = pd.unique(Series([2] + [1] * 5)) + tm.assert_numpy_array_equal(result, np.array([2, 1], dtype="int64")) + + msg = "unique with argument that is not not a Series, Index," + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(list("aabc")) + expected = np.array(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = pd.unique(Series(Categorical(list("aabc")))) + expected = Categorical(list("abc")) + tm.assert_categorical_equal(result, expected) + + def test_order_of_appearance_dt64(self, unit): + ser = Series([Timestamp("20160101"), Timestamp("20160101")]).dt.as_unit(unit) + result = pd.unique(ser) + expected = np.array(["2016-01-01T00:00:00.000000000"], dtype=f"M8[{unit}]") + tm.assert_numpy_array_equal(result, expected) + + def test_order_of_appearance_dt64tz(self, unit): + dti = DatetimeIndex( + [ + Timestamp("20160101", tz="US/Eastern"), + Timestamp("20160101", tz="US/Eastern"), + ] + ).as_unit(unit) + result = pd.unique(dti) + expected = DatetimeIndex( + ["2016-01-01 00:00:00"], dtype=f"datetime64[{unit}, US/Eastern]", freq=None + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "arg ,expected", + [ + (("1", "1", "2"), np.array(["1", "2"], dtype=object)), + (("foo",), np.array(["foo"], dtype=object)), + ], + ) + def test_tuple_with_strings(self, arg, expected): + # see GH 17108 + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(arg) + tm.assert_numpy_array_equal(result, expected) + + def test_obj_none_preservation(self): + # GH 20866 + arr = np.array(["foo", None], dtype=object) + result = pd.unique(arr) + expected = np.array(["foo", None], dtype=object) + + tm.assert_numpy_array_equal(result, expected, strict_nan=True) + + def test_signed_zero(self): + # GH 21866 + a = np.array([-0.0, 0.0]) + result = pd.unique(a) + expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent + tm.assert_numpy_array_equal(result, expected) + + def test_different_nans(self): + # GH 21866 + # create different nans from bit-patterns: + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent + result = pd.unique(a) + expected = np.array([np.nan]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("el_type", [np.float64, object]) + def test_first_nan_kept(self, el_type): + # GH 22295 + # create different nans from bit-patterns: + bits_for_nan1 = 0xFFF8000000000001 + bits_for_nan2 = 0x7FF8000000000001 + NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2], dtype=el_type) + result = pd.unique(a) + assert result.size == 1 + # use bit patterns to identify 
which nan was kept: + result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0] + assert result_nan_bits == bits_for_nan1 + + def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2): + # GH 22295 + if unique_nulls_fixture is unique_nulls_fixture2: + return # skip it, values not unique + a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object) + result = pd.unique(a) + assert result.size == 2 + assert a[0] is unique_nulls_fixture + assert a[1] is unique_nulls_fixture2 + + def test_unique_masked(self, any_numeric_ea_dtype): + # GH#48019 + ser = Series([1, pd.NA, 2] * 3, dtype=any_numeric_ea_dtype) + result = pd.unique(ser) + expected = pd.array([1, pd.NA, 2], dtype=any_numeric_ea_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_nunique_ints(index_or_series_or_array): + # GH#36327 + values = index_or_series_or_array(np.random.default_rng(2).integers(0, 20, 30)) + result = algos.nunique_ints(values) + expected = len(algos.unique(values)) + assert result == expected + + +class TestIsin: + def test_invalid(self): + msg = ( + r"only list-like objects are allowed to be passed to isin\(\), " + r"you passed a `int`" + ) + with pytest.raises(TypeError, match=msg): + algos.isin(1, 1) + with pytest.raises(TypeError, match=msg): + algos.isin(1, [1]) + with pytest.raises(TypeError, match=msg): + algos.isin([1], 1) + + def test_basic(self): + msg = "isin with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin([1, 2], [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(np.array([1, 2]), [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), [1]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), Series([1])) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series([1, 2]), {1}) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(["a", "b"], ["a"]) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series(["a", "b"]), Series(["a"])) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(Series(["a", "b"]), {"a"}) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(["a", "b"], [1]) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + def test_i8(self): + arr = date_range("20130101", periods=3).values + result = algos.isin(arr, [arr[0]]) + expected = np.array([True, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, arr[0:2]) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, set(arr[0:2])) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + arr = timedelta_range("1 day", periods=3).values + result = algos.isin(arr, [arr[0]]) + expected = np.array([True, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, arr[0:2]) + expected = 
np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.isin(arr, set(arr[0:2])) + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"]) + @pytest.mark.parametrize("dtype", ["i8", "f8", "u8"]) + def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1): + # Anything but object and we get all-False shortcut + + dta = date_range("2013-01-01", periods=3)._values + arr = Series(dta.view("i8")).array.view(dtype1) + + comps = arr.view("i8").astype(dtype) + + result = algos.isin(comps, arr) + expected = np.zeros(comps.shape, dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + def test_large(self): + s = date_range("20000101", periods=2000000, freq="s").values + result = algos.isin(s, s[0:2]) + expected = np.zeros(len(s), dtype=bool) + expected[0] = True + expected[1] = True + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"]) + def test_isin_datetimelike_all_nat(self, dtype): + # GH#56427 + dta = date_range("2013-01-01", periods=3)._values + arr = Series(dta.view("i8")).array.view(dtype) + + arr[0] = NaT + result = algos.isin(arr, [NaT]) + expected = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]", "M8[ns, UTC]"]) + def test_isin_datetimelike_strings_deprecated(self, dtype): + # GH#53111 + dta = date_range("2013-01-01", periods=3)._values + arr = Series(dta.view("i8")).array.view(dtype) + + vals = [str(x) for x in arr] + msg = "The behavior of 'isin' with dtype=.* is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = algos.isin(arr, vals) + assert res.all() + + vals2 = np.array(vals, dtype=str) + with tm.assert_produces_warning(FutureWarning, match=msg): + res2 = algos.isin(arr, vals2) + assert res2.all() + + def test_isin_dt64tz_with_nat(self): + # the all-NaT values used to get inferred to tznaive, which was evaluated + # as non-matching GH#56427 + dti = date_range("2016-01-01", periods=3, tz="UTC") + ser = Series(dti) + ser[0] = NaT + + res = algos.isin(ser._values, [NaT]) + exp = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(res, exp) + + def test_categorical_from_codes(self): + # GH 16639 + vals = np.array([0, 1, 2, 0]) + cats = ["a", "b", "c"] + Sd = Series(Categorical([1]).from_codes(vals, cats)) + St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats)) + expected = np.array([True, True, False, True]) + result = algos.isin(Sd, St) + tm.assert_numpy_array_equal(expected, result) + + def test_categorical_isin(self): + vals = np.array([0, 1, 2, 0]) + cats = ["a", "b", "c"] + cat = Categorical([1]).from_codes(vals, cats) + other = Categorical([1]).from_codes(np.array([0, 1]), cats) + + expected = np.array([True, True, False, True]) + result = algos.isin(cat, other) + tm.assert_numpy_array_equal(expected, result) + + def test_same_nan_is_in(self): + # GH 22160 + # nan is special, because from " a is b" doesn't follow "a == b" + # at least, isin() should follow python's "np.nan in [nan] == True" + # casting to -> np.float64 -> another float-object somewhere on + # the way could lead jeopardize this behavior + comps = [np.nan] # could be casted to float64 + values = [np.nan] + expected = np.array([True]) + msg = "isin with argument that is not not a Series" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(comps, values) + tm.assert_numpy_array_equal(expected, result) + + def test_same_nan_is_in_large(self): + # https://github.com/pandas-dev/pandas/issues/22205 + s = np.tile(1.0, 1_000_001) + s[0] = np.nan + result = algos.isin(s, np.array([np.nan, 1])) + expected = np.ones(len(s), dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + def test_same_nan_is_in_large_series(self): + # https://github.com/pandas-dev/pandas/issues/22205 + s = np.tile(1.0, 1_000_001) + series = Series(s) + s[0] = np.nan + result = series.isin(np.array([np.nan, 1])) + expected = Series(np.ones(len(s), dtype=bool)) + tm.assert_series_equal(result, expected) + + def test_same_object_is_in(self): + # GH 22160 + # there could be special treatment for nans + # the user however could define a custom class + # with similar behavior, then we at least should + # fall back to usual python's behavior: "a in [a] == True" + class LikeNan: + def __eq__(self, other) -> bool: + return False + + def __hash__(self): + return 0 + + a, b = LikeNan(), LikeNan() + + msg = "isin with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + # same object -> True + tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True])) + # different objects -> False + tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False])) + + def test_different_nans(self): + # GH 22160 + # all nans are handled as equivalent + + comps = [float("nan")] + values = [float("nan")] + assert comps[0] is not values[0] # different nan-objects + + # as list of python-objects: + result = algos.isin(np.array(comps), values) + tm.assert_numpy_array_equal(np.array([True]), result) + + # as object-array: + result = algos.isin( + np.asarray(comps, dtype=object), np.asarray(values, dtype=object) + ) + tm.assert_numpy_array_equal(np.array([True]), result) + + # as float64-array: + result = algos.isin( + np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64) + ) + tm.assert_numpy_array_equal(np.array([True]), result) + + def test_no_cast(self): + # GH 22160 + # ensure 42 is not casted to a string + comps = ["ss", 42] + values = ["42"] + expected = np.array([False, False]) + msg = "isin with argument that is not not a Series, Index" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.isin(comps, values) + tm.assert_numpy_array_equal(expected, result) + + @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])]) + def test_empty(self, empty): + # see gh-16991 + vals = Index(["a", "b"]) + expected = np.array([False, False]) + + result = algos.isin(vals, empty) + tm.assert_numpy_array_equal(expected, result) + + def test_different_nan_objects(self): + # GH 22119 + comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object) + vals = np.array([float("nan")], dtype=object) + expected = np.array([False, False, True]) + result = algos.isin(comps, vals) + tm.assert_numpy_array_equal(expected, result) + + def test_different_nans_as_float64(self): + # GH 21866 + # create different nans from bit-patterns, + # these nans will land in different buckets in the hash-table + # if no special care is taken + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + + # check that NAN1 and NAN2 are equivalent: + arr = np.array([NAN1, NAN2], dtype=np.float64) + lookup1 = 
np.array([NAN1], dtype=np.float64) + result = algos.isin(arr, lookup1) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + lookup2 = np.array([NAN2], dtype=np.float64) + result = algos.isin(arr, lookup2) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + def test_isin_int_df_string_search(self): + """Comparing df with int`s (1,2) with a string at isin() ("1") + -> should not match values because int 1 is not equal str 1""" + df = DataFrame({"values": [1, 2]}) + result = df.isin(["1"]) + expected_false = DataFrame({"values": [False, False]}) + tm.assert_frame_equal(result, expected_false) + + def test_isin_nan_df_string_search(self): + """Comparing df with nan value (np.nan,2) with a string at isin() ("NaN") + -> should not match values because np.nan is not equal str NaN""" + df = DataFrame({"values": [np.nan, 2]}) + result = df.isin(np.array(["NaN"], dtype=object)) + expected_false = DataFrame({"values": [False, False]}) + tm.assert_frame_equal(result, expected_false) + + def test_isin_float_df_string_search(self): + """Comparing df with floats (1.4245,2.32441) with a string at isin() ("1.4245") + -> should not match values because float 1.4245 is not equal str 1.4245""" + df = DataFrame({"values": [1.4245, 2.32441]}) + result = df.isin(np.array(["1.4245"], dtype=object)) + expected_false = DataFrame({"values": [False, False]}) + tm.assert_frame_equal(result, expected_false) + + def test_isin_unsigned_dtype(self): + # GH#46485 + ser = Series([1378774140726870442], dtype=np.uint64) + result = ser.isin([1378774140726870528]) + expected = Series(False) + tm.assert_series_equal(result, expected) + + +class TestValueCounts: + def test_value_counts(self): + arr = np.random.default_rng(1234).standard_normal(4) + factor = cut(arr, 4) + + # assert isinstance(factor, n) + msg = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(factor) + breaks = [-1.606, -1.018, -0.431, 0.155, 0.741] + index = IntervalIndex.from_breaks(breaks).astype(CategoricalDtype(ordered=True)) + expected = Series([1, 0, 2, 1], index=index, name="count") + tm.assert_series_equal(result.sort_index(), expected.sort_index()) + + def test_value_counts_bins(self): + s = [1, 2, 3, 4] + msg = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(s, bins=1) + expected = Series( + [4], index=IntervalIndex.from_tuples([(0.996, 4.0)]), name="count" + ) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(s, bins=2, sort=False) + expected = Series( + [2, 2], + index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]), + name="count", + ) + tm.assert_series_equal(result, expected) + + def test_value_counts_dtypes(self): + msg2 = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg2): + result = algos.value_counts(np.array([1, 1.0])) + assert len(result) == 1 + + with tm.assert_produces_warning(FutureWarning, match=msg2): + result = algos.value_counts(np.array([1, 1.0]), bins=1) + assert len(result) == 1 + + with tm.assert_produces_warning(FutureWarning, match=msg2): + result = algos.value_counts(Series([1, 1.0, "1"])) # object + assert len(result) == 2 + + msg = "bins argument only works with numeric data" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, 
match=msg2): + algos.value_counts(np.array(["1", 1], dtype=object), bins=1) + + def test_value_counts_nat(self): + td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]") + dt = to_datetime(["NaT", "2014-01-01"]) + + msg = "pandas.value_counts is deprecated" + + for ser in [td, dt]: + with tm.assert_produces_warning(FutureWarning, match=msg): + vc = algos.value_counts(ser) + vc_with_na = algos.value_counts(ser, dropna=False) + assert len(vc) == 1 + assert len(vc_with_na) == 2 + + exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1}, name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result_dt = algos.value_counts(dt) + tm.assert_series_equal(result_dt, exp_dt) + + exp_td = Series({np.timedelta64(10000): 1}, name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result_td = algos.value_counts(td) + tm.assert_series_equal(result_td, exp_td) + + @pytest.mark.parametrize("dtype", [object, "M8[us]"]) + def test_value_counts_datetime_outofbounds(self, dtype): + # GH 13663 + ser = Series( + [ + datetime(3000, 1, 1), + datetime(5000, 1, 1), + datetime(5000, 1, 1), + datetime(6000, 1, 1), + datetime(3000, 1, 1), + datetime(3000, 1, 1), + ], + dtype=dtype, + ) + res = ser.value_counts() + + exp_index = Index( + [datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)], + dtype=dtype, + ) + exp = Series([3, 2, 1], index=exp_index, name="count") + tm.assert_series_equal(res, exp) + + def test_categorical(self): + s = Series(Categorical(list("aaabbc"))) + result = s.value_counts() + expected = Series( + [3, 2, 1], index=CategoricalIndex(["a", "b", "c"]), name="count" + ) + + tm.assert_series_equal(result, expected, check_index_type=True) + + # preserve order? + s = s.cat.as_ordered() + result = s.value_counts() + expected.index = expected.index.as_ordered() + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_categorical_nans(self): + s = Series(Categorical(list("aaaaabbbcc"))) # 4,3,2,1 (nan) + s.iloc[1] = np.nan + result = s.value_counts() + expected = Series( + [4, 3, 2], + index=CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c"]), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + result = s.value_counts(dropna=False) + expected = Series( + [4, 3, 2, 1], index=CategoricalIndex(["a", "b", "c", np.nan]), name="count" + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + # out of order + s = Series( + Categorical(list("aaaaabbbcc"), ordered=True, categories=["b", "a", "c"]) + ) + s.iloc[1] = np.nan + result = s.value_counts() + expected = Series( + [4, 3, 2], + index=CategoricalIndex( + ["a", "b", "c"], + categories=["b", "a", "c"], + ordered=True, + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + result = s.value_counts(dropna=False) + expected = Series( + [4, 3, 2, 1], + index=CategoricalIndex( + ["a", "b", "c", np.nan], categories=["b", "a", "c"], ordered=True + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_categorical_zeroes(self): + # keep the `d` category with 0 + s = Series(Categorical(list("bbbaac"), categories=list("abcd"), ordered=True)) + result = s.value_counts() + expected = Series( + [3, 2, 1, 0], + index=Categorical( + ["b", "a", "c", "d"], categories=list("abcd"), ordered=True + ), + name="count", + ) + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_value_counts_dropna(self): + # 
https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328 + + tm.assert_series_equal( + Series([True, True, False]).value_counts(dropna=True), + Series([2, 1], index=[True, False], name="count"), + ) + tm.assert_series_equal( + Series([True, True, False]).value_counts(dropna=False), + Series([2, 1], index=[True, False], name="count"), + ) + + tm.assert_series_equal( + Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True), + Series([3, 2], index=Index([True, False], dtype=object), name="count"), + ) + tm.assert_series_equal( + Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False), + Series([5, 3, 2], index=[True, False, None], name="count"), + ) + tm.assert_series_equal( + Series([10.3, 5.0, 5.0]).value_counts(dropna=True), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + tm.assert_series_equal( + Series([10.3, 5.0, 5.0]).value_counts(dropna=False), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + + tm.assert_series_equal( + Series([10.3, 5.0, 5.0, None]).value_counts(dropna=True), + Series([2, 1], index=[5.0, 10.3], name="count"), + ) + + result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False) + expected = Series([3, 2, 1], index=[5.0, 10.3, None], name="count") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", (np.float64, object, "M8[ns]")) + def test_value_counts_normalized(self, dtype): + # GH12558 + s = Series([1] * 2 + [2] * 3 + [np.nan] * 5) + s_typed = s.astype(dtype) + result = s_typed.value_counts(normalize=True, dropna=False) + expected = Series( + [0.5, 0.3, 0.2], + index=Series([np.nan, 2.0, 1.0], dtype=dtype), + name="proportion", + ) + tm.assert_series_equal(result, expected) + + result = s_typed.value_counts(normalize=True, dropna=True) + expected = Series( + [0.6, 0.4], index=Series([2.0, 1.0], dtype=dtype), name="proportion" + ) + tm.assert_series_equal(result, expected) + + def test_value_counts_uint64(self): + arr = np.array([2**63], dtype=np.uint64) + expected = Series([1], index=[2**63], name="count") + msg = "pandas.value_counts is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(arr) + + tm.assert_series_equal(result, expected) + + arr = np.array([-1, 2**63], dtype=object) + expected = Series([1, 1], index=[-1, 2**63], name="count") + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.value_counts(arr) + + tm.assert_series_equal(result, expected) + + def test_value_counts_series(self): + # GH#54857 + values = np.array([3, 1, 2, 3, 4, np.nan]) + result = Series(values).value_counts(bins=3) + expected = Series( + [2, 2, 1], + index=IntervalIndex.from_tuples( + [(0.996, 2.0), (2.0, 3.0), (3.0, 4.0)], dtype="interval[float64, right]" + ), + name="count", + ) + tm.assert_series_equal(result, expected) + + +class TestDuplicated: + def test_duplicated_with_nas(self): + keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object) + + result = algos.duplicated(keys) + expected = np.array([False, False, False, True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="first") + expected = np.array([False, False, False, True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="last") + expected = np.array([True, False, True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep=False) + expected = np.array([True, False, True, 
True, False, True]) + tm.assert_numpy_array_equal(result, expected) + + keys = np.empty(8, dtype=object) + for i, t in enumerate( + zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2) + ): + keys[i] = t + + result = algos.duplicated(keys) + falses = [False] * 4 + trues = [True] * 4 + expected = np.array(falses + trues) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep="last") + expected = np.array(trues + falses) + tm.assert_numpy_array_equal(result, expected) + + result = algos.duplicated(keys, keep=False) + expected = np.array(trues + trues) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "case", + [ + np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]), + np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]), + np.array( + [ + 1 + 1j, + 2 + 2j, + 1 + 1j, + 5 + 5j, + 3 + 3j, + 2 + 2j, + 4 + 4j, + 1 + 1j, + 5 + 5j, + 6 + 6j, + ] + ), + np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object), + np.array( + [1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64 + ), + ], + ) + def test_numeric_object_likes(self, case): + exp_first = np.array( + [False, False, True, False, False, True, False, True, True, False] + ) + exp_last = np.array( + [True, True, True, True, False, False, False, False, False, False] + ) + exp_false = exp_first | exp_last + + res_first = algos.duplicated(case, keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = algos.duplicated(case, keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = algos.duplicated(case, keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # index + for idx in [Index(case), Index(case, dtype="category")]: + res_first = idx.duplicated(keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # series + for s in [Series(case), Series(case, dtype="category")]: + res_first = s.duplicated(keep="first") + tm.assert_series_equal(res_first, Series(exp_first)) + + res_last = s.duplicated(keep="last") + tm.assert_series_equal(res_last, Series(exp_last)) + + res_false = s.duplicated(keep=False) + tm.assert_series_equal(res_false, Series(exp_false)) + + def test_datetime_likes(self): + dt = [ + "2011-01-01", + "2011-01-02", + "2011-01-01", + "NaT", + "2011-01-03", + "2011-01-02", + "2011-01-04", + "2011-01-01", + "NaT", + "2011-01-06", + ] + td = [ + "1 days", + "2 days", + "1 days", + "NaT", + "3 days", + "2 days", + "4 days", + "1 days", + "NaT", + "6 days", + ] + + cases = [ + np.array([Timestamp(d) for d in dt]), + np.array([Timestamp(d, tz="US/Eastern") for d in dt]), + np.array([Period(d, freq="D") for d in dt]), + np.array([np.datetime64(d) for d in dt]), + np.array([Timedelta(d) for d in td]), + ] + + exp_first = np.array( + [False, False, True, False, False, True, False, True, True, False] + ) + exp_last = np.array( + [True, True, True, True, False, False, False, False, False, False] + ) + exp_false = exp_first | exp_last + + for case in cases: + res_first = algos.duplicated(case, keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = algos.duplicated(case, keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = algos.duplicated(case, keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # index + for idx in [ + 
Index(case), + Index(case, dtype="category"), + Index(case, dtype=object), + ]: + res_first = idx.duplicated(keep="first") + tm.assert_numpy_array_equal(res_first, exp_first) + + res_last = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(res_last, exp_last) + + res_false = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(res_false, exp_false) + + # series + for s in [ + Series(case), + Series(case, dtype="category"), + Series(case, dtype=object), + ]: + res_first = s.duplicated(keep="first") + tm.assert_series_equal(res_first, Series(exp_first)) + + res_last = s.duplicated(keep="last") + tm.assert_series_equal(res_last, Series(exp_last)) + + res_false = s.duplicated(keep=False) + tm.assert_series_equal(res_false, Series(exp_false)) + + @pytest.mark.parametrize("case", [Index([1, 2, 3]), pd.RangeIndex(0, 3)]) + def test_unique_index(self, case): + assert case.is_unique is True + tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False])) + + @pytest.mark.parametrize( + "arr, uniques", + [ + ( + [(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)], + [(0, 0), (0, 1), (1, 0), (1, 1)], + ), + ( + [("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")], + [("b", "c"), ("a", "b")], + ), + ([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]), + ], + ) + def test_unique_tuples(self, arr, uniques): + # https://github.com/pandas-dev/pandas/issues/16519 + expected = np.empty(len(uniques), dtype=object) + expected[:] = uniques + + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(arr) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "array,expected", + [ + ( + [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j], + # Should return a complex dtype in the future + np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object), + ) + ], + ) + def test_unique_complex_numbers(self, array, expected): + # GH 17927 + msg = "unique with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.unique(array) + tm.assert_numpy_array_equal(result, expected) + + +class TestHashTable: + @pytest.mark.parametrize( + "htable, data", + [ + (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.Float64HashTable, np.arange(1000, dtype=np.float64)), + (ht.Int64HashTable, np.arange(1000, dtype=np.int64)), + (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)), + ], + ) + def test_hashtable_unique(self, htable, data, writable): + # output of maker has guaranteed unique elements + s = Series(data) + if htable == ht.Float64HashTable: + # add NaN for float column + s.loc[500] = np.nan + elif htable == ht.PyObjectHashTable: + # use different NaN types for object column + s.loc[500:502] = [np.nan, None, NaT] + + # create duplicated selection + s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) + s_duplicated.values.setflags(write=writable) + + # drop_duplicates has own cython code (hash_table_func_helper.pxi) + # and is tested separately; keeps first occurrence like ht.unique() + expected_unique = s_duplicated.drop_duplicates(keep="first").values + result_unique = htable().unique(s_duplicated.values) + tm.assert_numpy_array_equal(result_unique, expected_unique) + + # test return_inverse=True + # reconstruction can only succeed if the inverse is correct + result_unique, result_inverse = htable().unique( + s_duplicated.values, 
return_inverse=True + ) + tm.assert_numpy_array_equal(result_unique, expected_unique) + reconstr = result_unique[result_inverse] + tm.assert_numpy_array_equal(reconstr, s_duplicated.values) + + @pytest.mark.parametrize( + "htable, data", + [ + (ht.PyObjectHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.StringHashTable, [f"foo_{i}" for i in range(1000)]), + (ht.Float64HashTable, np.arange(1000, dtype=np.float64)), + (ht.Int64HashTable, np.arange(1000, dtype=np.int64)), + (ht.UInt64HashTable, np.arange(1000, dtype=np.uint64)), + ], + ) + def test_hashtable_factorize(self, htable, writable, data): + # output of maker has guaranteed unique elements + s = Series(data) + if htable == ht.Float64HashTable: + # add NaN for float column + s.loc[500] = np.nan + elif htable == ht.PyObjectHashTable: + # use different NaN types for object column + s.loc[500:502] = [np.nan, None, NaT] + + # create duplicated selection + s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) + s_duplicated.values.setflags(write=writable) + na_mask = s_duplicated.isna().values + + result_unique, result_inverse = htable().factorize(s_duplicated.values) + + # drop_duplicates has own cython code (hash_table_func_helper.pxi) + # and is tested separately; keeps first occurrence like ht.factorize() + # since factorize removes all NaNs, we do the same here + expected_unique = s_duplicated.dropna().drop_duplicates().values + tm.assert_numpy_array_equal(result_unique, expected_unique) + + # reconstruction can only succeed if the inverse is correct. Since + # factorize removes the NaNs, those have to be excluded here as well + result_reconstruct = result_unique[result_inverse[~na_mask]] + expected_reconstruct = s_duplicated.dropna().values + tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct) + + +class TestRank: + @pytest.mark.parametrize( + "arr", + [ + [np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan], + [4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan], + ], + ) + def test_scipy_compat(self, arr): + sp_stats = pytest.importorskip("scipy.stats") + + arr = np.array(arr) + + mask = ~np.isfinite(arr) + arr = arr.copy() + result = libalgos.rank_1d(arr) + arr[mask] = np.inf + exp = sp_stats.rankdata(arr) + exp[mask] = np.nan + tm.assert_almost_equal(result, exp) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_basic(self, writable, dtype): + exp = np.array([1, 2], dtype=np.float64) + + data = np.array([1, 100], dtype=dtype) + data.setflags(write=writable) + ser = Series(data) + result = algos.rank(ser) + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("dtype", [np.float64, np.uint64]) + def test_uint64_overflow(self, dtype): + exp = np.array([1, 2], dtype=np.float64) + + s = Series([1, 2**63], dtype=dtype) + tm.assert_numpy_array_equal(algos.rank(s), exp) + + def test_too_many_ndims(self): + arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) + msg = "Array with ndim > 2 are not supported" + + with pytest.raises(TypeError, match=msg): + algos.rank(arr) + + @pytest.mark.single_cpu + def test_pct_max_many_rows(self): + # GH 18271 + values = np.arange(2**24 + 1) + result = algos.rank(values, pct=True).max() + assert result == 1 + + values = np.arange(2**25 + 2).reshape(2**24 + 1, 2) + result = algos.rank(values, pct=True).max() + assert result == 1 + + +class TestMode: + def test_no_mode(self): + exp = Series([], dtype=np.float64, index=Index([], dtype=int)) + tm.assert_numpy_array_equal(algos.mode(np.array([])), exp.values) + + 
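# Editor's sketch, not part of the upstream suite (the test name is hypothetical): the mode tests below rely on the multimodal contract -- every value tied for the highest count is returned, sorted ascending, as a Series. + def test_mode_multimodal_sketch(self): + ser = Series([1, 1, 2, 2, 3]) + tm.assert_series_equal(ser.mode(), Series([1, 2])) + + 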
@pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"]) + def test_mode_single(self, dt): + # GH 15714 + exp_single = [1] + data_single = [1] + + exp_multi = [1] + data_multi = [1, 1] + + ser = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + ser = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_mode_obj_int(self): + exp = Series([1], dtype=int) + tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values) + + exp = Series(["a", "b", "c"], dtype=object) + tm.assert_numpy_array_equal(algos.mode(exp.values), exp.values) + + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"] + np.typecodes["Float"]) + def test_number_mode(self, dt): + exp_single = [1] + data_single = [1] * 5 + [2] * 3 + + exp_multi = [1, 3] + data_multi = [1] * 5 + [2] * 3 + [3] * 5 + + ser = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + ser = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_strobj_mode(self): + exp = ["b"] + data = ["a"] * 2 + ["b"] * 3 + + ser = Series(data, dtype="c") + exp = Series(exp, dtype="c") + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + @pytest.mark.parametrize("dt", [str, object]) + def test_strobj_multi_char(self, dt): + exp = ["bar"] + data = ["foo"] * 2 + ["bar"] * 3 + + ser = Series(data, dtype=dt) + exp = Series(exp, dtype=dt) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_datelike_mode(self): + exp = Series(["1900-05-03", "2011-01-03", "2013-01-02"], dtype="M8[ns]") + ser = Series(["2011-01-03", "2013-01-02", "1900-05-03"], dtype="M8[ns]") + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series(["2011-01-03", "2013-01-02"], dtype="M8[ns]") + ser = Series( + ["2011-01-03", "2013-01-02", "1900-05-03", "2011-01-03", "2013-01-02"], + dtype="M8[ns]", + ) + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + def test_timedelta_mode(self): + exp = Series(["-1 days", "0 days", "1 days"], dtype="timedelta64[ns]") + ser = Series(["1 days", "-1 days", "0 days"], dtype="timedelta64[ns]") + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series(["2 min", "1 day"], dtype="timedelta64[ns]") + ser = Series( + ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"], + dtype="timedelta64[ns]", + ) + tm.assert_extension_array_equal(algos.mode(ser.values), exp._values) + tm.assert_series_equal(ser.mode(), exp) + + def test_mixed_dtype(self): + exp = Series(["foo"], dtype=object) + ser = Series([1, "foo", "foo"]) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_uint64_overflow(self): + exp = Series([2**63], dtype=np.uint64) + ser = Series([1, 2**63, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.mode(ser.values), 
exp.values) + tm.assert_series_equal(ser.mode(), exp) + + exp = Series([1, 2**63], dtype=np.uint64) + ser = Series([1, 2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(algos.mode(ser.values), exp.values) + tm.assert_series_equal(ser.mode(), exp) + + def test_categorical(self): + c = Categorical([1, 2]) + exp = c + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + c = Categorical([1, "a", "a"]) + exp = Categorical(["a"], categories=[1, "a"]) + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + c = Categorical([1, 1, 2, 3, 3]) + exp = Categorical([1, 3], categories=[1, 2, 3]) + res = Series(c).mode()._values + tm.assert_categorical_equal(res, exp) + + def test_index(self): + idx = Index([1, 2, 3]) + exp = Series([1, 2, 3], dtype=np.int64) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index([1, "a", "a"]) + exp = Series(["a"], dtype=object) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index([1, 1, 2, 3, 3]) + exp = Series([1, 3], dtype=np.int64) + tm.assert_numpy_array_equal(algos.mode(idx), exp.values) + + idx = Index( + ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"], + dtype="timedelta64[ns]", + ) + with pytest.raises(AttributeError, match="TimedeltaIndex"): + # algos.mode expects Arraylike, does *not* unwrap TimedeltaIndex + algos.mode(idx) + + def test_ser_mode_with_name(self): + # GH 46737 + ser = Series([1, 1, 3], name="foo") + result = ser.mode() + expected = Series([1], name="foo") + tm.assert_series_equal(result, expected) + + +class TestDiff: + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_diff_datetimelike_nat(self, dtype): + # NaT - NaT is NaT, not 0 + arr = np.arange(12).astype(np.int64).view(dtype).reshape(3, 4) + arr[:, 2] = arr.dtype.type("NaT", "ns") + result = algos.diff(arr, 1, axis=0) + + expected = np.ones(arr.shape, dtype="timedelta64[ns]") * 4 + expected[:, 2] = np.timedelta64("NaT", "ns") + expected[0, :] = np.timedelta64("NaT", "ns") + + tm.assert_numpy_array_equal(result, expected) + + result = algos.diff(arr.T, 1, axis=1) + tm.assert_numpy_array_equal(result, expected.T) + + def test_diff_ea_axis(self): + dta = date_range("2016-01-01", periods=3, tz="US/Pacific")._data + + msg = "cannot diff DatetimeArray on axis=1" + with pytest.raises(ValueError, match=msg): + algos.diff(dta, 1, axis=1) + + @pytest.mark.parametrize("dtype", ["int8", "int16"]) + def test_diff_low_precision_int(self, dtype): + arr = np.array([0, 1, 1, 0, 0], dtype=dtype) + result = algos.diff(arr, 1) + expected = np.array([np.nan, 1, 0, -1, 0], dtype="float32") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("op", [np.array, pd.array]) +def test_union_with_duplicates(op): + # GH#36289 + lvals = op([3, 1, 3, 4]) + rvals = op([2, 3, 1, 1]) + expected = op([3, 3, 1, 1, 4, 2]) + if isinstance(expected, np.ndarray): + result = algos.union_with_duplicates(lvals, rvals) + tm.assert_numpy_array_equal(result, expected) + else: + result = algos.union_with_duplicates(lvals, rvals) + tm.assert_extension_array_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_downstream.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_downstream.py new file mode 100644 index 0000000000000000000000000000000000000000..51ce73ef54300c5d2f9be4e3988de77843a61a8e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_downstream.py @@ -0,0 +1,362 @@ +""" +Testing that we work in the 
downstream packages +""" +import array +import subprocess +import sys + +import numpy as np +import pytest + +from pandas.errors import IntCastingNaNError +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, +) + + +@pytest.fixture +def df(): + return DataFrame({"A": [1, 2, 3]}) + + +def test_dask(df): + # dask sets "compute.use_numexpr" to False, so catch the current value + # and ensure to reset it afterwards to avoid impacting other tests + olduse = pd.get_option("compute.use_numexpr") + + try: + pytest.importorskip("toolz") + dd = pytest.importorskip("dask.dataframe") + + ddf = dd.from_pandas(df, npartitions=3) + assert ddf.A is not None + assert ddf.compute() is not None + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_dask_ufunc(): + # dask sets "compute.use_numexpr" to False, so catch the current value + # and ensure to reset it afterwards to avoid impacting other tests + olduse = pd.get_option("compute.use_numexpr") + + try: + da = pytest.importorskip("dask.array") + dd = pytest.importorskip("dask.dataframe") + + s = Series([1.5, 2.3, 3.7, 4.0]) + ds = dd.from_pandas(s, npartitions=2) + + result = da.fix(ds).compute() + expected = np.fix(s) + tm.assert_series_equal(result, expected) + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_construct_dask_float_array_int_dtype_match_ndarray(): + # GH#40110 make sure we treat a float-dtype dask array with the same + # rules we would for an ndarray + dd = pytest.importorskip("dask.dataframe") + + arr = np.array([1, 2.5, 3]) + darr = dd.from_array(arr) + + res = Series(darr) + expected = Series(arr) + tm.assert_series_equal(res, expected) + + # GH#49599 in 2.0 we raise instead of silently ignoring the dtype + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + Series(darr, dtype="i8") + + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + arr[2] = np.nan + with pytest.raises(IntCastingNaNError, match=msg): + Series(darr, dtype="i8") + # which is the same as we get with a numpy input + with pytest.raises(IntCastingNaNError, match=msg): + Series(arr, dtype="i8") + + +def test_xarray(df): + pytest.importorskip("xarray") + + assert df.to_xarray() is not None + + +def test_xarray_cftimeindex_nearest(): + # https://github.com/pydata/xarray/issues/3751 + cftime = pytest.importorskip("cftime") + xarray = pytest.importorskip("xarray") + + times = xarray.cftime_range("0001", periods=2) + key = cftime.DatetimeGregorian(2000, 1, 1) + result = times.get_indexer([key], method="nearest") + expected = 1 + assert result == expected + + +@pytest.mark.single_cpu +def test_oo_optimizable(): + # GH 21071 + subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"]) + + +@pytest.mark.single_cpu +def test_oo_optimized_datetime_index_unpickle(): + # GH 42866 + subprocess.check_call( + [ + sys.executable, + "-OO", + "-c", + ( + "import pandas as pd, pickle; " + "pickle.loads(pickle.dumps(pd.date_range('2021-01-01', periods=1)))" + ), + ] + ) + + +def test_statsmodels(): + smf = pytest.importorskip("statsmodels.formula.api") + + df = DataFrame( + {"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)} + ) + smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=df).fit() + + +def test_scikit_learn(): + pytest.importorskip("sklearn") + from sklearn 
import ( + datasets, + svm, + ) + + digits = datasets.load_digits() + clf = svm.SVC(gamma=0.001, C=100.0) + clf.fit(digits.data[:-1], digits.target[:-1]) + clf.predict(digits.data[-1:]) + + +def test_seaborn(): + seaborn = pytest.importorskip("seaborn") + tips = DataFrame( + {"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)} + ) + seaborn.stripplot(x="day", y="total_bill", data=tips) + + +def test_pandas_datareader(): + pytest.importorskip("pandas_datareader") + + +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") +def test_pyarrow(df): + pyarrow = pytest.importorskip("pyarrow") + table = pyarrow.Table.from_pandas(df) + result = table.to_pandas() + tm.assert_frame_equal(result, df) + + +def test_yaml_dump(df): + # GH#42748 + yaml = pytest.importorskip("yaml") + + dumped = yaml.dump(df) + + loaded = yaml.load(dumped, Loader=yaml.Loader) + tm.assert_frame_equal(df, loaded) + + loaded2 = yaml.load(dumped, Loader=yaml.UnsafeLoader) + tm.assert_frame_equal(df, loaded2) + + +@pytest.mark.single_cpu +def test_missing_required_dependency(): + # GH 23868 + # To ensure proper isolation, we pass these flags + # -S : disable site-packages + # -s : disable user site-packages + # -E : disable PYTHON* env vars, especially PYTHONPATH + # https://github.com/MacPython/pandas-wheels/pull/50 + + pyexe = sys.executable.replace("\\", "/") + + # We skip this test if pandas is installed as a site package. We first + # import the package normally and check the path to the module before + # executing the test which imports pandas with site packages disabled. + call = [pyexe, "-c", "import pandas;print(pandas.__file__)"] + output = subprocess.check_output(call).decode() + if "site-packages" in output: + pytest.skip("pandas installed as site package") + + # This test will fail if pandas is installed as a site package. The flags + # prevent pandas being imported and the test will report Failed: DID NOT + # RAISE + call = [pyexe, "-sSE", "-c", "import pandas"] + + msg = ( + rf"Command '\['{pyexe}', '-sSE', '-c', 'import pandas'\]' " + "returned non-zero exit status 1." 
+ ) + + with pytest.raises(subprocess.CalledProcessError, match=msg) as exc: + subprocess.check_output(call, stderr=subprocess.STDOUT) + + output = exc.value.stdout.decode() + for name in ["numpy", "pytz", "dateutil"]: + assert name in output + + +def test_frame_setitem_dask_array_into_new_col(): + # GH#47128 + + # dask sets "compute.use_numexpr" to False, so catch the current value + # and ensure to reset it afterwards to avoid impacting other tests + olduse = pd.get_option("compute.use_numexpr") + + try: + da = pytest.importorskip("dask.array") + + dda = da.array([1, 2]) + df = DataFrame({"a": ["a", "b"]}) + df["b"] = dda + df["c"] = dda + df.loc[[False, True], "b"] = 100 + result = df.loc[[1], :] + expected = DataFrame({"a": ["b"], "b": [100], "c": [2]}, index=[1]) + tm.assert_frame_equal(result, expected) + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_pandas_priority(): + # GH#48347 + + class MyClass: + __pandas_priority__ = 5000 + + def __radd__(self, other): + return self + + left = MyClass() + right = Series(range(3)) + + assert right.__add__(left) is NotImplemented + assert right + left is left + + +@pytest.fixture( + params=[ + "memoryview", + "array", + pytest.param("dask", marks=td.skip_if_no("dask.array")), + pytest.param("xarray", marks=td.skip_if_no("xarray")), + ] +) +def array_likes(request): + """ + Fixture giving a numpy array and a parametrized 'data' object, which can + be a memoryview, array, dask or xarray object created from the numpy array. + """ + # GH#24539 recognize e.g xarray, dask, ... + arr = np.array([1, 2, 3], dtype=np.int64) + + name = request.param + if name == "memoryview": + data = memoryview(arr) + elif name == "array": + data = array.array("i", arr) + elif name == "dask": + import dask.array + + data = dask.array.array(arr) + elif name == "xarray": + import xarray as xr + + data = xr.DataArray(arr) + + return arr, data + + +@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) +def test_from_obscure_array(dtype, array_likes): + # GH#24539 recognize e.g xarray, dask, ... + # Note: we dont do this for PeriodArray bc _from_sequence won't accept + # an array of integers + # TODO: could check with arraylike of Period objects + arr, data = array_likes + + cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] + + depr_msg = f"{cls.__name__}.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + expected = cls(arr) + result = cls._from_sequence(data, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + if not isinstance(data, memoryview): + # FIXME(GH#44431) these raise on memoryview and attempted fix + # fails on py3.10 + func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] + result = func(arr).array + expected = func(data).array + tm.assert_equal(result, expected) + + # Let's check the Indexes while we're here + idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype] + result = idx_cls(arr) + expected = idx_cls(data) + tm.assert_index_equal(result, expected) + + +def test_dataframe_consortium() -> None: + """ + Test some basic methods of the dataframe consortium standard. + + Full testing is done at https://github.com/data-apis/dataframe-api-compat, + this is just to check that the entry point works as expected. 
+ """ + pytest.importorskip("dataframe_api_compat") + df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = df_pd.__dataframe_consortium_standard__() + result_1 = df.get_column_names() + expected_1 = ["a", "b"] + assert result_1 == expected_1 + + ser = Series([1, 2, 3], name="a") + col = ser.__column_consortium_standard__() + assert col.name == "a" + + +def test_xarray_coerce_unit(): + # GH44053 + xr = pytest.importorskip("xarray") + + arr = xr.DataArray([1, 2, 3]) + result = pd.to_datetime(arr, unit="ns") + expected = DatetimeIndex( + [ + "1970-01-01 00:00:00.000000001", + "1970-01-01 00:00:00.000000002", + "1970-01-01 00:00:00.000000003", + ], + dtype="datetime64[ns]", + freq=None, + ) + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_errors.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..aeddc08e4b888c0937a3095a46003613e0115876 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_errors.py @@ -0,0 +1,112 @@ +import pytest + +from pandas.errors import ( + AbstractMethodError, + UndefinedVariableError, +) + +import pandas as pd + + +@pytest.mark.parametrize( + "exc", + [ + "AttributeConflictWarning", + "CSSWarning", + "CategoricalConversionWarning", + "ClosedFileError", + "DataError", + "DatabaseError", + "DtypeWarning", + "EmptyDataError", + "IncompatibilityWarning", + "IndexingError", + "InvalidColumnName", + "InvalidComparison", + "InvalidVersion", + "LossySetitemError", + "MergeError", + "NoBufferPresent", + "NumExprClobberingError", + "NumbaUtilError", + "OptionError", + "OutOfBoundsDatetime", + "ParserError", + "ParserWarning", + "PerformanceWarning", + "PossibleDataLossError", + "PossiblePrecisionLoss", + "PyperclipException", + "SettingWithCopyError", + "SettingWithCopyWarning", + "SpecificationError", + "UnsortedIndexError", + "UnsupportedFunctionCall", + "ValueLabelTypeMismatch", + ], +) +def test_exception_importable(exc): + from pandas import errors + + err = getattr(errors, exc) + assert err is not None + + # check that we can raise on them + + msg = "^$" + + with pytest.raises(err, match=msg): + raise err() + + +def test_catch_oob(): + from pandas import errors + + msg = "Cannot cast 1500-01-01 00:00:00 to unit='ns' without overflow" + with pytest.raises(errors.OutOfBoundsDatetime, match=msg): + pd.Timestamp("15000101").as_unit("ns") + + +@pytest.mark.parametrize( + "is_local", + [ + True, + False, + ], +) +def test_catch_undefined_variable_error(is_local): + variable_name = "x" + if is_local: + msg = f"local variable '{variable_name}' is not defined" + else: + msg = f"name '{variable_name}' is not defined" + + with pytest.raises(UndefinedVariableError, match=msg): + raise UndefinedVariableError(variable_name, is_local) + + +class Foo: + @classmethod + def classmethod(cls): + raise AbstractMethodError(cls, methodtype="classmethod") + + @property + def property(self): + raise AbstractMethodError(self, methodtype="property") + + def method(self): + raise AbstractMethodError(self) + + +def test_AbstractMethodError_classmethod(): + xpr = "This classmethod must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, match=xpr): + Foo.classmethod() + + xpr = "This property must be defined in the concrete class Foo" + with pytest.raises(AbstractMethodError, match=xpr): + Foo().property + + xpr = "This method must be defined in the concrete class Foo" + with 
pytest.raises(AbstractMethodError, match=xpr): + Foo().method() diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_expressions.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..dfec99f0786ebf11a44dedfad8aa8e1015356fab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_expressions.py @@ -0,0 +1,466 @@ +import operator +import re + +import numpy as np +import pytest + +from pandas import option_context +import pandas._testing as tm +from pandas.core.api import ( + DataFrame, + Index, + Series, +) +from pandas.core.computation import expressions as expr + + +@pytest.fixture +def _frame(): + return DataFrame( + np.random.default_rng(2).standard_normal((10001, 4)), + columns=list("ABCD"), + dtype="float64", + ) + + +@pytest.fixture +def _frame2(): + return DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=list("ABCD"), + dtype="float64", + ) + + +@pytest.fixture +def _mixed(_frame): + return DataFrame( + { + "A": _frame["A"].copy(), + "B": _frame["B"].astype("float32"), + "C": _frame["C"].astype("int64"), + "D": _frame["D"].astype("int32"), + } + ) + + +@pytest.fixture +def _mixed2(_frame2): + return DataFrame( + { + "A": _frame2["A"].copy(), + "B": _frame2["B"].astype("float32"), + "C": _frame2["C"].astype("int64"), + "D": _frame2["D"].astype("int32"), + } + ) + + +@pytest.fixture +def _integer(): + return DataFrame( + np.random.default_rng(2).integers(1, 100, size=(10001, 4)), + columns=list("ABCD"), + dtype="int64", + ) + + +@pytest.fixture +def _integer_integers(_integer): + # integers to get a case with zeros + return _integer * np.random.default_rng(2).integers(0, 2, size=np.shape(_integer)) + + +@pytest.fixture +def _integer2(): + return DataFrame( + np.random.default_rng(2).integers(1, 100, size=(101, 4)), + columns=list("ABCD"), + dtype="int64", + ) + + +@pytest.fixture +def _array(_frame): + return _frame["A"].values.copy() + + +@pytest.fixture +def _array2(_frame2): + return _frame2["A"].values.copy() + + +@pytest.fixture +def _array_mixed(_mixed): + return _mixed["D"].values.copy() + + +@pytest.fixture +def _array_mixed2(_mixed2): + return _mixed2["D"].values.copy() + + +@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr") +class TestExpressions: + @staticmethod + def call_op(df, other, flex: bool, opname: str): + if flex: + op = lambda x, y: getattr(x, opname)(y) + op.__name__ = opname + else: + op = getattr(operator, opname) + + with option_context("compute.use_numexpr", False): + expected = op(df, other) + + expr.get_test_result() + + result = op(df, other) + return result, expected + + @pytest.mark.parametrize( + "fixture", + [ + "_integer", + "_integer2", + "_integer_integers", + "_frame", + "_frame2", + "_mixed", + "_mixed2", + ], + ) + @pytest.mark.parametrize("flex", [True, False]) + @pytest.mark.parametrize( + "arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"] + ) + def test_run_arithmetic(self, request, fixture, flex, arith, monkeypatch): + df = request.getfixturevalue(fixture) + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + result, expected = self.call_op(df, df, flex, arith) + + if arith == "truediv": + assert all(x.kind == "f" for x in expected.dtypes.values) + tm.assert_equal(expected, result) + + for i in range(len(df.columns)): + result, expected = self.call_op( + df.iloc[:, i], df.iloc[:, i], flex, arith + ) + if arith == "truediv": + assert 
expected.dtype.kind == "f" + tm.assert_equal(expected, result) + + @pytest.mark.parametrize( + "fixture", + [ + "_integer", + "_integer2", + "_integer_integers", + "_frame", + "_frame2", + "_mixed", + "_mixed2", + ], + ) + @pytest.mark.parametrize("flex", [True, False]) + def test_run_binary(self, request, fixture, flex, comparison_op, monkeypatch): + """ + tests solely that the result is the same whether or not numexpr is + enabled. Need to test whether the function does the correct thing + elsewhere. + """ + df = request.getfixturevalue(fixture) + arith = comparison_op.__name__ + with option_context("compute.use_numexpr", False): + other = df.copy() + 1 + + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + expr.set_test_mode(True) + + result, expected = self.call_op(df, other, flex, arith) + + used_numexpr = expr.get_test_result() + assert used_numexpr, "Did not use numexpr as expected." + tm.assert_equal(expected, result) + + for i in range(len(df.columns)): + binary_comp = other.iloc[:, i] + 1 + self.call_op(df.iloc[:, i], binary_comp, flex, "add") + + def test_invalid(self): + array = np.random.default_rng(2).standard_normal(1_000_001) + array2 = np.random.default_rng(2).standard_normal(100) + + # no op + result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate") + assert not result + + # min elements + result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate") + assert not result + + # ok, we only check on first part of expression + result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate") + assert result + + @pytest.mark.filterwarnings("ignore:invalid value encountered in:RuntimeWarning") + @pytest.mark.parametrize( + "opname,op_str", + [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")], + ) + @pytest.mark.parametrize( + "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] + ) + def test_binary_ops(self, request, opname, op_str, left_fix, right_fix): + left = request.getfixturevalue(left_fix) + right = request.getfixturevalue(right_fix) + + def testit(left, right, opname, op_str): + if opname == "pow": + left = np.abs(left) + + op = getattr(operator, opname) + + # array has 0s + result = expr.evaluate(op, left, left, use_numexpr=True) + expected = expr.evaluate(op, left, left, use_numexpr=False) + tm.assert_numpy_array_equal(result, expected) + + result = expr._can_use_numexpr(op, op_str, right, right, "evaluate") + assert not result + + with option_context("compute.use_numexpr", False): + testit(left, right, opname, op_str) + + expr.set_numexpr_threads(1) + testit(left, right, opname, op_str) + expr.set_numexpr_threads() + testit(left, right, opname, op_str) + + @pytest.mark.parametrize( + "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] + ) + def test_comparison_ops(self, request, comparison_op, left_fix, right_fix): + left = request.getfixturevalue(left_fix) + right = request.getfixturevalue(right_fix) + + def testit(): + f12 = left + 1 + f22 = right + 1 + + op = comparison_op + + result = expr.evaluate(op, left, f12, use_numexpr=True) + expected = expr.evaluate(op, left, f12, use_numexpr=False) + tm.assert_numpy_array_equal(result, expected) + + result = expr._can_use_numexpr(op, op, right, f22, "evaluate") + assert not result + + with option_context("compute.use_numexpr", False): + testit() + + expr.set_numexpr_threads(1) + testit() + expr.set_numexpr_threads() + testit() + + @pytest.mark.parametrize("cond", [True, 
False]) + @pytest.mark.parametrize("fixture", ["_frame", "_frame2", "_mixed", "_mixed2"]) + def test_where(self, request, cond, fixture): + df = request.getfixturevalue(fixture) + + def testit(): + c = np.empty(df.shape, dtype=np.bool_) + c.fill(cond) + result = expr.where(c, df.values, df.values + 1) + expected = np.where(c, df.values, df.values + 1) + tm.assert_numpy_array_equal(result, expected) + + with option_context("compute.use_numexpr", False): + testit() + + expr.set_numexpr_threads(1) + testit() + expr.set_numexpr_threads() + testit() + + @pytest.mark.parametrize( + "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")] + ) + def test_bool_ops_raise_on_arithmetic(self, op_str, opname): + df = DataFrame( + { + "a": np.random.default_rng(2).random(10) > 0.5, + "b": np.random.default_rng(2).random(10) > 0.5, + } + ) + + msg = f"operator '{opname}' not implemented for bool dtypes" + f = getattr(operator, opname) + err_msg = re.escape(msg) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df, df) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, df.b) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, True) + + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df.a) + + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df) + + with pytest.raises(NotImplementedError, match=err_msg): + f(df, True) + + @pytest.mark.parametrize( + "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")] + ) + def test_bool_ops_warn_on_arithmetic(self, op_str, opname): + n = 10 + df = DataFrame( + { + "a": np.random.default_rng(2).random(n) > 0.5, + "b": np.random.default_rng(2).random(n) > 0.5, + } + ) + + subs = {"+": "|", "*": "&", "-": "^"} + sub_funcs = {"|": "or_", "&": "and_", "^": "xor"} + + f = getattr(operator, opname) + fe = getattr(operator, sub_funcs[subs[op_str]]) + + if op_str == "-": + # raises TypeError + return + + with tm.use_numexpr(True, min_elements=5): + with tm.assert_produces_warning(): + r = f(df, df) + e = fe(df, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, df.b) + e = fe(df.a, df.b) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, True) + e = fe(df.a, True) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df.a) + e = fe(False, df.a) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df) + e = fe(False, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df, True) + e = fe(df, True) + tm.assert_frame_equal(r, e) + + @pytest.mark.parametrize( + "test_input,expected", + [ + ( + DataFrame( + [[0, 1, 2, "aa"], [0, 1, 2, "aa"]], columns=["a", "b", "c", "dtype"] + ), + DataFrame([[False, False], [False, False]], columns=["a", "dtype"]), + ), + ( + DataFrame( + [[0, 3, 2, "aa"], [0, 4, 2, "aa"], [0, 1, 1, "bb"]], + columns=["a", "b", "c", "dtype"], + ), + DataFrame( + [[False, False], [False, False], [False, False]], + columns=["a", "dtype"], + ), + ), + ], + ) + def test_bool_ops_column_name_dtype(self, test_input, expected): + # GH 22383 - .ne fails if columns containing column name 'dtype' + result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") + ) + @pytest.mark.parametrize("axis", (0, 1)) + def test_frame_series_axis(self, axis, arith, _frame, 
monkeypatch): + # GH#26736 Dataframe.floordiv(Series, axis=1) fails + + df = _frame + if axis == 1: + other = df.iloc[0, :] + else: + other = df.iloc[:, 0] + + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + + op_func = getattr(df, arith) + + with option_context("compute.use_numexpr", False): + expected = op_func(other, axis=axis) + + result = op_func(other, axis=axis) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize( + "op", + [ + "__mod__", + "__rmod__", + "__floordiv__", + "__rfloordiv__", + ], + ) + @pytest.mark.parametrize("box", [DataFrame, Series, Index]) + @pytest.mark.parametrize("scalar", [-5, 5]) + def test_python_semantics_with_numexpr_installed( + self, op, box, scalar, monkeypatch + ): + # https://github.com/pandas-dev/pandas/issues/36047 + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + data = np.arange(-50, 50) + obj = box(data) + method = getattr(obj, op) + result = method(scalar) + + # compare result with numpy + with option_context("compute.use_numexpr", False): + expected = method(scalar) + + tm.assert_equal(result, expected) + + # compare result element-wise with Python + for i, elem in enumerate(data): + if box == DataFrame: + scalar_result = result.iloc[i, 0] + else: + scalar_result = result[i] + try: + expected = getattr(int(elem), op)(scalar) + except ZeroDivisionError: + pass + else: + assert scalar_result == expected diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_flags.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..9294b3fc3319b78b59d5637acdf3fd75737cd836 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_flags.py @@ -0,0 +1,48 @@ +import pytest + +import pandas as pd + + +class TestFlags: + def test_equality(self): + a = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags + b = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags + + assert a == a + assert b == b + assert a != b + assert a != 2 + + def test_set(self): + df = pd.DataFrame().set_flags(allows_duplicate_labels=True) + a = df.flags + a.allows_duplicate_labels = False + assert a.allows_duplicate_labels is False + a["allows_duplicate_labels"] = True + assert a.allows_duplicate_labels is True + + def test_repr(self): + a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=True).flags) + assert a == "<Flags(allows_duplicate_labels=True)>" + a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=False).flags) + assert a == "<Flags(allows_duplicate_labels=False)>" + + def test_obj_ref(self): + df = pd.DataFrame() + flags = df.flags + del df + with pytest.raises(ValueError, match="object has been deleted"): + flags.allows_duplicate_labels = True + + def test_getitem(self): + df = pd.DataFrame() + flags = df.flags + assert flags["allows_duplicate_labels"] is True + flags["allows_duplicate_labels"] = False + assert flags["allows_duplicate_labels"] is False + + with pytest.raises(KeyError, match="a"): + flags["a"] + + with pytest.raises(ValueError, match="a"): + flags["a"] = 10 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_multilevel.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_multilevel.py new file mode 100644 index 0000000000000000000000000000000000000000..6644ec82fab17ac9e1c1744b595c38fda17114f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_multilevel.py @@ -0,0 +1,355 @@ +import datetime + +import numpy as np +import pytest + +import pandas as pd +from 
pandas import ( + DataFrame, + MultiIndex, + Series, +) +import pandas._testing as tm + + +class TestMultiLevel: + def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data): + # axis=0 + ymd = multiindex_year_month_day_dataframe_random_data + + month_sums = ymd.groupby("month").sum() + result = month_sums.reindex(ymd.index, level=1) + expected = ymd.groupby(level="month").transform("sum") + + tm.assert_frame_equal(result, expected) + + # Series + result = month_sums["A"].reindex(ymd.index, level=1) + expected = ymd["A"].groupby(level="month").transform("sum") + tm.assert_series_equal(result, expected, check_names=False) + + # axis=1 + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = ymd.T.groupby("month", axis=1) + + month_sums = gb.sum() + result = month_sums.reindex(columns=ymd.index, level=1) + expected = ymd.groupby(level="month").transform("sum").T + tm.assert_frame_equal(result, expected) + + def test_reindex(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + expected = frame.iloc[[0, 3]] + reindexed = frame.loc[[("foo", "one"), ("bar", "one")]] + tm.assert_frame_equal(reindexed, expected) + + def test_reindex_preserve_levels( + self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write + ): + ymd = multiindex_year_month_day_dataframe_random_data + + new_index = ymd.index[::10] + chunk = ymd.reindex(new_index) + if using_copy_on_write: + assert chunk.index.is_(new_index) + else: + assert chunk.index is new_index + + chunk = ymd.loc[new_index] + assert chunk.index.equals(new_index) + + ymdT = ymd.T + chunk = ymdT.reindex(columns=new_index) + if using_copy_on_write: + assert chunk.columns.is_(new_index) + else: + assert chunk.columns is new_index + + chunk = ymdT.loc[:, new_index] + assert chunk.columns.equals(new_index) + + def test_groupby_transform(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + s = frame["A"] + grouper = s.index.get_level_values(0) + + grouped = s.groupby(grouper, group_keys=False) + + applied = grouped.apply(lambda x: x * 2) + expected = grouped.transform(lambda x: x * 2) + result = applied.reindex(expected.index) + tm.assert_series_equal(result, expected, check_names=False) + + def test_groupby_corner(self): + midx = MultiIndex( + levels=[["foo"], ["bar"], ["baz"]], + codes=[[0], [0], [0]], + names=["one", "two", "three"], + ) + df = DataFrame( + [np.random.default_rng(2).random(4)], + columns=["a", "b", "c", "d"], + index=midx, + ) + # should work + df.groupby(level="three") + + def test_groupby_level_no_obs(self): + # #1697 + midx = MultiIndex.from_tuples( + [ + ("f1", "s1"), + ("f1", "s2"), + ("f2", "s1"), + ("f2", "s2"), + ("f3", "s1"), + ("f3", "s2"), + ] + ) + df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx) + df1 = df.loc(axis=1)[df.columns.map(lambda u: u[0] in ["f2", "f3"])] + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = df1.groupby(axis=1, level=0) + result = grouped.sum() + assert (result.columns == ["f2", "f3"]).all() + + def test_setitem_with_expansion_multiindex_columns( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + df = ymd[:5].T + df[2000, 1, 10] = df[2000, 1, 7] + assert isinstance(df.columns, MultiIndex) + assert (df[2000, 1, 10] == df[2000, 1, 7]).all() + + def test_alignment(self): + 
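# Editor's note (added gloss): Series arithmetic aligns both operands on the union of their MultiIndexes before subtracting; labels present on only one side produce NaN, which the reindex-based "exp" below reconstructs explicitly. + 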
x = Series( + data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]) + ) + + y = Series( + data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]) + ) + + res = x - y + exp_index = x.index.union(y.index) + exp = x.reindex(exp_index) - y.reindex(exp_index) + tm.assert_series_equal(res, exp) + + # hit non-monotonic code path + res = x[::-1] - y[::-1] + exp_index = x.index.union(y.index) + exp = x.reindex(exp_index) - y.reindex(exp_index) + tm.assert_series_equal(res, exp) + + def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.groupby(level=[0, 1]).mean() + + k1 = ymd.index.get_level_values(0) + k2 = ymd.index.get_level_values(1) + + expected = ymd.groupby([k1, k2]).mean() + + # TODO groupby with level_values drops names + tm.assert_frame_equal(result, expected, check_names=False) + assert result.index.names == ymd.index.names[:2] + + result2 = ymd.groupby(level=ymd.index.names[:2]).mean() + tm.assert_frame_equal(result, result2) + + def test_multilevel_consolidate(self): + index = MultiIndex.from_tuples( + [("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")] + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), index=index, columns=index + ) + df["Totals", ""] = df.sum(1) + df = df._consolidate() + + def test_level_with_tuples(self): + index = MultiIndex( + levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + series = Series(np.random.default_rng(2).standard_normal(6), index=index) + frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index) + + result = series[("foo", "bar", 0)] + result2 = series.loc[("foo", "bar", 0)] + expected = series[:2] + expected.index = expected.index.droplevel(0) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"): + series[("foo", "bar", 0), 2] + + result = frame.loc[("foo", "bar", 0)] + result2 = frame.xs(("foo", "bar", 0)) + expected = frame[:2] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + index = MultiIndex( + levels=[[("foo", "bar"), ("foo", "baz"), ("foo", "qux")], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + series = Series(np.random.default_rng(2).standard_normal(6), index=index) + frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index) + + result = series[("foo", "bar")] + result2 = series.loc[("foo", "bar")] + expected = series[:2] + expected.index = expected.index.droplevel(0) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + result = frame.loc[("foo", "bar")] + result2 = frame.xs(("foo", "bar")) + expected = frame[:2] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.reindex(["foo", "qux"], level=0) + expected = frame.iloc[[0, 1, 2, 7, 8, 9]] + tm.assert_frame_equal(result, expected) + + result = frame.T.reindex(["foo", "qux"], axis=1, level=0) + tm.assert_frame_equal(result, expected.T) + + result = frame.loc[["foo", "qux"]] + tm.assert_frame_equal(result, expected) 
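+ + # Editor's illustration (added; not in the upstream test): level-based reindexing here is equivalent to boolean masking on the level values. + mask = frame.index.get_level_values(0).isin(["foo", "qux"]) + tm.assert_frame_equal(frame[mask], expected)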
+ + result = frame["A"].loc[["foo", "qux"]] + tm.assert_series_equal(result, expected["A"]) + + result = frame.T.loc[:, ["foo", "qux"]] + tm.assert_frame_equal(result, expected.T) + + @pytest.mark.parametrize("d", [4, "d"]) + def test_empty_frame_groupby_dtypes_consistency(self, d): + # GH 20888 + group_keys = ["a", "b", "c"] + df = DataFrame({"a": [1], "b": [2], "c": [3], "d": [d]}) + + g = df[df.a == 2].groupby(group_keys) + result = g.first().index + expected = MultiIndex( + levels=[[1], [2], [3]], codes=[[], [], []], names=["a", "b", "c"] + ) + + tm.assert_index_equal(result, expected) + + def test_duplicate_groupby_issues(self): + idx_tp = [ + ("600809", "20061231"), + ("600809", "20070331"), + ("600809", "20070630"), + ("600809", "20070331"), + ] + dt = ["demo", "demo", "demo", "demo"] + + idx = MultiIndex.from_tuples(idx_tp, names=["STK_ID", "RPT_Date"]) + s = Series(dt, index=idx) + + result = s.groupby(s.index).first() + assert len(result) == 3 + + def test_subsets_multiindex_dtype(self): + # GH 20757 + data = [["x", 1]] + columns = [("a", "b", np.nan), ("a", "c", 0.0)] + df = DataFrame(data, columns=MultiIndex.from_tuples(columns)) + expected = df.dtypes.a.b + result = df.a.b.dtypes + tm.assert_series_equal(result, expected) + + def test_datetime_object_multiindex(self): + data_dic = { + (0, datetime.date(2018, 3, 3)): {"A": 1, "B": 10}, + (0, datetime.date(2018, 3, 4)): {"A": 2, "B": 11}, + (1, datetime.date(2018, 3, 3)): {"A": 3, "B": 12}, + (1, datetime.date(2018, 3, 4)): {"A": 4, "B": 13}, + } + result = DataFrame.from_dict(data_dic, orient="index") + data = {"A": [1, 2, 3, 4], "B": [10, 11, 12, 13]} + index = [ + [0, 0, 1, 1], + [ + datetime.date(2018, 3, 3), + datetime.date(2018, 3, 4), + datetime.date(2018, 3, 3), + datetime.date(2018, 3, 4), + ], + ] + expected = DataFrame(data=data, index=index) + + tm.assert_frame_equal(result, expected) + + def test_multiindex_with_na(self): + df = DataFrame( + [ + ["A", np.nan, 1.23, 4.56], + ["A", "G", 1.23, 4.56], + ["A", "D", 9.87, 10.54], + ], + columns=["pivot_0", "pivot_1", "col_1", "col_2"], + ).set_index(["pivot_0", "pivot_1"]) + + df.at[("A", "F"), "col_2"] = 0.0 + + expected = DataFrame( + [ + ["A", np.nan, 1.23, 4.56], + ["A", "G", 1.23, 4.56], + ["A", "D", 9.87, 10.54], + ["A", "F", np.nan, 0.0], + ], + columns=["pivot_0", "pivot_1", "col_1", "col_2"], + ).set_index(["pivot_0", "pivot_1"]) + + tm.assert_frame_equal(df, expected) + + +class TestSorted: + """everything you wanted to test about sorting""" + + def test_sort_non_lexsorted(self): + # degenerate case where we sort but don't + # have a satisfying result :< + # GH 15797 + idx = MultiIndex( + [["A", "B", "C"], ["c", "b", "a"]], [[0, 1, 2, 0, 1, 2], [0, 2, 1, 1, 0, 2]] + ) + + df = DataFrame({"col": range(len(idx))}, index=idx, dtype="int64") + assert df.index.is_monotonic_increasing is False + + sorted = df.sort_index() + assert sorted.index.is_monotonic_increasing is True + + expected = DataFrame( + {"col": [1, 4, 5, 2]}, + index=MultiIndex.from_tuples( + [("B", "a"), ("B", "c"), ("C", "a"), ("C", "b")] + ), + dtype="int64", + ) + result = sorted.loc[pd.IndexSlice["B":"C", "a":"c"], :] + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_nanops.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_nanops.py new file mode 100644 index 0000000000000000000000000000000000000000..a50054f33f382ed913261e0cafd944c2fd86aaa3 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_nanops.py @@ -0,0 +1,1274 @@ +from functools import partial + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import ( + Series, + isna, +) +import pandas._testing as tm +from pandas.core import nanops + +use_bn = nanops._USE_BOTTLENECK + + +@pytest.fixture +def disable_bottleneck(monkeypatch): + with monkeypatch.context() as m: + m.setattr(nanops, "_USE_BOTTLENECK", False) + yield + + +@pytest.fixture +def arr_shape(): + return 11, 7 + + +@pytest.fixture +def arr_float(arr_shape): + return np.random.default_rng(2).standard_normal(arr_shape) + + +@pytest.fixture +def arr_complex(arr_float): + return arr_float + arr_float * 1j + + +@pytest.fixture +def arr_int(arr_shape): + return np.random.default_rng(2).integers(-10, 10, arr_shape) + + +@pytest.fixture +def arr_bool(arr_shape): + return np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + + +@pytest.fixture +def arr_str(arr_float): + return np.abs(arr_float).astype("S") + + +@pytest.fixture +def arr_utf(arr_float): + return np.abs(arr_float).astype("U") + + +@pytest.fixture +def arr_date(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + + +@pytest.fixture +def arr_tdelta(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + + +@pytest.fixture +def arr_nan(arr_shape): + return np.tile(np.nan, arr_shape) + + +@pytest.fixture +def arr_float_nan(arr_float, arr_nan): + return np.vstack([arr_float, arr_nan]) + + +@pytest.fixture +def arr_nan_float1(arr_nan, arr_float): + return np.vstack([arr_nan, arr_float]) + + +@pytest.fixture +def arr_nan_nan(arr_nan): + return np.vstack([arr_nan, arr_nan]) + + +@pytest.fixture +def arr_inf(arr_float): + return arr_float * np.inf + + +@pytest.fixture +def arr_float_inf(arr_float, arr_inf): + return np.vstack([arr_float, arr_inf]) + + +@pytest.fixture +def arr_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_inf]) + + +@pytest.fixture +def arr_float_nan_inf(arr_float, arr_nan, arr_inf): + return np.vstack([arr_float, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_nan_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_obj( + arr_float, arr_int, arr_bool, arr_complex, arr_str, arr_utf, arr_date, arr_tdelta +): + return np.vstack( + [ + arr_float.astype("O"), + arr_int.astype("O"), + arr_bool.astype("O"), + arr_complex.astype("O"), + arr_str.astype("O"), + arr_utf.astype("O"), + arr_date.astype("O"), + arr_tdelta.astype("O"), + ] + ) + + +@pytest.fixture +def arr_nan_nanj(arr_nan): + with np.errstate(invalid="ignore"): + return arr_nan + arr_nan * 1j + + +@pytest.fixture +def arr_complex_nan(arr_complex, arr_nan_nanj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_nanj]) + + +@pytest.fixture +def arr_nan_infj(arr_inf): + with np.errstate(invalid="ignore"): + return arr_inf * 1j + + +@pytest.fixture +def arr_complex_nan_infj(arr_complex, arr_nan_infj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_infj]) + + +@pytest.fixture +def arr_float_1d(arr_float): + return arr_float[:, 0] + + +@pytest.fixture +def arr_nan_1d(arr_nan): + return arr_nan[:, 0] + + +@pytest.fixture +def arr_float_nan_1d(arr_float_nan): + return arr_float_nan[:, 0] + + +@pytest.fixture +def arr_float1_nan_1d(arr_float1_nan): + return 
arr_float1_nan[:, 0] + + +@pytest.fixture +def arr_nan_float1_1d(arr_nan_float1): + return arr_nan_float1[:, 0] + + +class TestnanopsDataFrame: + def setup_method(self): + nanops._USE_BOTTLENECK = False + + arr_shape = (11, 7) + + self.arr_float = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_float1 = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_complex = self.arr_float + self.arr_float1 * 1j + self.arr_int = np.random.default_rng(2).integers(-10, 10, arr_shape) + self.arr_bool = np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + self.arr_str = np.abs(self.arr_float).astype("S") + self.arr_utf = np.abs(self.arr_float).astype("U") + self.arr_date = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + ) + self.arr_tdelta = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + ) + + self.arr_nan = np.tile(np.nan, arr_shape) + self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) + self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) + self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) + self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan]) + + self.arr_inf = self.arr_float * np.inf + self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) + + self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf]) + self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf]) + self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf]) + self.arr_obj = np.vstack( + [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + self.arr_complex.astype("O"), + self.arr_str.astype("O"), + self.arr_utf.astype("O"), + self.arr_date.astype("O"), + self.arr_tdelta.astype("O"), + ] + ) + + with np.errstate(invalid="ignore"): + self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j + self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj]) + + self.arr_nan_infj = self.arr_inf * 1j + self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj]) + + self.arr_float_2d = self.arr_float + self.arr_float1_2d = self.arr_float1 + + self.arr_nan_2d = self.arr_nan + self.arr_float_nan_2d = self.arr_float_nan + self.arr_float1_nan_2d = self.arr_float1_nan + self.arr_nan_float1_2d = self.arr_nan_float1 + + self.arr_float_1d = self.arr_float[:, 0] + self.arr_float1_1d = self.arr_float1[:, 0] + + self.arr_nan_1d = self.arr_nan[:, 0] + self.arr_float_nan_1d = self.arr_float_nan[:, 0] + self.arr_float1_nan_1d = self.arr_float1_nan[:, 0] + self.arr_nan_float1_1d = self.arr_nan_float1[:, 0] + + def teardown_method(self): + nanops._USE_BOTTLENECK = use_bn + + def check_results(self, targ, res, axis, check_dtype=True): + res = getattr(res, "asm8", res) + + if ( + axis != 0 + and hasattr(targ, "shape") + and targ.ndim + and targ.shape != res.shape + ): + res = np.split(res, [targ.shape[0]], axis=0)[0] + + try: + tm.assert_almost_equal(targ, res, check_dtype=check_dtype) + except AssertionError: + # handle timedelta dtypes + if hasattr(targ, "dtype") and targ.dtype == "m8[ns]": + raise + + # There are sometimes rounding errors with + # complex and object dtypes. + # If it isn't one of those, re-raise the error. 
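+            # (the fallback below compares the real and imaginary parts
+            # separately, which tolerates those rounding differences)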
+ if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]: + raise + # convert object dtypes to something that can be split into + # real and imaginary parts + if res.dtype.kind == "O": + if targ.dtype.kind != "O": + res = res.astype(targ.dtype) + else: + cast_dtype = "c16" if hasattr(np, "complex128") else "f8" + res = res.astype(cast_dtype) + targ = targ.astype(cast_dtype) + # there should never be a case where numpy returns an object + # but nanops doesn't, so make that an exception + elif targ.dtype.kind == "O": + raise + tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype) + tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype) + + def check_fun_data( + self, + testfunc, + targfunc, + testarval, + targarval, + skipna, + check_dtype=True, + empty_targfunc=None, + **kwargs, + ): + for axis in list(range(targarval.ndim)) + [None]: + targartempval = targarval if skipna else testarval + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: + targ = targfunc(targartempval, axis=axis, **kwargs) + + if targartempval.dtype == object and ( + targfunc is np.any or targfunc is np.all + ): + # GH#12863 the numpy functions will retain e.g. floatiness + if isinstance(targ, np.ndarray): + targ = targ.astype(bool) + else: + targ = bool(targ) + + res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) + + if ( + isinstance(targ, np.complex128) + and isinstance(res, float) + and np.isnan(targ) + and np.isnan(res) + ): + # GH#18463 + targ = res + + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna: + res = testfunc(testarval, axis=axis, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if axis is None: + res = testfunc(testarval, skipna=skipna, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna and axis is None: + res = testfunc(testarval, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + + if testarval.ndim <= 1: + return + + # Recurse on lower-dimension + testarval2 = np.take(testarval, 0, axis=-1) + targarval2 = np.take(targarval, 0, axis=-1) + self.check_fun_data( + testfunc, + targfunc, + testarval2, + targarval2, + skipna=skipna, + check_dtype=check_dtype, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_fun( + self, testfunc, targfunc, testar, skipna, empty_targfunc=None, **kwargs + ): + targar = testar + if testar.endswith("_nan") and hasattr(self, testar[:-4]): + targar = testar[:-4] + + testarval = getattr(self, testar) + targarval = getattr(self, targar) + self.check_fun_data( + testfunc, + targfunc, + testarval, + targarval, + skipna=skipna, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_funs( + self, + testfunc, + targfunc, + skipna, + allow_complex=True, + allow_all_nan=True, + allow_date=True, + allow_tdelta=True, + allow_obj=True, + **kwargs, + ): + self.check_fun(testfunc, targfunc, "arr_float", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_float_nan", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_int", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_bool", skipna, **kwargs) + objs = [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + ] + + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan", skipna, **kwargs) + + if allow_complex: + self.check_fun(testfunc, targfunc, "arr_complex", skipna, **kwargs) + self.check_fun(testfunc, 
targfunc, "arr_complex_nan", skipna, **kwargs) + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan_nanj", skipna, **kwargs) + objs += [self.arr_complex.astype("O")] + + if allow_date: + targfunc(self.arr_date) + self.check_fun(testfunc, targfunc, "arr_date", skipna, **kwargs) + objs += [self.arr_date.astype("O")] + + if allow_tdelta: + try: + targfunc(self.arr_tdelta) + except TypeError: + pass + else: + self.check_fun(testfunc, targfunc, "arr_tdelta", skipna, **kwargs) + objs += [self.arr_tdelta.astype("O")] + + if allow_obj: + self.arr_obj = np.vstack(objs) + # some nanops handle object dtypes better than their numpy + # counterparts, so the numpy functions need to be given something + # else + if allow_obj == "convert": + targfunc = partial( + self._badobj_wrap, func=targfunc, allow_complex=allow_complex + ) + self.check_fun(testfunc, targfunc, "arr_obj", skipna, **kwargs) + + def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): + if value.dtype.kind == "O": + if allow_complex: + value = value.astype("c16") + else: + value = value.astype("f8") + return func(value, **kwargs) + + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)] + ) + def test_nan_funcs(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_all_nan=False, allow_date=False) + + def test_nansum(self, skipna): + self.check_funs( + nanops.nansum, + np.sum, + skipna, + allow_date=False, + check_dtype=False, + empty_targfunc=np.nansum, + ) + + def test_nanmean(self, skipna): + self.check_funs( + nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanmedian(self, skipna): + self.check_funs( + nanops.nanmedian, + np.median, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanvar(self, ddof, skipna): + self.check_funs( + nanops.nanvar, + np.var, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd(self, ddof, skipna): + self.check_funs( + nanops.nanstd, + np.std, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nansem(self, ddof, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nansem, + sp_stats.sem, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)] + ) + def test_nanops_with_warnings(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_obj=False) + + def _argminmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + nans = np.min(value, axis) + nullnan = isna(nans) + if res.ndim: + res[nullnan] = -1 + elif ( + hasattr(nullnan, "all") + and nullnan.all() + or not hasattr(nullnan, "all") + and nullnan + ): + res = -1 + return res + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmax(self, skipna): + func = partial(self._argminmax_wrap, func=np.argmax) + self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmin(self, 
skipna): + func = partial(self._argminmax_wrap, func=np.argmin) + self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False) + + def _skew_kurt_wrap(self, values, axis=None, func=None): + if not isinstance(values.dtype.type, np.floating): + values = values.astype("f8") + result = func(values, axis=axis, bias=False) + # fix for handling cases where all elements in an axis are the same + if isinstance(result, np.ndarray): + result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0 + return result + elif np.max(values) == np.min(values): + return 0.0 + return result + + def test_nanskew(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func = partial(self._skew_kurt_wrap, func=sp_stats.skew) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nanskew, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nankurt(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func1 = partial(sp_stats.kurtosis, fisher=True) + func = partial(self._skew_kurt_wrap, func=func1) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nankurt, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nanprod(self, skipna): + self.check_funs( + nanops.nanprod, + np.prod, + skipna, + allow_date=False, + allow_tdelta=False, + empty_targfunc=np.nanprod, + ) + + def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) + res01 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs) + res11 = checkfun( + self.arr_float_nan_2d, + self.arr_float1_nan_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs) + res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs) + res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs) + res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs) + res24 = checkfun( + self.arr_float_nan_2d, + self.arr_nan_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs) + res01 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs) + res11 = checkfun( + self.arr_float_nan_1d, + self.arr_float1_nan_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs) + res21 = 
checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs) + res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs) + res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs) + res24 = checkfun( + self.arr_float_nan_1d, + self.arr_nan_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def test_nancorr(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1) + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_pearson(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson") + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_kendall(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall") + targ0 = sp_stats.kendalltau(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall") + + def test_nancorr_spearman(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman") + targ0 = sp_stats.spearmanr(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman") + + def test_invalid_method(self): + pytest.importorskip("scipy") + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'" + with pytest.raises(ValueError, match=msg): + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo") + + def test_nancov(self): + targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1) + targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) + + 
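+def test_nanmean_skipna_contract_sketch():
+    # Illustrative sketch (an editorial addition, not part of the upstream
+    # suite; the test name is invented): the class above compares each
+    # nanops reduction against its numpy counterpart.  The core contract
+    # exercised throughout is that skipna=True ignores NaNs while
+    # skipna=False propagates them.
+    arr = np.array([1.0, np.nan, 3.0])
+    assert nanops.nanmean(arr, skipna=True) == np.nanmean(arr) == 2.0
+    assert np.isnan(nanops.nanmean(arr, skipna=False))
+
+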
+@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_complex", False), + ("arr_int", False), + ("arr_bool", False), + ("arr_str", False), + ("arr_utf", False), + ("arr_complex", False), + ("arr_complex_nan", False), + ("arr_nan_nanj", False), + ("arr_nan_infj", True), + ("arr_complex_nan_infj", True), + ], +) +def test_has_infs_non_float(request, arr, correct, disable_bottleneck): + val = request.getfixturevalue(arr) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_float", False), + ("arr_nan", False), + ("arr_float_nan", False), + ("arr_nan_nan", False), + ("arr_float_inf", True), + ("arr_inf", True), + ("arr_nan_inf", True), + ("arr_float_nan_inf", True), + ("arr_nan_nan_inf", True), + ], +) +@pytest.mark.parametrize("astype", [None, "f4", "f2"]) +def test_has_infs_floats(request, arr, correct, astype, disable_bottleneck): + val = request.getfixturevalue(arr) + if astype is not None: + val = val.astype(astype) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "fixture", ["arr_float", "arr_complex", "arr_int", "arr_bool", "arr_str", "arr_utf"] +) +def test_bn_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert nanops._bn_ok_dtype(obj.dtype, "test") + + +@pytest.mark.parametrize( + "fixture", + [ + "arr_date", + "arr_tdelta", + "arr_obj", + ], +) +def test_bn_not_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert not nanops._bn_ok_dtype(obj.dtype, "test") + + +class TestEnsureNumeric: + def test_numeric_values(self): + # Test integer + assert nanops._ensure_numeric(1) == 1 + + # Test float + assert nanops._ensure_numeric(1.1) == 1.1 + + # Test complex + assert nanops._ensure_numeric(1 + 2j) == 1 + 2j + + def test_ndarray(self): + # Test numeric ndarray + values = np.array([1, 2, 3]) + assert np.allclose(nanops._ensure_numeric(values), values) + + # Test object ndarray + o_values = values.astype(object) + assert np.allclose(nanops._ensure_numeric(o_values), values) + + # Test convertible string ndarray + s_values = np.array(["1", "2", "3"], dtype=object) + msg = r"Could not convert \['1' '2' '3'\] to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + # Test non-convertible string ndarray + s_values = np.array(["foo", "bar", "baz"], dtype=object) + msg = r"Could not convert .* to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + def test_convertable_values(self): + with pytest.raises(TypeError, match="Could not convert string '1' to numeric"): + nanops._ensure_numeric("1") + with pytest.raises( + TypeError, match="Could not convert string '1.1' to numeric" + ): + nanops._ensure_numeric("1.1") + with pytest.raises( + TypeError, match=r"Could not convert string '1\+1j' to numeric" + ): + nanops._ensure_numeric("1+1j") + + def test_non_convertable_values(self): + msg = "Could not convert string 'foo' to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric("foo") + + # with the wrong type, python raises TypeError for us 
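+        # (both float() and complex() reject dicts and lists, and the
+        # match pattern below fits the messages they raise)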
+ msg = "argument must be a string or a number" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric({}) + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric([]) + + +class TestNanvarFixedValues: + # xref GH10242 + # Samples from a normal distribution. + @pytest.fixture + def variance(self): + return 3.0 + + @pytest.fixture + def samples(self, variance): + return self.prng.normal(scale=variance**0.5, size=100000) + + def test_nanvar_all_finite(self, samples, variance): + actual_variance = nanops.nanvar(samples) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + def test_nanvar_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_variance = nanops.nanvar(samples_test, skipna=True) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + actual_variance = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_variance, np.nan, rtol=1e-2) + + def test_nanstd_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_std = nanops.nanstd(samples_test, skipna=True) + tm.assert_almost_equal(actual_std, variance**0.5, rtol=1e-2) + + actual_std = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2) + + def test_nanvar_axis(self, samples, variance): + # Generate some sample data. + samples_unif = self.prng.uniform(size=samples.shape[0]) + samples = np.vstack([samples, samples_unif]) + + actual_variance = nanops.nanvar(samples, axis=1) + tm.assert_almost_equal( + actual_variance, np.array([variance, 1.0 / 12]), rtol=1e-2 + ) + + def test_nanvar_ddof(self): + n = 5 + samples = self.prng.uniform(size=(10000, n + 1)) + samples[:, -1] = np.nan # Force use of our own algorithm. + + variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean() + variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean() + variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean() + + # The unbiased estimate. + var = 1.0 / 12 + tm.assert_almost_equal(variance_1, var, rtol=1e-2) + + # The underestimated variance. + tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, rtol=1e-2) + + # The overestimated variance. + tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, rtol=1e-2) + + @pytest.mark.parametrize("axis", range(2)) + @pytest.mark.parametrize("ddof", range(3)) + def test_ground_truth(self, axis, ddof): + # Test against values that were precomputed with Numpy. + samples = np.empty((4, 4)) + samples[:3, :3] = np.array( + [ + [0.97303362, 0.21869576, 0.55560287], + [0.72980153, 0.03109364, 0.99155171], + [0.09317602, 0.60078248, 0.15871292], + ] + ) + samples[3] = samples[:, 3] = np.nan + + # Actual variances along axis=0, 1 for ddof=0, 1, 2 + variance = np.array( + [ + [ + [0.13762259, 0.05619224, 0.11568816], + [0.20643388, 0.08428837, 0.17353224], + [0.41286776, 0.16857673, 0.34706449], + ], + [ + [0.09519783, 0.16435395, 0.05082054], + [0.14279674, 0.24653093, 0.07623082], + [0.28559348, 0.49306186, 0.15246163], + ], + ] + ) + + # Test nanvar. + var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(var[:3], variance[axis, ddof]) + assert np.isnan(var[3]) + + # Test nanstd. 
+ std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5) + assert np.isnan(std[3]) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd_roundoff(self, ddof): + # Regression test for GH 10242 (test data taken from GH 10489). Ensure + # that variance is stable. + data = Series(766897346 * np.ones(10)) + result = data.std(ddof=ddof) + assert result == 0.0 + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNanskewFixedValues: + # xref GH 11974 + # Test data + skewness value (computed with scipy.stats.skew) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_skew(self): + return -0.1875895205961754 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + skew = nanops.nanskew(data) + assert skew == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(left_tailed) < 0 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(right_tailed) > 0 + + def test_ground_truth(self, samples, actual_skew): + skew = nanops.nanskew(samples) + tm.assert_almost_equal(skew, actual_skew) + + def test_axis(self, samples, actual_skew): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + skew = nanops.nanskew(samples, axis=1) + tm.assert_almost_equal(skew, np.array([actual_skew, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=False) + assert np.isnan(skew) + + def test_nans_skipna(self, samples, actual_skew): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=True) + tm.assert_almost_equal(skew, actual_skew) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNankurtFixedValues: + # xref GH 11974 + # Test data + kurtosis value (computed with scipy.stats.kurtosis) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_kurt(self): + return -1.2058303433799713 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + kurt = nanops.nankurt(data) + assert kurt == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(left_tailed) < 2 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(right_tailed) < 0 + + def test_ground_truth(self, samples, actual_kurt): + kurt = nanops.nankurt(samples) + tm.assert_almost_equal(kurt, actual_kurt) + + def test_axis(self, samples, actual_kurt): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + kurt = nanops.nankurt(samples, axis=1) + tm.assert_almost_equal(kurt, np.array([actual_kurt, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=False) + assert np.isnan(kurt) + + def test_nans_skipna(self, samples, actual_kurt): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=True) + tm.assert_almost_equal(kurt, actual_kurt) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestDatetime64NaNOps: + @pytest.fixture(params=["s", 
"ms", "us", "ns"]) + def unit(self, request): + return request.param + + # Enabling mean changes the behavior of DataFrame.mean + # See https://github.com/pandas-dev/pandas/issues/24752 + def test_nanmean(self, unit): + dti = pd.date_range("2016-01-01", periods=3).as_unit(unit) + expected = dti[1] + + for obj in [dti, dti._data]: + result = nanops.nanmean(obj) + assert result == expected + + dti2 = dti.insert(1, pd.NaT) + + for obj in [dti2, dti2._data]: + result = nanops.nanmean(obj) + assert result == expected + + @pytest.mark.parametrize("constructor", ["M8", "m8"]) + def test_nanmean_skipna_false(self, constructor, unit): + dtype = f"{constructor}[{unit}]" + arr = np.arange(12).astype(np.int64).view(dtype).reshape(4, 3) + + arr[-1, -1] = "NaT" + + result = nanops.nanmean(arr, skipna=False) + assert np.isnat(result) + assert result.dtype == dtype + + result = nanops.nanmean(arr, axis=0, skipna=False) + expected = np.array([4, 5, "NaT"], dtype=arr.dtype) + tm.assert_numpy_array_equal(result, expected) + + result = nanops.nanmean(arr, axis=1, skipna=False) + expected = np.array([arr[0, 1], arr[1, 1], arr[2, 1], arr[-1, -1]]) + tm.assert_numpy_array_equal(result, expected) + + +def test_use_bottleneck(): + if nanops._BOTTLENECK_INSTALLED: + with pd.option_context("use_bottleneck", True): + assert pd.get_option("use_bottleneck") + + with pd.option_context("use_bottleneck", False): + assert not pd.get_option("use_bottleneck") + + +@pytest.mark.parametrize( + "numpy_op, expected", + [ + (np.sum, 10), + (np.nansum, 10), + (np.mean, 2.5), + (np.nanmean, 2.5), + (np.median, 2.5), + (np.nanmedian, 2.5), + (np.min, 1), + (np.max, 4), + (np.nanmin, 1), + (np.nanmax, 4), + ], +) +def test_numpy_ops(numpy_op, expected): + # GH8383 + result = numpy_op(Series([1, 2, 3, 4])) + assert result == expected + + +@pytest.mark.parametrize( + "operation", + [ + nanops.nanany, + nanops.nanall, + nanops.nansum, + nanops.nanmean, + nanops.nanmedian, + nanops.nanstd, + nanops.nanvar, + nanops.nansem, + nanops.nanargmax, + nanops.nanargmin, + nanops.nanmax, + nanops.nanmin, + nanops.nanskew, + nanops.nankurt, + nanops.nanprod, + ], +) +def test_nanops_independent_of_mask_param(operation): + # GH22764 + ser = Series([1, 2, np.nan, 3, np.nan, 4]) + mask = ser.isna() + median_expected = operation(ser._values) + median_result = operation(ser._values, mask=mask) + assert median_expected == median_result + + +@pytest.mark.parametrize("min_count", [-1, 0]) +def test_check_below_min_count_negative_or_zero_min_count(min_count): + # GH35227 + result = nanops.check_below_min_count((21, 37), None, min_count) + expected_result = False + assert result == expected_result + + +@pytest.mark.parametrize( + "mask", [None, np.array([False, False, True]), np.array([True] + 9 * [False])] +) +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (101, True)]) +def test_check_below_min_count_positive_min_count(mask, min_count, expected_result): + # GH35227 + shape = (10, 10) + result = nanops.check_below_min_count(shape, mask, min_count) + assert result == expected_result + + +@td.skip_if_windows +@td.skip_if_32bit +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (2812191852, True)]) +def test_check_below_min_count_large_shape(min_count, expected_result): + # GH35227 large shape used to show that the issue is fixed + shape = (2244367, 1253) + result = nanops.check_below_min_count(shape, mask=None, min_count=min_count) + assert result == expected_result + + +@pytest.mark.parametrize("func", ["nanmean", 
"nansum"]) +def test_check_bottleneck_disallow(any_real_numpy_dtype, func): + # GH 42878 bottleneck sometimes produces unreliable results for mean and sum + assert not nanops._bn_ok_dtype(np.dtype(any_real_numpy_dtype).type, func) + + +@pytest.mark.parametrize("val", [2**55, -(2**55), 20150515061816532]) +def test_nanmean_overflow(disable_bottleneck, val): + # GH 10155 + # In the previous implementation mean can overflow for int dtypes, it + # is now consistent with numpy + + ser = Series(val, index=range(500), dtype=np.int64) + result = ser.mean() + np_result = ser.values.mean() + assert result == val + assert result == np_result + assert result.dtype == np.float64 + + +@pytest.mark.parametrize( + "dtype", + [ + np.int16, + np.int32, + np.int64, + np.float32, + np.float64, + getattr(np, "float128", None), + ], +) +@pytest.mark.parametrize("method", ["mean", "std", "var", "skew", "kurt", "min", "max"]) +def test_returned_dtype(disable_bottleneck, dtype, method): + if dtype is None: + pytest.skip("np.float128 not available") + + ser = Series(range(10), dtype=dtype) + result = getattr(ser, method)() + if is_integer_dtype(dtype) and method not in ["min", "max"]: + assert result.dtype == np.float64 + else: + assert result.dtype == dtype diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..52b5f636b1254ceddf869704a6378f4ea5012b8c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py @@ -0,0 +1,100 @@ +import sys +import types + +import pytest + +from pandas.compat._optional import ( + VERSIONS, + import_optional_dependency, +) + +import pandas._testing as tm + + +def test_import_optional(): + match = "Missing .*notapackage.* pip .* conda .* notapackage" + with pytest.raises(ImportError, match=match) as exc_info: + import_optional_dependency("notapackage") + # The original exception should be there as context: + assert isinstance(exc_info.value.__context__, ImportError) + + result = import_optional_dependency("notapackage", errors="ignore") + assert result is None + + +def test_xlrd_version_fallback(): + pytest.importorskip("xlrd") + import_optional_dependency("xlrd") + + +def test_bad_version(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, match=match): + import_optional_dependency("fakemodule") + + # Test min_version parameter + result = import_optional_dependency("fakemodule", min_version="0.8") + assert result is module + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule") + assert result is module + + with pytest.raises(ImportError, match="Pandas requires version '1.1.0'"): + import_optional_dependency("fakemodule", min_version="1.1.0") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency( + "fakemodule", errors="warn", min_version="1.1.0" + ) + assert result is None + + result = import_optional_dependency( + "fakemodule", errors="ignore", min_version="1.1.0" + ) + assert result is None + + +def 
test_submodule(monkeypatch): + # Create a fake module with a submodule + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + sub_name = "submodule" + submodule = types.ModuleType(sub_name) + setattr(module, sub_name, submodule) + sys.modules[f"{name}.{sub_name}"] = submodule + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, match=match): + import_optional_dependency("fakemodule.submodule") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule.submodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule.submodule") + assert result is submodule + + +def test_no_version_raises(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + with pytest.raises(ImportError, match="Can't determine .* fakemodule"): + import_optional_dependency(name) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..4e569dc40005d5883870b82f46859d5bc36578f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py @@ -0,0 +1,103 @@ +from collections.abc import Generator +import contextlib + +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import accessor + + +def test_dirname_mixin() -> None: + # GH37173 + + class X(accessor.DirNamesMixin): + x = 1 + y: int + + def __init__(self) -> None: + self.z = 3 + + result = [attr_name for attr_name in dir(X()) if not attr_name.startswith("_")] + + assert result == ["x", "z"] + + +@contextlib.contextmanager +def ensure_removed(obj, attr) -> Generator[None, None, None]: + """Ensure that an attribute added to 'obj' during the test is + removed when we're done + """ + try: + yield + finally: + try: + delattr(obj, attr) + except AttributeError: + pass + obj._accessors.discard(attr) + + +class MyAccessor: + def __init__(self, obj) -> None: + self.obj = obj + self.item = "item" + + @property + def prop(self): + return self.item + + def method(self): + return self.item + + +@pytest.mark.parametrize( + "obj, registrar", + [ + (pd.Series, pd.api.extensions.register_series_accessor), + (pd.DataFrame, pd.api.extensions.register_dataframe_accessor), + (pd.Index, pd.api.extensions.register_index_accessor), + ], +) +def test_register(obj, registrar): + with ensure_removed(obj, "mine"): + before = set(dir(obj)) + registrar("mine")(MyAccessor) + o = obj([]) if obj is not pd.Series else obj([], dtype=object) + assert o.mine.prop == "item" + after = set(dir(obj)) + assert (before ^ after) == {"mine"} + assert "mine" in obj._accessors + + +def test_accessor_works(): + with ensure_removed(pd.Series, "mine"): + pd.api.extensions.register_series_accessor("mine")(MyAccessor) + + s = pd.Series([1, 2]) + assert s.mine.obj is s + + assert s.mine.prop == "item" + assert s.mine.method() == "item" + + +def test_overwrite_warns(): + match = r".*MyAccessor.*fake.*Series.*" + with tm.assert_produces_warning(UserWarning, match=match): + with ensure_removed(pd.Series, "fake"): + setattr(pd.Series, "fake", 123) + 
pd.api.extensions.register_series_accessor("fake")(MyAccessor) + s = pd.Series([1, 2]) + assert s.fake.prop == "item" + + +def test_raises_attribute_error(): + with ensure_removed(pd.Series, "bad"): + + @pd.api.extensions.register_series_accessor("bad") + class Bad: + def __init__(self, data) -> None: + raise AttributeError("whoops") + + with pytest.raises(AttributeError, match="whoops"): + pd.Series([], dtype=object).bad diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_sorting.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_sorting.py new file mode 100644 index 0000000000000000000000000000000000000000..285f240028152072ed52b3657113d30a7fd63fea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/test_sorting.py @@ -0,0 +1,487 @@ +from collections import defaultdict +from datetime import datetime +from itertools import product + +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + MultiIndex, + Series, + array, + concat, + merge, +) +import pandas._testing as tm +from pandas.core.algorithms import safe_sort +import pandas.core.common as com +from pandas.core.sorting import ( + _decons_group_index, + get_group_index, + is_int64_overflow_possible, + lexsort_indexer, + nargsort, +) + + +@pytest.fixture +def left_right(): + low, high, n = -1 << 10, 1 << 10, 1 << 20 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)), columns=list("ABCDEFG") + ) + left["left"] = left.sum(axis=1) + + # one-2-one match + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i].copy() + right.columns = right.columns[:-1].tolist() + ["right"] + right.index = np.arange(len(right)) + right["right"] *= -1 + return left, right + + +class TestSorting: + @pytest.mark.slow + def test_int64_overflow(self): + B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500))) + A = np.arange(2500) + df = DataFrame( + { + "A": A, + "B": B, + "C": A, + "D": B, + "E": A, + "F": B, + "G": A, + "H": B, + "values": np.random.default_rng(2).standard_normal(2500), + } + ) + + lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"]) + rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"]) + + left = lg.sum()["values"] + right = rg.sum()["values"] + + exp_index, _ = left.index.sortlevel() + tm.assert_index_equal(left.index, exp_index) + + exp_index, _ = right.index.sortlevel(0) + tm.assert_index_equal(right.index, exp_index) + + tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values)) + tups = com.asarray_tuplesafe(tups) + + expected = df.groupby(tups).sum()["values"] + + for k, v in expected.items(): + assert left[k] == right[k[::-1]] + assert left[k] == v + assert len(left) == len(right) + + def test_int64_overflow_groupby_large_range(self): + # GH9096 + values = range(55109) + data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values}) + grouped = data.groupby(["a", "b", "c", "d"]) + assert len(grouped) == len(values) + + @pytest.mark.parametrize("agg", ["mean", "median"]) + def test_int64_overflow_groupby_large_df_shuffled(self, agg): + rs = np.random.default_rng(2) + arr = rs.integers(-1 << 12, 1 << 12, (1 << 15, 5)) + i = rs.choice(len(arr), len(arr) * 4) + arr = np.vstack((arr, arr[i])) # add some duplicate rows + + i = rs.permutation(len(arr)) + arr = arr[i] # shuffle rows + + df = DataFrame(arr, columns=list("abcde")) + df["jim"], df["joe"] = np.zeros((2, len(df))) + gr = df.groupby(list("abcde")) + + # verify this is testing what it is supposed to 
test! + assert is_int64_overflow_possible(gr._grouper.shape) + + mi = MultiIndex.from_arrays( + [ar.ravel() for ar in np.array_split(np.unique(arr, axis=0), 5, axis=1)], + names=list("abcde"), + ) + + res = DataFrame( + np.zeros((len(mi), 2)), columns=["jim", "joe"], index=mi + ).sort_index() + + tm.assert_frame_equal(getattr(gr, agg)(), res) + + @pytest.mark.parametrize( + "order, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_lexsort_indexer(self, order, na_position, exp): + keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5] + result = lexsort_indexer(keys, orders=order, na_position=na_position) + tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) + + @pytest.mark.parametrize( + "ascending, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_nargsort(self, ascending, na_position, exp): + # list places NaNs last, np.array(..., dtype="O") may not place NaNs first + items = np.array([np.nan] * 5 + list(range(100)) + [np.nan] * 5, dtype="O") + + # mergesort is the most difficult to get right because we want it to be + # stable. 
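+        # (a stable sort keeps equal elements in their original relative
+        # order, which makes the expected NaN positions deterministic)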
+ + # According to numpy/core/tests/test_multiarray, """The number of + # sorted items must be greater than ~50 to check the actual algorithm + # because quick and merge sort fall over to insertion sort for small + # arrays.""" + + result = nargsort( + items, kind="mergesort", ascending=ascending, na_position=na_position + ) + tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) + + +class TestMerge: + def test_int64_overflow_outer_merge(self): + # #2690, combinatorial explosion + df1 = DataFrame( + np.random.default_rng(2).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G1"], + ) + df2 = DataFrame( + np.random.default_rng(3).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G2"], + ) + result = merge(df1, df2, how="outer") + assert len(result) == 2000 + + @pytest.mark.slow + def test_int64_overflow_check_sum_col(self, left_right): + left, right = left_right + + out = merge(left, right, how="outer") + assert len(out) == len(left) + tm.assert_series_equal(out["left"], -out["right"], check_names=False) + result = out.iloc[:, :-2].sum(axis=1) + tm.assert_series_equal(out["left"], result, check_names=False) + assert result.name is None + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + def test_int64_overflow_how_merge(self, left_right, how): + left, right = left_right + + out = merge(left, right, how="outer") + out.sort_values(out.columns.tolist(), inplace=True) + out.index = np.arange(len(out)) + tm.assert_frame_equal(out, merge(left, right, how=how, sort=True)) + + @pytest.mark.slow + def test_int64_overflow_sort_false_order(self, left_right): + left, right = left_right + + # check that left merge w/ sort=False maintains left frame order + out = merge(left, right, how="left", sort=False) + tm.assert_frame_equal(left, out[left.columns.tolist()]) + + out = merge(right, left, how="left", sort=False) + tm.assert_frame_equal(right, out[right.columns.tolist()]) + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + @pytest.mark.parametrize("sort", [True, False]) + def test_int64_overflow_one_to_many_none_match(self, how, sort): + # one-2-many/none match + low, high, n = -1 << 10, 1 << 10, 1 << 11 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # confirm that this is checking what it is supposed to check + shape = left.apply(Series.nunique).values + assert is_int64_overflow_possible(shape) + + # add duplicates to left frame + left = concat([left, left], ignore_index=True) + + right = DataFrame( + np.random.default_rng(3).integers(low, high, (n // 2, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # add duplicates & overlap with left to the right frame + i = np.random.default_rng(4).choice(len(left), n) + right = concat([right, right, left.iloc[i]], ignore_index=True) + + left["left"] = np.random.default_rng(2).standard_normal(len(left)) + right["right"] = np.random.default_rng(2).standard_normal(len(right)) + + # shuffle left & right frames + i = np.random.default_rng(5).permutation(len(left)) + left = left.iloc[i].copy() + left.index = np.arange(len(left)) + + i = np.random.default_rng(6).permutation(len(right)) + right = right.iloc[i].copy() + right.index = np.arange(len(right)) + + # manually compute outer merge + ldict, rdict = defaultdict(list), defaultdict(list) + + for idx, row in left.set_index(list("ABCDEFG")).iterrows(): + ldict[idx].append(row["left"]) + + for idx, row in 
right.set_index(list("ABCDEFG")).iterrows(): + rdict[idx].append(row["right"]) + + vals = [] + for k, lval in ldict.items(): + rval = rdict.get(k, [np.nan]) + for lv, rv in product(lval, rval): + vals.append( + k + + ( + lv, + rv, + ) + ) + + for k, rval in rdict.items(): + if k not in ldict: + vals.extend( + k + + ( + np.nan, + rv, + ) + for rv in rval + ) + + def align(df): + df = df.sort_values(df.columns.tolist()) + df.index = np.arange(len(df)) + return df + + out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"]) + out = align(out) + + jmask = { + "left": out["left"].notna(), + "right": out["right"].notna(), + "inner": out["left"].notna() & out["right"].notna(), + "outer": np.ones(len(out), dtype="bool"), + } + + mask = jmask[how] + frame = align(out[mask].copy()) + assert mask.all() ^ mask.any() or how == "outer" + + res = merge(left, right, how=how, sort=sort) + if sort: + kcols = list("ABCDEFG") + tm.assert_frame_equal( + res[kcols].copy(), res[kcols].sort_values(kcols, kind="mergesort") + ) + + # as in GH9092 dtypes break with outer/right join + # 2021-12-18: dtype does not break anymore + tm.assert_frame_equal(frame, align(res)) + + +@pytest.mark.parametrize( + "codes_list, shape", + [ + [ + [ + np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64), + ], + (4, 5, 6), + ], + [ + [ + np.tile(np.arange(10000, dtype=np.int64), 5), + np.tile(np.arange(10000, dtype=np.int64), 5), + ], + (10000, 10000), + ], + ], +) +def test_decons(codes_list, shape): + group_index = get_group_index(codes_list, shape, sort=True, xnull=True) + codes_list2 = _decons_group_index(group_index, shape) + + for a, b in zip(codes_list, codes_list2): + tm.assert_numpy_array_equal(a, b) + + +class TestSafeSort: + @pytest.mark.parametrize( + "arg, exp", + [ + [[3, 1, 2, 0, 4], [0, 1, 2, 3, 4]], + [ + np.array(list("baaacb"), dtype=object), + np.array(list("aaabbc"), dtype=object), + ], + [[], []], + ], + ) + def test_basic_sort(self, arg, exp): + result = safe_sort(np.array(arg)) + expected = np.array(exp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + @pytest.mark.parametrize( + "codes, exp_codes", + [ + [[0, 1, 1, 2, 3, 0, -1, 4], [3, 1, 1, 2, 0, 3, -1, 4]], + [[], []], + ], + ) + def test_codes(self, verify, codes, exp_codes): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + result, result_codes = safe_sort( + values, codes, use_na_sentinel=True, verify=verify + ) + expected_codes = np.array(exp_codes, dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_codes_out_of_bound(self): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + # out of bound indices + codes = [0, 101, 102, 2, 3, 0, 99, 4] + result, result_codes = safe_sort(values, codes, use_na_sentinel=True) + expected_codes = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_mixed_integer(self): + values = np.array(["b", 1, 0, "a", 0, "b"], dtype=object) + result = safe_sort(values) + expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_mixed_integer_with_codes(self): + values = np.array(["b", 1, 0, "a"], dtype=object) + codes = [0, 1, 2, 3, 
0, -1, 1] + result, result_codes = safe_sort(values, codes) + expected = np.array([0, 1, "a", "b"], dtype=object) + expected_codes = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_unsortable(self): + # GH 13714 + arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) + msg = "'[<>]' not supported between instances of .*" + with pytest.raises(TypeError, match=msg): + safe_sort(arr) + + @pytest.mark.parametrize( + "arg, codes, err, msg", + [ + [1, None, TypeError, "Only np.ndarray, ExtensionArray, and Index"], + [np.array([0, 1, 2]), 1, TypeError, "Only list-like objects or None"], + [np.array([0, 1, 2, 1]), [0, 1], ValueError, "values should be unique"], + ], + ) + def test_exceptions(self, arg, codes, err, msg): + with pytest.raises(err, match=msg): + safe_sort(values=arg, codes=codes) + + @pytest.mark.parametrize( + "arg, exp", [[[1, 3, 2], [1, 2, 3]], [[1, 3, np.nan, 2], [1, 2, 3, np.nan]]] + ) + def test_extension_array(self, arg, exp): + a = array(arg, dtype="Int64") + result = safe_sort(a) + expected = array(exp, dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + def test_extension_array_codes(self, verify): + a = array([1, 3, 2], dtype="Int64") + result, codes = safe_sort(a, [0, 1, -1, 2], use_na_sentinel=True, verify=verify) + expected_values = array([1, 2, 3], dtype="Int64") + expected_codes = np.array([0, 2, -1, 1], dtype=np.intp) + tm.assert_extension_array_equal(result, expected_values) + tm.assert_numpy_array_equal(codes, expected_codes) + + +def test_mixed_str_null(nulls_fixture): + values = np.array(["b", nulls_fixture, "a", "b"], dtype=object) + result = safe_sort(values) + expected = np.array(["a", "b", "b", nulls_fixture], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_safe_sort_multiindex(): + # GH#48412 + arr1 = Series([2, 1, NA, NA], dtype="Int64") + arr2 = [2, 1, 3, 3] + midx = MultiIndex.from_arrays([arr1, arr2]) + result = safe_sort(midx) + expected = MultiIndex.from_arrays( + [Series([1, 2, NA, NA], dtype="Int64"), [1, 2, 3, 3]] + ) + tm.assert_index_equal(result, expected)
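+
+
+def test_safe_sort_codes_roundtrip_sketch():
+    # Illustrative sketch (an editorial addition, not part of the upstream
+    # suite; the test name is invented): when codes are supplied, safe_sort
+    # returns the sorted values together with the codes remapped to the new
+    # positions, so taking the remapped codes from the sorted values
+    # recovers the original selection.
+    values = np.array([3, 1, 2, 0, 4])
+    codes = [0, 1, 2, 3, 4]
+    ordered, new_codes = safe_sort(values, codes, use_na_sentinel=True)
+    tm.assert_numpy_array_equal(ordered.take(new_codes), values.take(codes))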